[llvm] 1ee315a - [AArch64] Convert tests to opaque pointers (NFC)

Nikita Popov via llvm-commits <llvm-commits at lists.llvm.org>
Mon Feb 5 03:40:05 PST 2024


Author: Nikita Popov
Date: 2024-02-05T12:39:51+01:00
New Revision: 1ee315ae7964c8433b772e0b5d667834994ba753

URL: https://github.com/llvm/llvm-project/commit/1ee315ae7964c8433b772e0b5d667834994ba753
DIFF: https://github.com/llvm/llvm-project/commit/1ee315ae7964c8433b772e0b5d667834994ba753.diff

LOG: [AArch64] Convert tests to opaque pointers (NFC)
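     The change is mechanical: typed pointer types in the test IR are replaced
     with the opaque ptr type, while the value type stays spelled out on the
     load/store/getelementptr instruction itself. A minimal before/after sketch
     of the pattern (illustrative only; the function name @read is hypothetical,
     not taken from any of the files below):

         ; before (typed pointers)
         define i32 @read(i32* %p) {
           %v = load i32, i32* %p
           ret i32 %v
         }

         ; after (opaque pointers)
         define i32 @read(ptr %p) {
           %v = load i32, ptr %p
           ret i32 %v
         }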

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-ext-debugloc.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-sext-debugloc.mir
    llvm/test/CodeGen/AArch64/GlobalISel/fp128-legalize-crash-pr35690.mir
    llvm/test/CodeGen/AArch64/GlobalISel/fp16-copy-gpr.mir
    llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir
    llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
    llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir
    llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
    llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-blockaddress.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-dyn-alloca.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-global-pic.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-global.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector-of-ptr.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-memlib-debug-loc.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi-insertpt-decrement.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-s128-div.mir
    llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
    llvm/test/CodeGen/AArch64/GlobalISel/localizer.mir
    llvm/test/CodeGen/AArch64/GlobalISel/non-pow-2-extload-combine.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extending-loads.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postselectopt-xclass-copies.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-icmp-to-true-false-known-bits.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-copy-prop-disabled.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads-cornercases.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads-s1.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-add-low.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-blockaddress.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-constant.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-extload.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-ldaxr-intrin.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-ldxr-intrin.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-load-store-vector-of-ptr.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-load.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-phi.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-pr32733.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddress-liveins.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-sextload.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-static.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-stlxr-intrin.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-store-truncating-float.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-store.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-stx.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select.mir
    llvm/test/CodeGen/AArch64/GlobalISel/sext-inreg-ldrow-16b.mir
    llvm/test/CodeGen/AArch64/GlobalISel/store-addressing-modes.mir
    llvm/test/CodeGen/AArch64/GlobalISel/store-merging.mir
    llvm/test/CodeGen/AArch64/PBQP-csr.ll
    llvm/test/CodeGen/AArch64/a55-fuse-address.mir
    llvm/test/CodeGen/AArch64/aarch64-dup-ext-crash.ll
    llvm/test/CodeGen/AArch64/aarch64-ldst-no-premature-sp-pop.mir
    llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
    llvm/test/CodeGen/AArch64/aarch64-mov-debug-locs.mir
    llvm/test/CodeGen/AArch64/aarch64-p2align-max-bytes-neoverse.ll
    llvm/test/CodeGen/AArch64/aarch64-p2align-max-bytes.ll
    llvm/test/CodeGen/AArch64/add-i256.ll
    llvm/test/CodeGen/AArch64/addrsig-macho.ll
    llvm/test/CodeGen/AArch64/align-down.ll
    llvm/test/CodeGen/AArch64/arm64-collect-loh.ll
    llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog-bad-outline.mir
    llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog.ll
    llvm/test/CodeGen/AArch64/arm64-ldp.ll
    llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
    llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
    llvm/test/CodeGen/AArch64/arm64-non-pow2-ldst.ll
    llvm/test/CodeGen/AArch64/arm64-preserve-all.ll
    llvm/test/CodeGen/AArch64/arm64-zip.ll
    llvm/test/CodeGen/AArch64/branch-relax-block-size.mir
    llvm/test/CodeGen/AArch64/compute-call-frame-size-unreachable-pass.ll
    llvm/test/CodeGen/AArch64/concat_vector-truncate-combine.ll
    llvm/test/CodeGen/AArch64/dag-combine-lifetime-end-store-typesize.ll
    llvm/test/CodeGen/AArch64/dag-combine-trunc-build-vec.ll
    llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
    llvm/test/CodeGen/AArch64/divrem.ll
    llvm/test/CodeGen/AArch64/dont-shrink-wrap-stack-mayloadorstore.mir
    llvm/test/CodeGen/AArch64/early-ifcvt-regclass-mismatch.mir
    llvm/test/CodeGen/AArch64/elim-dead-mi.mir
    llvm/test/CodeGen/AArch64/expand-blr-rvmarker-pseudo.mir
    llvm/test/CodeGen/AArch64/fmov-imm-licm.ll
    llvm/test/CodeGen/AArch64/inline-asm-constraints-bad-sve.ll
    llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
    llvm/test/CodeGen/AArch64/irg-nomem.mir
    llvm/test/CodeGen/AArch64/ldradr.ll
    llvm/test/CodeGen/AArch64/ldst-nopreidx-sp-redzone.mir
    llvm/test/CodeGen/AArch64/ldst-opt-aa.mir
    llvm/test/CodeGen/AArch64/ldst-opt-non-imm-offset.mir
    llvm/test/CodeGen/AArch64/ldst-opt-zr-clobber.mir
    llvm/test/CodeGen/AArch64/machine-combiner-fmul-dup.mir
    llvm/test/CodeGen/AArch64/machine-outliner-bti.mir
    llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-sp-mod.mir
    llvm/test/CodeGen/AArch64/machine-scheduler.mir
    llvm/test/CodeGen/AArch64/memcpy-scoped-aa.ll
    llvm/test/CodeGen/AArch64/merge-scoped-aa-store.ll
    llvm/test/CodeGen/AArch64/merge-store.ll
    llvm/test/CodeGen/AArch64/multi-vector-load-size.ll
    llvm/test/CodeGen/AArch64/nontemporal-load.ll
    llvm/test/CodeGen/AArch64/pre-indexed-addrmode-with-constant-offset.ll
    llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
    llvm/test/CodeGen/AArch64/rvmarker-pseudo-expansion-and-outlining.mir
    llvm/test/CodeGen/AArch64/sched-movprfx.ll
    llvm/test/CodeGen/AArch64/settag-merge.mir
    llvm/test/CodeGen/AArch64/sme-intrinsics-mova-extract.ll
    llvm/test/CodeGen/AArch64/speculation-hardening-sls.mir
    llvm/test/CodeGen/AArch64/speculation-hardening.mir
    llvm/test/CodeGen/AArch64/spillfill-sve.ll
    llvm/test/CodeGen/AArch64/stack-guard-reassign-sve.mir
    llvm/test/CodeGen/AArch64/stack-guard-reassign.mir
    llvm/test/CodeGen/AArch64/stack-guard-sve.ll
    llvm/test/CodeGen/AArch64/stack-probing-64k.ll
    llvm/test/CodeGen/AArch64/stack-tagging-cfi.ll
    llvm/test/CodeGen/AArch64/stp-opt-with-renaming-ld3.mir
    llvm/test/CodeGen/AArch64/stp-opt-with-renaming-undef-assert.mir
    llvm/test/CodeGen/AArch64/sub-of-bias.ll
    llvm/test/CodeGen/AArch64/sve-alloca-stackid.ll
    llvm/test/CodeGen/AArch64/sve-alloca.ll
    llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
    llvm/test/CodeGen/AArch64/sve-dead-masked-store.ll
    llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
    llvm/test/CodeGen/AArch64/sve-fold-vscale.ll
    llvm/test/CodeGen/AArch64/sve-forward-st-to-ld.ll
    llvm/test/CodeGen/AArch64/sve-fp.ll
    llvm/test/CodeGen/AArch64/sve-fpext-load.ll
    llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll
    llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
    llvm/test/CodeGen/AArch64/sve-gep.ll
    llvm/test/CodeGen/AArch64/sve-insert-vector.ll
    llvm/test/CodeGen/AArch64/sve-int-arith.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-contiguous-prefetches.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-imm.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-imm.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-imm-addr-mode.ll
    llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll
    llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll
    llvm/test/CodeGen/AArch64/sve-ld1r.ll
    llvm/test/CodeGen/AArch64/sve-masked-gather.ll
    llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
    llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
    llvm/test/CodeGen/AArch64/sve-masked-ldst-trunc.ll
    llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
    llvm/test/CodeGen/AArch64/sve-masked-scatter.ll
    llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
    llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
    llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll
    llvm/test/CodeGen/AArch64/sve-redundant-store.ll
    llvm/test/CodeGen/AArch64/sve-setcc.ll
    llvm/test/CodeGen/AArch64/sve-split-load.ll
    llvm/test/CodeGen/AArch64/sve-split-store.ll
    llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll
    llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-reg.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll
    llvm/test/CodeGen/AArch64/sve-trunc.ll
    llvm/test/CodeGen/AArch64/sve-uunpklo-load-uzp1-store-combine.ll
    llvm/test/CodeGen/AArch64/sve-varargs-callee-broken.ll
    llvm/test/CodeGen/AArch64/sve2p1-intrinsics-ld1-single.ll
    llvm/test/CodeGen/AArch64/sve2p1-intrinsics-multivec-loads.ll
    llvm/test/CodeGen/AArch64/sve2p1-intrinsics-multivec-stores.ll
    llvm/test/CodeGen/AArch64/sve2p1-intrinsics-st1-single.ll
    llvm/test/CodeGen/AArch64/swift-error-unreachable-use.ll
    llvm/test/CodeGen/AArch64/taildup-addrtaken.mir
    llvm/test/CodeGen/AArch64/tailmerging_in_mbp.ll
    llvm/test/CodeGen/AArch64/tiny-model-pic.ll
    llvm/test/CodeGen/AArch64/tiny-model-static.ll
    llvm/test/CodeGen/AArch64/unwind-preserved-from-mir.mir
    llvm/test/CodeGen/AArch64/v3f-to-int.ll
    llvm/test/CodeGen/AArch64/win-catchpad-nested-cxx.ll
    llvm/test/CodeGen/AArch64/wineh-frame5.mir
    llvm/test/CodeGen/AArch64/wineh-frame6.mir
    llvm/test/CodeGen/AArch64/wineh-frame7.mir
    llvm/test/CodeGen/AArch64/wineh-frame8.mir
    llvm/test/CodeGen/AArch64/wineh5.mir
    llvm/test/CodeGen/AArch64/wineh_shrinkwrap.mir
    llvm/test/CodeGen/AArch64/wrong-callee-save-size-after-livedebugvariables.mir
    llvm/test/CodeGen/AArch64/zero-reg.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
index 7092bae1cbb19..cec8d9ea5ba9d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
@@ -25,16 +25,16 @@
   entry:
     ret void
   }
-  define void @phiPropagation(i32* %src, i32* %dst, i1 %cond) {
+  define void @phiPropagation(ptr %src, ptr %dst, i1 %cond) {
   entry:
-    %srcVal = load i32, i32* %src
+    %srcVal = load i32, ptr %src
     br i1 %cond, label %end, label %then
   then:
     %res = add i32 %srcVal, 36
     br label %end
   end:
     %toStore = phi i32 [ %srcVal, %entry ], [ %res, %then ]
-    store i32 %toStore, i32* %dst
+    store i32 %toStore, ptr %dst
     ret void
   }
   define void @defaultMappingUseRepairPhysReg() {
@@ -71,26 +71,26 @@
   define void @copy_s128_from_load() { ret void }
   define void @copy_fp16() { ret void }
 
-  define i64 @greedyWithChainOfComputation(i64 %arg1, <2 x i32>* %addr) {
+  define i64 @greedyWithChainOfComputation(i64 %arg1, ptr %addr) {
     %varg1 = bitcast i64 %arg1 to <2 x i32>
-    %varg2 = load <2 x i32>, <2 x i32>* %addr
+    %varg2 = load <2 x i32>, ptr %addr
     %vres = or <2 x i32> %varg1, %varg2
     %res = bitcast <2 x i32> %vres to i64
     ret i64 %res
   }
 
-  define i64 @floatingPointLoad(i64 %arg1, double* %addr) {
+  define i64 @floatingPointLoad(i64 %arg1, ptr %addr) {
     %varg1 = bitcast i64 %arg1 to double
-    %varg2 = load double, double* %addr
+    %varg2 = load double, ptr %addr
     %vres = fadd double %varg1, %varg2
     %res = bitcast double %vres to i64
     ret i64 %res
   }
 
-  define void @floatingPointStore(i64 %arg1, double* %addr) {
+  define void @floatingPointStore(i64 %arg1, ptr %addr) {
     %varg1 = bitcast i64 %arg1 to double
     %vres = fadd double %varg1, %varg1
-    store double %vres, double* %addr
+    store double %vres, ptr %addr
     ret void
   }
 
@@ -106,8 +106,8 @@
   define half @passFp16ViaAllocas(half %p) {
   entry:
     %p.addr = alloca half, align 2
-    store half %p, half* %p.addr, align 2
-    %0 = load half, half* %p.addr, align 2
+    store half %p, ptr %p.addr, align 2
+    %0 = load half, ptr %p.addr, align 2
     ret half %0
   }
 ...

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-ext-debugloc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-ext-debugloc.mir
index 4c0e191f7196d..7296e10b4a61e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-ext-debugloc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-ext-debugloc.mir
@@ -10,17 +10,17 @@
 
   @.str = external dso_local unnamed_addr constant [4 x i8], align 1
 
-  define void @main() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg !4 {
+  define void @main() personality ptr @__gxx_personality_v0 !dbg !4 {
   entry:
     %tobool = trunc i8 undef to i1
     %conv = zext i1 %tobool to i32
-    call void (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %conv), !dbg !8
-    %0 = load i32, i32* undef, align 4, !dbg !9
-    call void (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32 %0)
+    call void (ptr, ...) @printf(ptr @.str, i32 %conv), !dbg !8
+    %0 = load i32, ptr undef, align 4, !dbg !9
+    call void (ptr, ...) @printf(ptr @.str, i32 %0)
     ret void
   }
 
-  declare void @printf(i8*, ...)
+  declare void @printf(ptr, ...)
   declare i32 @__gxx_personality_v0(...)
 
   !llvm.dbg.cu = !{!0}
@@ -67,7 +67,7 @@ body:             |
     G_STORE %8(s64), %7(p0), debug-location !8 :: (store (s64) into stack, align 1)
     BL @printf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, debug-location !8
     ADJCALLSTACKUP 8, 0, implicit-def $sp, implicit $sp, debug-location !8
-    %13:_(s64) = G_LOAD %10(p0), debug-location !9 :: (load (s32) from `i32* undef`)
+    %13:_(s64) = G_LOAD %10(p0), debug-location !9 :: (load (s32) from `ptr undef`)
     ADJCALLSTACKDOWN 8, 0, implicit-def $sp, implicit $sp
     $x0 = COPY %4(p0)
     %11:_(p0) = COPY $sp

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-sext-debugloc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-sext-debugloc.mir
index b0d7b04ed50a9..719642103e1a2 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-sext-debugloc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-sext-debugloc.mir
@@ -8,14 +8,14 @@
   target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64-unknown-unknown"
   
-  define i64 @main(i8 %pat) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg !4 {
+  define i64 @main(i8 %pat) personality ptr @__gxx_personality_v0 !dbg !4 {
   entry:
     %sext.1 = zext i8 %pat to i16, !dbg !8
     %sext.zext.1 = sext i16 %sext.1 to i64, !dbg !9
     ret i64 %sext.zext.1
   }
   
-  declare void @printf(i8*, ...)
+  declare void @printf(ptr, ...)
   
   declare i32 @__gxx_personality_v0(...)
   

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/fp128-legalize-crash-pr35690.mir b/llvm/test/CodeGen/AArch64/GlobalISel/fp128-legalize-crash-pr35690.mir
index 8e65483583a00..d24fb62ffab24 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/fp128-legalize-crash-pr35690.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/fp128-legalize-crash-pr35690.mir
@@ -6,8 +6,8 @@
   define fp128 @x(fp128 %a) {
   entry:
     %a.addr = alloca fp128, align 16
-    store fp128 %a, fp128* %a.addr, align 16
-    %0 = load fp128, fp128* %a.addr, align 16
+    store fp128 %a, ptr %a.addr, align 16
+    %0 = load fp128, ptr %a.addr, align 16
     %sub = fsub fp128 0xL00000000000000008000000000000000, %0
     ret fp128 %sub
   }

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/fp16-copy-gpr.mir b/llvm/test/CodeGen/AArch64/GlobalISel/fp16-copy-gpr.mir
index dd7db6eec8ef5..065f581c599af 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/fp16-copy-gpr.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/fp16-copy-gpr.mir
@@ -7,7 +7,7 @@
   target triple = "aarch64"
 
   ; Function Attrs: noinline nounwind optnone
-  define void @fp16_to_gpr([2 x half], [2 x half]* %addr) {
+  define void @fp16_to_gpr([2 x half], ptr %addr) {
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir
index ee00092fb32c6..b83985fa32638 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir
@@ -4,13 +4,13 @@
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
   target triple = "arm64-apple-darwin"
 
-  declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1 immarg) #0
+  declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg) #0
 
-  define void @test_memcpy_inline(i32* nocapture %dst, i32* nocapture readonly %src) local_unnamed_addr {
+  define void @test_memcpy_inline(ptr nocapture %dst, ptr nocapture readonly %src) local_unnamed_addr {
   entry:
-    %0 = bitcast i32* %dst to i8*
-    %1 = bitcast i32* %src to i8*
-    tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 143, i1 false)
+    %0 = bitcast ptr %dst to ptr
+    %1 = bitcast ptr %src to ptr
+    tail call void @llvm.memcpy.inline.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 143, i1 false)
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
index aaf2cb0476b91..0a22c7d1f94bb 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
@@ -4,54 +4,54 @@
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
   target triple = "arm64-apple-darwin"
 
-  define void @test_memcpy1(i32* nocapture %dst, i32* nocapture readonly %src, i64 %len) local_unnamed_addr #0 {
+  define void @test_memcpy1(ptr nocapture %dst, ptr nocapture readonly %src, i64 %len) local_unnamed_addr #0 {
   entry:
-    %0 = bitcast i32* %dst to i8*
-    %1 = bitcast i32* %src to i8*
-    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 %len, i1 false)
+    %0 = bitcast ptr %dst to ptr
+    %1 = bitcast ptr %src to ptr
+    tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 %len, i1 false)
     ret void
   }
 
-  declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1 immarg) #1
-  declare void @llvm.memcpy.p1i8.p2i8.i64(i8 addrspace(1)* nocapture writeonly, i8 addrspace(2)* nocapture readonly, i64, i1 immarg) #1
+  declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg) #1
+  declare void @llvm.memcpy.p1.p2.i64(ptr addrspace(1) nocapture writeonly, ptr addrspace(2) nocapture readonly, i64, i1 immarg) #1
 
-  define void @test_memcpy2_const(i32* nocapture %dst, i32* nocapture readonly %src) local_unnamed_addr #0 {
+  define void @test_memcpy2_const(ptr nocapture %dst, ptr nocapture readonly %src) local_unnamed_addr #0 {
   entry:
-    %0 = bitcast i32* %dst to i8*
-    %1 = bitcast i32* %src to i8*
-    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 72, i1 false)
+    %0 = bitcast ptr %dst to ptr
+    %1 = bitcast ptr %src to ptr
+    tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 72, i1 false)
     ret void
   }
 
-  define void @test_memcpy2_const_optsize(i32* nocapture %dst, i32* nocapture readonly %src) local_unnamed_addr #2 {
+  define void @test_memcpy2_const_optsize(ptr nocapture %dst, ptr nocapture readonly %src) local_unnamed_addr #2 {
   entry:
-    %0 = bitcast i32* %dst to i8*
-    %1 = bitcast i32* %src to i8*
-    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 72, i1 false)
+    %0 = bitcast ptr %dst to ptr
+    %1 = bitcast ptr %src to ptr
+    tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 72, i1 false)
     ret void
   }
 
-  define void @test_memcpy2_const_minsize(i32* nocapture %dst, i32* nocapture readonly %src) local_unnamed_addr #3 {
+  define void @test_memcpy2_const_minsize(ptr nocapture %dst, ptr nocapture readonly %src) local_unnamed_addr #3 {
   entry:
-    %0 = bitcast i32* %dst to i8*
-    %1 = bitcast i32* %src to i8*
-    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 72, i1 false)
+    %0 = bitcast ptr %dst to ptr
+    %1 = bitcast ptr %src to ptr
+    tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 72, i1 false)
     ret void
   }
 
-  define void @test_memcpy3_const_arrays_unaligned(i32* nocapture %dst, i32* nocapture readonly %src) local_unnamed_addr #0 {
+  define void @test_memcpy3_const_arrays_unaligned(ptr nocapture %dst, ptr nocapture readonly %src) local_unnamed_addr #0 {
   entry:
-    %0 = bitcast i32* %dst to i8*
-    %1 = bitcast i32* %src to i8*
-    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 143, i1 false)
+    %0 = bitcast ptr %dst to ptr
+    %1 = bitcast ptr %src to ptr
+    tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 143, i1 false)
     ret void
   }
 
-  define void @test_memcpy_addrspace(i32 addrspace(1)* nocapture %dst, i32 addrspace(2)* nocapture readonly %src) local_unnamed_addr #0 {
+  define void @test_memcpy_addrspace(ptr addrspace(1) nocapture %dst, ptr addrspace(2) nocapture readonly %src) local_unnamed_addr #0 {
   entry:
-    %0 = bitcast i32 addrspace(1)* %dst to i8 addrspace(1)*
-    %1 = bitcast i32 addrspace(2)* %src to i8 addrspace(2)*
-    tail call void @llvm.memcpy.p1i8.p2i8.i64(i8 addrspace(1)* align 4 %0, i8 addrspace(2)* align 4 %1, i64 72, i1 false)
+    %0 = bitcast ptr addrspace(1) %dst to ptr addrspace(1)
+    %1 = bitcast ptr addrspace(2) %src to ptr addrspace(2)
+    tail call void @llvm.memcpy.p1.p2.i64(ptr addrspace(1) align 4 %0, ptr addrspace(2) align 4 %1, i64 72, i1 false)
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir
index a86e7ef6dc729..f31b64ece8957 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir
@@ -4,46 +4,46 @@
   target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64"
 
-  define void @test_memmove1(i32* nocapture %dst, i32* nocapture readonly %src, i64 %len) local_unnamed_addr #0 {
+  define void @test_memmove1(ptr nocapture %dst, ptr nocapture readonly %src, i64 %len) local_unnamed_addr #0 {
   entry:
-    %0 = bitcast i32* %dst to i8*
-    %1 = bitcast i32* %src to i8*
-    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 %len, i1 false)
+    %0 = bitcast ptr %dst to ptr
+    %1 = bitcast ptr %src to ptr
+    tail call void @llvm.memmove.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 %len, i1 false)
     ret void
   }
 
-  declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1 immarg) #1
-  declare void @llvm.memmove.p1i8.p2i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(2)* nocapture readonly, i64, i1 immarg) #1
+  declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1 immarg) #1
+  declare void @llvm.memmove.p1.p2.i64(ptr addrspace(1) nocapture, ptr addrspace(2) nocapture readonly, i64, i1 immarg) #1
 
-  define void @test_memmove2_const(i32* nocapture %dst, i32* nocapture readonly %src) local_unnamed_addr #0 {
+  define void @test_memmove2_const(ptr nocapture %dst, ptr nocapture readonly %src) local_unnamed_addr #0 {
   entry:
-    %0 = bitcast i32* %dst to i8*
-    %1 = bitcast i32* %src to i8*
-    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 48, i1 false)
+    %0 = bitcast ptr %dst to ptr
+    %1 = bitcast ptr %src to ptr
+    tail call void @llvm.memmove.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 48, i1 false)
     ret void
   }
 
-  define void @test_memmove3_const_toolarge(i32* nocapture %dst, i32* nocapture readonly %src) local_unnamed_addr #0 {
+  define void @test_memmove3_const_toolarge(ptr nocapture %dst, ptr nocapture readonly %src) local_unnamed_addr #0 {
   entry:
-    %0 = bitcast i32* %dst to i8*
-    %1 = bitcast i32* %src to i8*
-    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 96, i1 false)
+    %0 = bitcast ptr %dst to ptr
+    %1 = bitcast ptr %src to ptr
+    tail call void @llvm.memmove.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 96, i1 false)
     ret void
   }
 
-  define void @test_memmove4_const_unaligned(i32* nocapture %dst, i32* nocapture readonly %src) local_unnamed_addr #0 {
+  define void @test_memmove4_const_unaligned(ptr nocapture %dst, ptr nocapture readonly %src) local_unnamed_addr #0 {
   entry:
-    %0 = bitcast i32* %dst to i8*
-    %1 = bitcast i32* %src to i8*
-    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 52, i1 false)
+    %0 = bitcast ptr %dst to ptr
+    %1 = bitcast ptr %src to ptr
+    tail call void @llvm.memmove.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 52, i1 false)
     ret void
   }
 
-  define void @test_memmove_addrspace(i32 addrspace(1)* nocapture %dst, i32 addrspace(2)* nocapture readonly %src) local_unnamed_addr #0 {
+  define void @test_memmove_addrspace(ptr addrspace(1) nocapture %dst, ptr addrspace(2) nocapture readonly %src) local_unnamed_addr #0 {
   entry:
-    %0 = bitcast i32 addrspace(1)* %dst to i8 addrspace(1)*
-    %1 = bitcast i32 addrspace(2)* %src to i8 addrspace(2)*
-    tail call void @llvm.memmove.p1i8.p2i8.i64(i8 addrspace(1)* align 4 %0, i8 addrspace(2)* align 4 %1, i64 8, i1 false)
+    %0 = bitcast ptr addrspace(1) %dst to ptr addrspace(1)
+    %1 = bitcast ptr addrspace(2) %src to ptr addrspace(2)
+    tail call void @llvm.memmove.p1.p2.i64(ptr addrspace(1) align 4 %0, ptr addrspace(2) align 4 %1, i64 8, i1 false)
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
index a02fb08342c16..00c7fc4cab3ab 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
@@ -4,51 +4,51 @@
   target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64"
 
-  define void @test_ms1(i8* nocapture %dst, i32 %c, i32 %len) local_unnamed_addr #0 {
+  define void @test_ms1(ptr nocapture %dst, i32 %c, i32 %len) local_unnamed_addr #0 {
   entry:
     %0 = trunc i32 %c to i8
     %conv = zext i32 %len to i64
-    tail call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 %0, i64 %conv, i1 false)
+    tail call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 %0, i64 %conv, i1 false)
     ret void
   }
 
-  declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #1
+  declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #1
 
-  define void @test_ms2_const(i8* nocapture %dst, i32 %c) local_unnamed_addr #0 {
+  define void @test_ms2_const(ptr nocapture %dst, i32 %c) local_unnamed_addr #0 {
   entry:
     %0 = trunc i32 %c to i8
-    tail call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 %0, i64 16, i1 false)
+    tail call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 %0, i64 16, i1 false)
     ret void
   }
 
-  define void @test_zero_const(i8* nocapture %dst) local_unnamed_addr #0 {
+  define void @test_zero_const(ptr nocapture %dst) local_unnamed_addr #0 {
   entry:
-    tail call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 64, i1 false)
+    tail call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 64, i1 false)
     ret void
   }
 
-  define void @test_ms3_const_both(i8* nocapture %dst) local_unnamed_addr #0 {
+  define void @test_ms3_const_both(ptr nocapture %dst) local_unnamed_addr #0 {
   entry:
-    tail call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 64, i64 16, i1 false)
+    tail call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 64, i64 16, i1 false)
     ret void
   }
 
-  define void @test_ms_vector(i8* nocapture %dst, i32 %c) local_unnamed_addr #0 {
+  define void @test_ms_vector(ptr nocapture %dst, i32 %c) local_unnamed_addr #0 {
   entry:
     %0 = trunc i32 %c to i8
-    tail call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 %0, i64 16, i1 false)
+    tail call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 %0, i64 16, i1 false)
     ret void
   }
 
-  define void @test_ms4_const_both_unaligned(i8* nocapture %dst) local_unnamed_addr #0 {
+  define void @test_ms4_const_both_unaligned(ptr nocapture %dst) local_unnamed_addr #0 {
   entry:
-    tail call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 64, i64 18, i1 false)
+    tail call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 64, i64 18, i1 false)
     ret void
   }
 
-  define void @minsize(i8* nocapture %dst) minsize { unreachable }
+  define void @minsize(ptr nocapture %dst) minsize { unreachable }
 
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
 
   attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cyclone" "target-features"="+aes,+crypto,+fp-armv8,+neon,+sha2,+zcm,+zcz" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir
index 552fd8174e79c..699594b3d3e9a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir
@@ -4,21 +4,21 @@
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
   target triple = "arm64-apple-darwin"
 
-  declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1 immarg) #1
+  declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg) #1
 
-  define void @test_small_memcpy(i32* nocapture %dst, i32* nocapture readonly %src) {
+  define void @test_small_memcpy(ptr nocapture %dst, ptr nocapture readonly %src) {
   entry:
-    %0 = bitcast i32* %dst to i8*
-    %1 = bitcast i32* %src to i8*
-    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 32, i1 false)
+    %0 = bitcast ptr %dst to ptr
+    %1 = bitcast ptr %src to ptr
+    tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 32, i1 false)
     ret void
   }
 
-  define void @test_large_memcpy(i32* nocapture %dst, i32* nocapture readonly %src) {
+  define void @test_large_memcpy(ptr nocapture %dst, ptr nocapture readonly %src) {
   entry:
-    %0 = bitcast i32* %dst to i8*
-    %1 = bitcast i32* %src to i8*
-    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 36, i1 false)
+    %0 = bitcast ptr %dst to ptr
+    %1 = bitcast ptr %src to ptr
+    tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 36, i1 false)
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir
index f34766642623b..d5d0a1b122352 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir
@@ -4,10 +4,10 @@
 --- |
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
-  define void @cmpxchg_i8(i8* %addr) { ret void }
-  define void @cmpxchg_i16(i16* %addr) { ret void }
-  define void @cmpxchg_i32(i32* %addr) { ret void }
-  define void @cmpxchg_i64(i64* %addr) { ret void }
+  define void @cmpxchg_i8(ptr %addr) { ret void }
+  define void @cmpxchg_i16(ptr %addr) { ret void }
+  define void @cmpxchg_i32(ptr %addr) { ret void }
+  define void @cmpxchg_i64(ptr %addr) { ret void }
 ...
 
 ---

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-blockaddress.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-blockaddress.mir
index 251c1df37e163..84ea6b89e4e14 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-blockaddress.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-blockaddress.mir
@@ -4,11 +4,11 @@
   target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64-none-linux-gnu"
 
-  @addr = dso_local global i8* null
+  @addr = dso_local global ptr null
 
   define dso_local void @test_blockaddress() {
-    store i8* blockaddress(@test_blockaddress, %block), i8** @addr
-    indirectbr i8* blockaddress(@test_blockaddress, %block), [label %block]
+    store ptr blockaddress(@test_blockaddress, %block), ptr @addr
+    indirectbr ptr blockaddress(@test_blockaddress, %block), [label %block]
 
   block:                                            ; preds = %0
     ret void

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
index e0f07e1fdb845..3c010789a2b7f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
@@ -4,8 +4,8 @@
 --- |
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
-  define void @cmpxchg_i32(i64* %addr) { ret void }
-  define void @cmpxchg_i64(i64* %addr) { ret void }
+  define void @cmpxchg_i32(ptr %addr) { ret void }
+  define void @cmpxchg_i64(ptr %addr) { ret void }
 ...
 
 ---

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir
index ad0d0854875e6..2ab14afdb8e1e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir
@@ -4,10 +4,10 @@
 --- |
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
-  define void @cmpxchg_i8(i8* %addr) { ret void }
-  define void @cmpxchg_i16(i16* %addr) { ret void }
-  define void @cmpxchg_i32(i32* %addr) { ret void }
-  define void @cmpxchg_i64(i64* %addr) { ret void }
+  define void @cmpxchg_i8(ptr %addr) { ret void }
+  define void @cmpxchg_i16(ptr %addr) { ret void }
+  define void @cmpxchg_i32(ptr %addr) { ret void }
+  define void @cmpxchg_i64(ptr %addr) { ret void }
 ...
 
 ---

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-dyn-alloca.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-dyn-alloca.mir
index c0a286b0a1ca0..a1eb45e1b7991 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-dyn-alloca.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-dyn-alloca.mir
@@ -4,34 +4,34 @@
   target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64"
 
-  define i8* @test_simple_alloca(i32 %numelts) {
+  define ptr @test_simple_alloca(i32 %numelts) {
     %addr = alloca i8, i32 %numelts
-    ret i8* %addr
+    ret ptr %addr
   }
 
-  define i8* @test_aligned_alloca(i32 %numelts) {
+  define ptr @test_aligned_alloca(i32 %numelts) {
     %addr = alloca i8, i32 %numelts, align 32
-    ret i8* %addr
+    ret ptr %addr
   }
 
-  define i128* @test_natural_alloca(i32 %numelts) {
+  define ptr @test_natural_alloca(i32 %numelts) {
     %addr = alloca i128, i32 %numelts
-    ret i128* %addr
+    ret ptr %addr
   }
 
-  define i8* @test_simple_alloca_stack_probing(i32 %numelts) "probe-stack"="inline-asm" {
+  define ptr @test_simple_alloca_stack_probing(i32 %numelts) "probe-stack"="inline-asm" {
     %addr = alloca i8, i32 %numelts
-    ret i8* %addr
+    ret ptr %addr
   }
 
-  define i8* @test_aligned_alloca_stack_probing(i32 %numelts) "probe-stack"="inline-asm" {
+  define ptr @test_aligned_alloca_stack_probing(i32 %numelts) "probe-stack"="inline-asm" {
     %addr = alloca i8, i32 %numelts, align 32
-    ret i8* %addr
+    ret ptr %addr
   }
 
-  define i128* @test_natural_alloca_stack_probing(i32 %numelts) "probe-stack"="inline-asm" {
+  define ptr @test_natural_alloca_stack_probing(i32 %numelts) "probe-stack"="inline-asm" {
     %addr = alloca i128, i32 %numelts
-    ret i128* %addr
+    ret ptr %addr
   }
 
 ...

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-global-pic.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-global-pic.mir
index 706bab2d0092f..37fad041311dc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-global-pic.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-global-pic.mir
@@ -5,8 +5,8 @@
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64--"
   @var = external global i8
-  define i8* @test_global() { ret i8* undef }
-  define i8* @test_global_with_offset() { ret i8* undef }
+  define ptr @test_global() { ret ptr undef }
+  define ptr @test_global_with_offset() { ret ptr undef }
 ...
 ---
 name:            test_global

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-global.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-global.mir
index 4338db9df94a4..18c861b3d20e7 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-global.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-global.mir
@@ -6,8 +6,8 @@
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64--"
   @var = external dso_local global i8
-  define i8* @test_global() { ret i8* undef }
-  define i8* @test_global_with_offset() { ret i8* undef }
+  define ptr @test_global() { ret ptr undef }
+  define ptr @test_global_with_offset() { ret ptr undef }
 ...
 ---
 name:            test_global

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector-of-ptr.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector-of-ptr.mir
index f0082ddf30e20..a8f373f4f6135 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector-of-ptr.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector-of-ptr.mir
@@ -5,17 +5,17 @@
   target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64"
 
-  define void @store_v2p0(<2 x i8*> %v, <2 x i8*>* %ptr) {
-    store <2 x i8*> %v, <2 x i8*>* %ptr
+  define void @store_v2p0(<2 x ptr> %v, ptr %ptr) {
+    store <2 x ptr> %v, ptr %ptr
     ret void
   }
 
-  define <2 x i8*> @load_v2p0(<2 x i8*>* %ptr) {
-    %v = load <2 x i8*>, <2 x i8*>* %ptr
-    ret <2 x i8*> %v
+  define <2 x ptr> @load_v2p0(ptr %ptr) {
+    %v = load <2 x ptr>, ptr %ptr
+    ret <2 x ptr> %v
   }
 
-  define void @load_v2p1(<2 x i8*>* %ptr) { ret void }
+  define void @load_v2p1(ptr %ptr) { ret void }
 
 ...
 ---

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memlib-debug-loc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memlib-debug-loc.mir
index 653fe064914af..0a8567f2b9ba6 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memlib-debug-loc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memlib-debug-loc.mir
@@ -3,15 +3,15 @@
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
   target triple = "arm64-apple-ios13.0.0"
 
-  define void @test_memset_debug(i8* %ptr, i32 %c, i32 %len) local_unnamed_addr!dbg !9 {
+  define void @test_memset_debug(ptr %ptr, i32 %c, i32 %len) local_unnamed_addr!dbg !9 {
   entry:
     %conv = zext i32 %len to i64, !dbg !11
     %0 = trunc i32 %c to i8, !dbg !11
-    call void @llvm.memset.p0i8.i64(i8* align 1 %ptr, i8 %0, i64 %conv, i1 false) #3, !dbg !11
+    call void @llvm.memset.p0.i64(ptr align 1 %ptr, i8 %0, i64 %conv, i1 false) #3, !dbg !11
     ret void, !dbg !12
   }
 
-  declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #1
+  declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #1
   attributes #1 = { argmemonly nounwind willreturn writeonly }
 
   !llvm.module.flags = !{!0, !1, !2, !3, !4}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi-insertpt-decrement.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi-insertpt-decrement.mir
index f1aee90bb07dd..a2959e6ca4e0e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi-insertpt-decrement.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi-insertpt-decrement.mir
@@ -8,17 +8,17 @@
     br label %bb1
 
   bb1:                                              ; preds = %bb3, %bb
-    %lsr.iv = phi i16* [ %scevgep, %bb3 ], [ undef, %bb ]
+    %lsr.iv = phi ptr [ %scevgep, %bb3 ], [ undef, %bb ]
     %tmp = phi i1 [ %tmp9, %bb3 ], [ undef, %bb ]
     br i1 %tmp, label %bb10, label %bb3
 
   bb3:                                              ; preds = %bb1
-    %tmp5 = getelementptr i16, i16* null, i64 2
-    %tmp6 = load i16, i16* %lsr.iv, align 2, !tbaa !0
+    %tmp5 = getelementptr i16, ptr null, i64 2
+    %tmp6 = load i16, ptr %lsr.iv, align 2, !tbaa !0
     %tmp7 = icmp eq i16 %tmp6, -1
-    %tmp8 = load i16, i16* %tmp5, align 2, !tbaa !0
+    %tmp8 = load i16, ptr %tmp5, align 2, !tbaa !0
     %tmp9 = icmp eq i16 %tmp8, -1
-    %scevgep = getelementptr i16, i16* %lsr.iv, i64 2
+    %scevgep = getelementptr i16, ptr %lsr.iv, i64 2
     br i1 %tmp7, label %bb10, label %bb1
 
   bb10:                                             ; preds = %bb3, %bb1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-s128-div.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-s128-div.mir
index 0f0f2218c5809..49e03251daa6c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-s128-div.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-s128-div.mir
@@ -4,9 +4,9 @@
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64-apple-ios"
 
-  define void @udiv_test(i128* %v1ptr, i128* %v2ptr) { ret void }
+  define void @udiv_test(ptr %v1ptr, ptr %v2ptr) { ret void }
 
-  define void @sdiv_test(i128* %v1ptr, i128* %v2ptr) { ret void }
+  define void @sdiv_test(ptr %v1ptr, ptr %v2ptr) { ret void }
 
 ...
 ---

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
index 720d9ad13aa07..0cf9602adbb09 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
@@ -2,27 +2,27 @@
 # RUN: llc -mtriple=aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
 
 --- |
-  define void @ldrxrox_breg_oreg(i64* %addr) { ret void }
-  define void @ldrdrox_breg_oreg(i64* %addr) { ret void }
-  define void @more_than_one_use(i64* %addr) { ret void }
-  define void @ldrxrox_shl(i64* %addr) { ret void }
-  define void @ldrdrox_shl(i64* %addr) { ret void }
-  define void @ldrxrox_mul_rhs(i64* %addr) { ret void }
-  define void @ldrdrox_mul_rhs(i64* %addr) { ret void }
-  define void @ldrxrox_mul_lhs(i64* %addr) { ret void }
-  define void @ldrdrox_mul_lhs(i64* %addr) { ret void }
-  define void @mul_not_pow_2(i64* %addr) { ret void }
-  define void @mul_wrong_pow_2(i64* %addr) { ret void }
-  define void @more_than_one_use_shl_1(i64* %addr) { ret void }
-  define void @more_than_one_use_shl_2(i64* %addr) { ret void }
-  define void @more_than_one_use_shl_lsl_fast(i64* %addr) #1 { ret void }
-  define void @more_than_one_use_shl_lsl_slow(i64* %addr) { ret void }
-  define void @more_than_one_use_shl_minsize(i64* %addr) #0 { ret void }
-  define void @ldrwrox(i64* %addr) { ret void }
-  define void @ldrsrox(i64* %addr) { ret void }
-  define void @ldrhrox(i64* %addr) { ret void }
-  define void @ldbbrox(i64* %addr) { ret void }
-  define void @ldrqrox(i64* %addr) { ret void }
+  define void @ldrxrox_breg_oreg(ptr %addr) { ret void }
+  define void @ldrdrox_breg_oreg(ptr %addr) { ret void }
+  define void @more_than_one_use(ptr %addr) { ret void }
+  define void @ldrxrox_shl(ptr %addr) { ret void }
+  define void @ldrdrox_shl(ptr %addr) { ret void }
+  define void @ldrxrox_mul_rhs(ptr %addr) { ret void }
+  define void @ldrdrox_mul_rhs(ptr %addr) { ret void }
+  define void @ldrxrox_mul_lhs(ptr %addr) { ret void }
+  define void @ldrdrox_mul_lhs(ptr %addr) { ret void }
+  define void @mul_not_pow_2(ptr %addr) { ret void }
+  define void @mul_wrong_pow_2(ptr %addr) { ret void }
+  define void @more_than_one_use_shl_1(ptr %addr) { ret void }
+  define void @more_than_one_use_shl_2(ptr %addr) { ret void }
+  define void @more_than_one_use_shl_lsl_fast(ptr %addr) #1 { ret void }
+  define void @more_than_one_use_shl_lsl_slow(ptr %addr) { ret void }
+  define void @more_than_one_use_shl_minsize(ptr %addr) #0 { ret void }
+  define void @ldrwrox(ptr %addr) { ret void }
+  define void @ldrsrox(ptr %addr) { ret void }
+  define void @ldrhrox(ptr %addr) { ret void }
+  define void @ldbbrox(ptr %addr) { ret void }
+  define void @ldrqrox(ptr %addr) { ret void }
   attributes #0 = { optsize }
   attributes #1 = { "target-features"="+addr-lsl-fast" }
 ...

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/localizer.mir b/llvm/test/CodeGen/AArch64/GlobalISel/localizer.mir
index 942844e0d0444..c894e87760e12 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/localizer.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/localizer.mir
@@ -23,15 +23,15 @@
 
   define i32 @intrablock_with_globalvalue() {
   entry:
-    %0 = load i32, i32* @var1, align 4
+    %0 = load i32, ptr @var1, align 4
     %cmp = icmp eq i32 %0, 1
     br i1 %cmp, label %if.then, label %if.end
 
   if.then:
-    store i32 2, i32* @var2, align 4
-    store i32 3, i32* @var1, align 4
-    store i32 2, i32* @var3, align 4
-    store i32 3, i32* @var1, align 4
+    store i32 2, ptr @var2, align 4
+    store i32 3, ptr @var1, align 4
+    store i32 2, ptr @var3, align 4
+    store i32 3, ptr @var1, align 4
     br label %if.end
 
   if.end:
@@ -39,15 +39,15 @@
   }
   define i32 @adrp_add() {
   entry:
-    %0 = load i32, i32* @var1, align 4
+    %0 = load i32, ptr @var1, align 4
     %cmp = icmp eq i32 %0, 1
     br i1 %cmp, label %if.then, label %if.end
 
   if.then:
-    store i32 2, i32* @var2, align 4
-    store i32 3, i32* @var1, align 4
-    store i32 2, i32* @var3, align 4
-    store i32 3, i32* @var1, align 4
+    store i32 2, ptr @var2, align 4
+    store i32 3, ptr @var1, align 4
+    store i32 2, ptr @var3, align 4
+    store i32 3, ptr @var1, align 4
     br label %if.end
 
   if.end:

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/non-pow-2-extload-combine.mir b/llvm/test/CodeGen/AArch64/GlobalISel/non-pow-2-extload-combine.mir
index 485535fc5625c..7d03da69ab942 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/non-pow-2-extload-combine.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/non-pow-2-extload-combine.mir
@@ -4,8 +4,8 @@
   target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64"
 
-  define i32 @ld_zext_i24(i24* %ptr, i24* %ptr2) {
-    %load = load i24, i24* %ptr, align 1
+  define i32 @ld_zext_i24(ptr %ptr, ptr %ptr2) {
+    %load = load i24, ptr %ptr, align 1
     %ext = zext i24 %load to i32
     ret i32 %ext
   }

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extending-loads.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extending-loads.mir
index 7b3547159f18c..f4dc5ecd4e03f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extending-loads.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extending-loads.mir
@@ -4,11 +4,11 @@
 --- |
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64--"
-  define void @test_zeroext(i8* %addr) {
+  define void @test_zeroext(ptr %addr) {
   entry:
     ret void
   }
-  define void @test_s32_to_s64(i8* %addr) {
+  define void @test_s32_to_s64(ptr %addr) {
   entry:
     ret void
   }

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postselectopt-xclass-copies.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postselectopt-xclass-copies.mir
index ea9c3881c6c3b..4bd5c14e5e745 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postselectopt-xclass-copies.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postselectopt-xclass-copies.mir
@@ -3,7 +3,7 @@
 --- |
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
-  @x = external hidden local_unnamed_addr global i32*, align 8
+  @x = external hidden local_unnamed_addr global ptr, align 8
   define void @copy_from_larger_rc_def() { ret void }
   define void @copy_from_larger_rc_def_multi_use() { ret void }
   define void @copy_from_smaller_rc_def() { ret void }

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-icmp-to-true-false-known-bits.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-icmp-to-true-false-known-bits.mir
index 27d5ad126fd23..7666b2fb8368f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-icmp-to-true-false-known-bits.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-icmp-to-true-false-known-bits.mir
@@ -3,33 +3,33 @@
 # REQUIRES: asserts
 
 --- |
-  define i1 @eq_true(i32* %ptr) { unreachable }
-  define i1 @ne_true(i32* %ptr) { unreachable }
-  define i1 @sge_true(i32* %ptr) { unreachable }
-  define i1 @sgt_true(i32* %ptr) { unreachable }
-  define i1 @sle_true(i32* %ptr) { unreachable }
-  define i1 @slt_true(i32* %ptr) { unreachable }
-  define i1 @uge_true(i32* %ptr) { unreachable }
-  define i1 @ugt_true(i32* %ptr) { unreachable }
-  define i1 @ule_true(i32* %ptr) { unreachable }
-  define i1 @ult_true(i32* %ptr) { unreachable }
-
-  define i1 @eq_false(i32* %ptr) { unreachable }
-  define i1 @ne_false(i32* %ptr) { unreachable }
-  define i1 @sge_false(i32* %ptr) { unreachable }
-  define i1 @sgt_false(i32* %ptr) { unreachable }
-  define i1 @sle_false(i32* %ptr) { unreachable }
-  define i1 @slt_false(i32* %ptr) { unreachable }
-  define i1 @uge_false(i32* %ptr) { unreachable }
-  define i1 @ugt_false(i32* %ptr) { unreachable }
-  define i1 @ule_false(i32* %ptr) { unreachable }
-  define i1 @ult_false(i32* %ptr) { unreachable }
-
-  define i1 @eq_unknown(i32* %ptr) { unreachable }
-  define i1 @ne_unknown(i32* %ptr) { unreachable }
-
-  define i1 @vector_true(i32* %ptr) { unreachable }
-  define i1 @vector_false(i32* %ptr) { unreachable }
+  define i1 @eq_true(ptr %ptr) { unreachable }
+  define i1 @ne_true(ptr %ptr) { unreachable }
+  define i1 @sge_true(ptr %ptr) { unreachable }
+  define i1 @sgt_true(ptr %ptr) { unreachable }
+  define i1 @sle_true(ptr %ptr) { unreachable }
+  define i1 @slt_true(ptr %ptr) { unreachable }
+  define i1 @uge_true(ptr %ptr) { unreachable }
+  define i1 @ugt_true(ptr %ptr) { unreachable }
+  define i1 @ule_true(ptr %ptr) { unreachable }
+  define i1 @ult_true(ptr %ptr) { unreachable }
+
+  define i1 @eq_false(ptr %ptr) { unreachable }
+  define i1 @ne_false(ptr %ptr) { unreachable }
+  define i1 @sge_false(ptr %ptr) { unreachable }
+  define i1 @sgt_false(ptr %ptr) { unreachable }
+  define i1 @sle_false(ptr %ptr) { unreachable }
+  define i1 @slt_false(ptr %ptr) { unreachable }
+  define i1 @uge_false(ptr %ptr) { unreachable }
+  define i1 @ugt_false(ptr %ptr) { unreachable }
+  define i1 @ule_false(ptr %ptr) { unreachable }
+  define i1 @ult_false(ptr %ptr) { unreachable }
+
+  define i1 @eq_unknown(ptr %ptr) { unreachable }
+  define i1 @ne_unknown(ptr %ptr) { unreachable }
+
+  define i1 @vector_true(ptr %ptr) { unreachable }
+  define i1 @vector_false(ptr %ptr) { unreachable }
 
   !0 = !{i32 1, i32 2}
   !1 = !{i32 1, i32 3}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-copy-prop-disabled.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-copy-prop-disabled.mir
index 05eb831681e07..35dce4da92aa7 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-copy-prop-disabled.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-copy-prop-disabled.mir
@@ -17,7 +17,7 @@
 --- |
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64--"
-  define void @test_copy(i8* %addr) {
+  define void @test_copy(ptr %addr) {
   entry:
     ret void
   }

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads-cornercases.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads-cornercases.mir
index 2a9291634fe34..04968dab3a37c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads-cornercases.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads-cornercases.mir
@@ -9,7 +9,7 @@
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64--"
 
-  define void @multiple_copies(i8* %addr) {
+  define void @multiple_copies(ptr %addr) {
   entry:
     br i1 0, label %if, label %else
   if:
@@ -20,7 +20,7 @@
     ret void
   }
 
-  define void @sink_to_phi_trivially_dominating(i8* %addr) {
+  define void @sink_to_phi_trivially_dominating(ptr %addr) {
   entry:
     br i1 0, label %if, label %exit
   if:
@@ -29,7 +29,7 @@
     ret void
   }
 
-  define void @sink_to_phi_nondominating(i8* %addr) {
+  define void @sink_to_phi_nondominating(ptr %addr) {
   entry:
     br i1 0, label %if, label %else
   if:
@@ -40,7 +40,7 @@
     ret void
   }
 
-  define void @sink_to_phi_emptyblock(i8* %addr) {
+  define void @sink_to_phi_emptyblock(ptr %addr) {
   entry:
     br i1 0, label %if, label %else
   if:
@@ -53,12 +53,12 @@
     ret void
   }
 
-  define void @use_doesnt_def_anything(i8* %addr) {
+  define void @use_doesnt_def_anything(ptr %addr) {
   entry:
     ret void
   }
 
-  define void @op0_isnt_a_reg(i8* %addr) {
+  define void @op0_isnt_a_reg(ptr %addr) {
   entry:
     ret void
   }

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads-s1.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads-s1.mir
index b8da4288c784e..c606cbf004437 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads-s1.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads-s1.mir
@@ -3,7 +3,7 @@
 
 # Check we don't try to combine a load of < s8 as that will end up creating a illegal non-extending load.
 --- |
-  define i8 @test(i1* %ptr) {
+  define i8 @test(ptr %ptr) {
     ret i8 undef
   }
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-add-low.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-add-low.mir
index e29568698f8b2..677ab6a20e1ec 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-add-low.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-add-low.mir
@@ -3,7 +3,7 @@
 
 --- |
 
-    @x = external hidden local_unnamed_addr global i32*, align 8
+    @x = external hidden local_unnamed_addr global ptr, align 8
 
     define void @select_add_low_without_offset() { ret void }
     define void @select_add_low_with_offset() { ret void }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir
index 46e062c317650..5787f914b965d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir
@@ -4,8 +4,8 @@
   target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64"
 
-  define i8 @load_acq_i8(i8* %ptr) {
-    %v = load atomic i8, i8* %ptr acquire, align 8
+  define i8 @load_acq_i8(ptr %ptr) {
+    %v = load atomic i8, ptr %ptr acquire, align 8
     ret i8 %v
   }
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir
index c08334654be92..950aab38400fe 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir
@@ -4,18 +4,18 @@
 --- |
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
-  define void @atomicrmw_xchg_i64(i64* %addr) { ret void }
-  define void @atomicrmw_add_i64(i64* %addr) { ret void }
-  define void @atomicrmw_add_i32(i64* %addr) { ret void }
-  define void @atomicrmw_sub_i32(i64* %addr) { ret void }
-  define void @atomicrmw_and_i32(i64* %addr) { ret void }
+  define void @atomicrmw_xchg_i64(ptr %addr) { ret void }
+  define void @atomicrmw_add_i64(ptr %addr) { ret void }
+  define void @atomicrmw_add_i32(ptr %addr) { ret void }
+  define void @atomicrmw_sub_i32(ptr %addr) { ret void }
+  define void @atomicrmw_and_i32(ptr %addr) { ret void }
   ; nand isn't legal
-  define void @atomicrmw_or_i32(i64* %addr) { ret void }
-  define void @atomicrmw_xor_i32(i64* %addr) { ret void }
-  define void @atomicrmw_min_i32(i64* %addr) { ret void }
-  define void @atomicrmw_max_i32(i64* %addr) { ret void }
-  define void @atomicrmw_umin_i32(i64* %addr) { ret void }
-  define void @atomicrmw_umax_i32(i64* %addr) { ret void }
+  define void @atomicrmw_or_i32(ptr %addr) { ret void }
+  define void @atomicrmw_xor_i32(ptr %addr) { ret void }
+  define void @atomicrmw_min_i32(ptr %addr) { ret void }
+  define void @atomicrmw_max_i32(ptr %addr) { ret void }
+  define void @atomicrmw_umin_i32(ptr %addr) { ret void }
+  define void @atomicrmw_umax_i32(ptr %addr) { ret void }
 ...
 
 ---

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-blockaddress.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-blockaddress.mir
index 28d279d742164..8d0b835f941ab 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-blockaddress.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-blockaddress.mir
@@ -7,11 +7,11 @@
   target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64-none-linux-gnu"
 
-  @addr = dso_local global i8* null
+  @addr = dso_local global ptr null
 
   define dso_local void @test_blockaddress() {
-    store i8* blockaddress(@test_blockaddress, %block), i8** @addr
-    indirectbr i8* blockaddress(@test_blockaddress, %block), [label %block]
+    store ptr blockaddress(@test_blockaddress, %block), ptr @addr
+    indirectbr ptr blockaddress(@test_blockaddress, %block), [label %block]
 
   block:                                            ; preds = %0
     ret void

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir
index dd2b15a6a560d..5202cec121ef3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir
@@ -4,8 +4,8 @@
 --- |
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
-  define void @cmpxchg_i32(i64* %addr) { ret void }
-  define void @cmpxchg_i64(i64* %addr) { ret void }
+  define void @cmpxchg_i32(ptr %addr) { ret void }
+  define void @cmpxchg_i64(ptr %addr) { ret void }
 ...
 
 ---

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-constant.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-constant.mir
index c280f000b174e..674299ab9d47e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-constant.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-constant.mir
@@ -10,7 +10,7 @@
   define i64 @const_s64() { ret i64 1234567890123 }
   define i32 @const_s32_zero() { ret i32 0 }
   define i64 @const_s64_zero() { ret i64 0 }
-  define i8* @const_p0_0() { ret i8* null }
+  define ptr @const_p0_0() { ret ptr null }
 
   define i32 @fconst_s32() { ret i32 42 }
   define i64 @fconst_s64() { ret i64 1234567890123 }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-extload.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-extload.mir
index 4ebeb2a773f30..a282c0f83d561 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-extload.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-extload.mir
@@ -4,9 +4,9 @@
 --- |
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
-  define void @aextload_s32_from_s16(i16 *%addr) { ret void }
+  define void @aextload_s32_from_s16(ptr %addr) { ret void }
 
-  define void @aextload_s32_from_s16_not_combined(i16 *%addr) { ret void }
+  define void @aextload_s32_from_s16_not_combined(ptr %addr) { ret void }
 ...
 
 ---

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-ldaxr-intrin.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-ldaxr-intrin.mir
index 9dbbc93ba5cfa..d4c1f23ca7ab0 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-ldaxr-intrin.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-ldaxr-intrin.mir
@@ -2,10 +2,10 @@
 # RUN: llc -mtriple=aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
 
 --- |
-  define void @test_load_acquire_i8(i8* %addr) { ret void }
-  define void @test_load_acquire_i16(i16* %addr) { ret void }
-  define void @test_load_acquire_i32(i32* %addr) { ret void }
-  define void @test_load_acquire_i64(i64* %addr) { ret void }
+  define void @test_load_acquire_i8(ptr %addr) { ret void }
+  define void @test_load_acquire_i16(ptr %addr) { ret void }
+  define void @test_load_acquire_i32(ptr %addr) { ret void }
+  define void @test_load_acquire_i64(ptr %addr) { ret void }
 ...
 ---
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-ldxr-intrin.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-ldxr-intrin.mir
index 8e4e07cae8ab8..bad2e045f38ac 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-ldxr-intrin.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-ldxr-intrin.mir
@@ -2,10 +2,10 @@
 # RUN: llc -mtriple=aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
 
 --- |
-  define void @test_load_i8(i8* %addr) { ret void }
-  define void @test_load_i16(i16* %addr) { ret void }
-  define void @test_load_i32(i32* %addr) { ret void }
-  define void @test_load_i64(i64* %addr) { ret void }
+  define void @test_load_i8(ptr %addr) { ret void }
+  define void @test_load_i16(ptr %addr) { ret void }
+  define void @test_load_i32(ptr %addr) { ret void }
+  define void @test_load_i64(ptr %addr) { ret void }
 ...
 ---
 name:            test_load_i8

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-load-store-vector-of-ptr.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-load-store-vector-of-ptr.mir
index e2659d379cd55..7c9c61db4de99 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-load-store-vector-of-ptr.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-load-store-vector-of-ptr.mir
@@ -4,14 +4,14 @@
   target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64"
 
-  define void @store_v2p0(<2 x i8*> %v, <2 x i8*>* %ptr) {
-    store <2 x i8*> %v, <2 x i8*>* %ptr
+  define void @store_v2p0(<2 x ptr> %v, ptr %ptr) {
+    store <2 x ptr> %v, ptr %ptr
     ret void
   }
 
-  define <2 x i8*> @load_v2p0(<2 x i8*>* %ptr) {
-    %v = load <2 x i8*>, <2 x i8*>* %ptr
-    ret <2 x i8*> %v
+  define <2 x ptr> @load_v2p0(ptr %ptr) {
+    %v = load <2 x ptr>, ptr %ptr
+    ret <2 x ptr> %v
   }
 
 ...

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-load.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-load.mir
index 14ccb00ec986d..3a46b2a943288 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-load.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-load.mir
@@ -4,40 +4,40 @@
 --- |
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
-  define void @load_s64_gpr(i64* %addr) { ret void }
-  define void @load_s32_gpr(i32* %addr) { ret void }
-  define void @load_s16_gpr_anyext(i16* %addr) { ret void }
-  define void @load_s16_gpr(i16* %addr) { ret void }
-  define void @load_s8_gpr_anyext(i8* %addr) { ret void }
-  define void @load_s8_gpr(i8* %addr) { ret void }
+  define void @load_s64_gpr(ptr %addr) { ret void }
+  define void @load_s32_gpr(ptr %addr) { ret void }
+  define void @load_s16_gpr_anyext(ptr %addr) { ret void }
+  define void @load_s16_gpr(ptr %addr) { ret void }
+  define void @load_s8_gpr_anyext(ptr %addr) { ret void }
+  define void @load_s8_gpr(ptr %addr) { ret void }
 
   define void @load_fi_s64_gpr() {
     %ptr0 = alloca i64
     ret void
   }
 
-  define void @load_gep_128_s64_gpr(i64* %addr) { ret void }
-  define void @load_gep_512_s32_gpr(i32* %addr) { ret void }
-  define void @load_gep_64_s16_gpr(i16* %addr) { ret void }
-  define void @load_gep_1_s8_gpr(i8* %addr) { ret void }
+  define void @load_gep_128_s64_gpr(ptr %addr) { ret void }
+  define void @load_gep_512_s32_gpr(ptr %addr) { ret void }
+  define void @load_gep_64_s16_gpr(ptr %addr) { ret void }
+  define void @load_gep_1_s8_gpr(ptr %addr) { ret void }
 
-  define void @load_s64_fpr(i64* %addr) { ret void }
-  define void @load_s32_fpr(i32* %addr) { ret void }
-  define void @load_s16_fpr(i16* %addr) { ret void }
-  define void @load_s8_fpr(i8* %addr) { ret void }
+  define void @load_s64_fpr(ptr %addr) { ret void }
+  define void @load_s32_fpr(ptr %addr) { ret void }
+  define void @load_s16_fpr(ptr %addr) { ret void }
+  define void @load_s8_fpr(ptr %addr) { ret void }
 
-  define void @load_gep_8_s64_fpr(i64* %addr) { ret void }
-  define void @load_gep_16_s32_fpr(i32* %addr) { ret void }
-  define void @load_gep_64_s16_fpr(i16* %addr) { ret void }
-  define void @load_gep_32_s8_fpr(i8* %addr) { ret void }
+  define void @load_gep_8_s64_fpr(ptr %addr) { ret void }
+  define void @load_gep_16_s32_fpr(ptr %addr) { ret void }
+  define void @load_gep_64_s16_fpr(ptr %addr) { ret void }
+  define void @load_gep_32_s8_fpr(ptr %addr) { ret void }
 
-  define void @load_v2s32(i64 *%addr) { ret void }
-  define void @load_v2s64(i64 *%addr) { ret void }
+  define void @load_v2s32(ptr %addr) { ret void }
+  define void @load_v2s64(ptr %addr) { ret void }
 
-  define void @load_4xi16(<4 x i16>* %ptr) { ret void }
-  define void @load_4xi32(<4 x i32>* %ptr) { ret void }
-  define void @load_8xi16(<8 x i16>* %ptr) { ret void }
-  define void @load_16xi8(<16 x i8>* %ptr) { ret void }
+  define void @load_4xi16(ptr %ptr) { ret void }
+  define void @load_4xi32(ptr %ptr) { ret void }
+  define void @load_8xi16(ptr %ptr) { ret void }
+  define void @load_16xi8(ptr %ptr) { ret void }
   define void @anyext_on_fpr() { ret void }
   define void @anyext_on_fpr8() { ret void }
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-phi.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-phi.mir
index 84fc03637333e..4409ffd2c852e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-phi.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-phi.mir
@@ -23,9 +23,9 @@
     ret i32 %res
   }
 
-  define i64* @test_phi_ptr(i64* %a, i64* %b, i1 %cond) {
+  define ptr @test_phi_ptr(ptr %a, ptr %b, i1 %cond) {
   entry:
-    ret i64* null
+    ret ptr null
   }
 
 ...

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-pr32733.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-pr32733.mir
index 4e304f0541c5a..bb838ff81c1a6 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-pr32733.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-pr32733.mir
@@ -7,7 +7,7 @@
     ret i32 0
   }
 
-  declare i32 @printf(i8*, ...)
+  declare i32 @printf(ptr, ...)
 ...
 ---
 name:            main

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddress-liveins.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddress-liveins.mir
index fc940f908afe6..e5d1c325f38f1 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddress-liveins.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddress-liveins.mir
@@ -5,7 +5,7 @@
   define void @lr_other_block() { ret void }
   define void @already_live_in() { ret void }
   define void @multi_use() { ret void }
-  declare i8* @llvm.returnaddress(i32 immarg)
+  declare ptr @llvm.returnaddress(i32 immarg)
 ...
 ---
 name:            lr_other_block

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-sextload.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-sextload.mir
index 6ce316cec5c3f..91e9971bb3f7a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-sextload.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-sextload.mir
@@ -4,8 +4,8 @@
 --- |
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
-  define void @sextload_s32_from_s16(i16 *%addr) { ret void }
-  define void @sextload_s32_from_s16_not_combined(i16 *%addr) { ret void }
+  define void @sextload_s32_from_s16(ptr %addr) { ret void }
+  define void @sextload_s32_from_s16_not_combined(ptr %addr) { ret void }
 ...
 
 ---

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-static.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-static.mir
index 2145ba3086644..2d84e0415a6cc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-static.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-static.mir
@@ -8,13 +8,13 @@
     ret void
   }
 
-  define dso_local i8* @ptr_mask(i8* %in) { ret i8* undef }
+  define dso_local ptr @ptr_mask(ptr %in) { ret ptr undef }
 
   @var_local = dso_local global i8 0
-  define dso_local i8* @global_local() { ret i8* undef }
+  define dso_local ptr @global_local() { ret ptr undef }
 
   @var_got = external dso_local global i8
-  define dso_local i8* @global_got() { ret i8* undef }
+  define dso_local ptr @global_got() { ret ptr undef }
 
   define dso_local void @icmp() { ret void }
   define dso_local void @fcmp() { ret void }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-stlxr-intrin.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-stlxr-intrin.mir
index 6b899dc4c84ab..0eb8521424ddb 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-stlxr-intrin.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-stlxr-intrin.mir
@@ -2,16 +2,16 @@
 # RUN: llc -mtriple=aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
 
 --- |
-  define i32 @test_store_release_i64(i32 %a, i64* %addr) {
+  define i32 @test_store_release_i64(i32 %a, ptr %addr) {
     ret i32 %a
   }
 
-  define i32 @test_store_release_i32(i32 %a, i64* %addr) {
+  define i32 @test_store_release_i32(i32 %a, ptr %addr) {
     ret i32 %a
   }
 
-  define void @test_store_release_i8(i32, i8 %val, i8* %addr) { ret void }
-  define void @test_store_release_i16(i32, i16 %val, i16* %addr) { ret void }
+  define void @test_store_release_i8(i32, i8 %val, ptr %addr) { ret void }
+  define void @test_store_release_i16(i32, i16 %val, ptr %addr) { ret void }
 ...
 ---
 name:            test_store_release_i64

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-store-truncating-float.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-store-truncating-float.mir
index 29fc8b3f94fa3..489868ba5cf5d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-store-truncating-float.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-store-truncating-float.mir
@@ -5,7 +5,7 @@
     %alloca = alloca i32, align 4
     %bitcast = bitcast double %x to i64
     %trunc = trunc i64 %bitcast to i32
-    store i32 %trunc, i32* %alloca, align 4
+    store i32 %trunc, ptr %alloca, align 4
     ret void
   }
 
@@ -13,7 +13,7 @@
     %alloca = alloca i16, align 2
     %bitcast = bitcast double %x to i64
     %trunc = trunc i64 %bitcast to i16
-    store i16 %trunc, i16* %alloca, align 2
+    store i16 %trunc, ptr %alloca, align 2
     ret void
   }
 
@@ -21,7 +21,7 @@
     %alloca = alloca i8, align 1
     %bitcast = bitcast double %x to i64
     %trunc = trunc i64 %bitcast to i8
-    store i8 %trunc, i8* %alloca, align 1
+    store i8 %trunc, ptr %alloca, align 1
     ret void
   }
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-store.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-store.mir
index 88a01f043586f..bafd4113301c8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-store.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-store.mir
@@ -4,47 +4,47 @@
 --- |
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
-  define void @store_s64_gpr(i64* %addr) { ret void }
-  define void @store_s32_gpr(i32* %addr) { ret void }
-  define void @store_s16_gpr(i16* %addr) { ret void }
-  define void @store_s8_gpr(i8* %addr) { ret void }
+  define void @store_s64_gpr(ptr %addr) { ret void }
+  define void @store_s32_gpr(ptr %addr) { ret void }
+  define void @store_s16_gpr(ptr %addr) { ret void }
+  define void @store_s8_gpr(ptr %addr) { ret void }
 
-  define void @store_zero_s64_gpr(i64* %addr) { ret void }
-  define void @store_zero_s32_gpr(i32* %addr) { ret void }
-  define void @store_zero_s16(i32* %addr) { ret void }
-  define void @store_zero_s8(i32* %addr) { ret void }
-  define void @store_zero_look_through_cst(i32* %addr) { ret void }
+  define void @store_zero_s64_gpr(ptr %addr) { ret void }
+  define void @store_zero_s32_gpr(ptr %addr) { ret void }
+  define void @store_zero_s16(ptr %addr) { ret void }
+  define void @store_zero_s8(ptr %addr) { ret void }
+  define void @store_zero_look_through_cst(ptr %addr) { ret void }
 
   define void @store_fi_s64_gpr() {
     %ptr0 = alloca i64
     ret void
   }
 
-  define void @store_gep_128_s64_gpr(i64* %addr) { ret void }
-  define void @store_gep_512_s32_gpr(i32* %addr) { ret void }
-  define void @store_gep_64_s16_gpr(i16* %addr) { ret void }
-  define void @store_gep_1_s8_gpr(i8* %addr) { ret void }
+  define void @store_gep_128_s64_gpr(ptr %addr) { ret void }
+  define void @store_gep_512_s32_gpr(ptr %addr) { ret void }
+  define void @store_gep_64_s16_gpr(ptr %addr) { ret void }
+  define void @store_gep_1_s8_gpr(ptr %addr) { ret void }
 
-  define void @store_s64_fpr(i64* %addr) { ret void }
-  define void @store_s32_fpr(i32* %addr) { ret void }
+  define void @store_s64_fpr(ptr %addr) { ret void }
+  define void @store_s32_fpr(ptr %addr) { ret void }
 
-  define void @store_gep_8_s64_fpr(i64* %addr) { ret void }
-  define void @store_gep_8_s32_fpr(i32* %addr) { ret void }
+  define void @store_gep_8_s64_fpr(ptr %addr) { ret void }
+  define void @store_gep_8_s32_fpr(ptr %addr) { ret void }
 
-  define void @store_v2s32(i64 *%addr) { ret void }
-  define void @store_v2s64(i64 *%addr) { ret void }
+  define void @store_v2s32(ptr %addr) { ret void }
+  define void @store_v2s64(ptr %addr) { ret void }
 
-  define void @store_4xi16(<4 x i16> %v, <4 x i16>* %ptr) { ret void }
-  define void @store_4xi32(<4 x i32> %v, <4 x i32>* %ptr) { ret void }
-  define void @store_8xi16(<8 x i16> %v, <8 x i16>* %ptr) { ret void }
-  define void @store_16xi8(<16 x i8> %v, <16 x i8>* %ptr) { ret void }
+  define void @store_4xi16(<4 x i16> %v, ptr %ptr) { ret void }
+  define void @store_4xi32(<4 x i32> %v, ptr %ptr) { ret void }
+  define void @store_8xi16(<8 x i16> %v, ptr %ptr) { ret void }
+  define void @store_16xi8(<16 x i8> %v, ptr %ptr) { ret void }
 
-  @x = external hidden local_unnamed_addr global i32*, align 8
+  @x = external hidden local_unnamed_addr global ptr, align 8
   define void @store_adrp_add_low() { ret void }
   define void @store_adrp_add_low_foldable_offset() { ret void }
   define void @store_adrp_add_low_unfoldable_offset() { ret void }
 
-  define void @truncstores(i8* %addr) { ret void }
+  define void @truncstores(ptr %addr) { ret void }
 ...
 
 ---

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-stx.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-stx.mir
index ebbf69e51ee81..dbddaf5689264 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-stx.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-stx.mir
@@ -2,10 +2,10 @@
 # RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
 
 --- |
-  define void @test_store_i8(i32, i8 %val, i8* %addr) { ret void }
-  define void @test_store_i16(i32, i16 %val, i16* %addr) { ret void }
-  define void @test_store_i32(i32, i32 %val, i32* %addr) { ret void }
-  define void @test_store_i64(i32, i64 %val, i64* %addr) { ret void }
+  define void @test_store_i8(i32, i8 %val, ptr %addr) { ret void }
+  define void @test_store_i16(i32, i16 %val, ptr %addr) { ret void }
+  define void @test_store_i32(i32, i32 %val, ptr %addr) { ret void }
+  define void @test_store_i64(i32, i64 %val, ptr %addr) { ret void }
 ...
 ---
 name:            test_store_i8

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
index 60cddbf794bc7..bcdd77ac57570 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
@@ -9,13 +9,13 @@
     ret void
   }
 
-  define i8* @ptr_mask(i8* %in) { ret i8* undef }
+  define ptr @ptr_mask(ptr %in) { ret ptr undef }
 
   @var_local = global i8 0
-  define i8* @global_local() { ret i8* undef }
+  define ptr @global_local() { ret ptr undef }
 
   @var_got = external global i8
-  define i8* @global_got() { ret i8* undef }
+  define ptr @global_got() { ret ptr undef }
 
   define void @icmp() { ret void }
   define void @fcmp() { ret void }

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/sext-inreg-ldrow-16b.mir b/llvm/test/CodeGen/AArch64/GlobalISel/sext-inreg-ldrow-16b.mir
index fdc417186304a..47fc19bdc1440 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/sext-inreg-ldrow-16b.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/sext-inreg-ldrow-16b.mir
@@ -6,17 +6,17 @@
 
   @x = dso_local global i32 -32768, align 4
 
-  define dso_local i32 @check_sext_not_lost(i32* %ptr) {
+  define dso_local i32 @check_sext_not_lost(ptr %ptr) {
   entry:
-    %ptr.addr = alloca i32*, align 8
-    store i32* %ptr, i32** %ptr.addr, align 8
-    %0 = load i32*, i32** %ptr.addr, align 8
-    %1 = load i32, i32* @x, align 4
+    %ptr.addr = alloca ptr, align 8
+    store ptr %ptr, ptr %ptr.addr, align 8
+    %0 = load ptr, ptr %ptr.addr, align 8
+    %1 = load i32, ptr @x, align 4
     %sub = sub nsw i32 %1, 32768
     %conv = trunc i32 %sub to i16
     %idxprom = sext i16 %conv to i64
-    %arrayidx = getelementptr inbounds i32, i32* %0, i64 %idxprom
-    %2 = load i32, i32* %arrayidx, align 4
+    %arrayidx = getelementptr inbounds i32, ptr %0, i64 %idxprom
+    %2 = load i32, ptr %arrayidx, align 4
     ret i32 %2
   }
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/store-addressing-modes.mir b/llvm/test/CodeGen/AArch64/GlobalISel/store-addressing-modes.mir
index 2529e4ba0dafb..8214b632e5f33 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/store-addressing-modes.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/store-addressing-modes.mir
@@ -2,15 +2,15 @@
 # RUN: llc -mtriple=aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
 
 --- |
-  define void @strxrox(i64* %addr) { ret void }
-  define void @strxrox_p0(i64* %addr) { ret void }
-  define void @strdrox(i64* %addr) { ret void }
-  define void @strwrox(i64* %addr) { ret void }
-  define void @strsrox(i64* %addr) { ret void }
-  define void @strhrox(i64* %addr) { ret void }
-  define void @strqrox(i64* %addr) { ret void }
-  define void @shl(i64* %addr) { ret void }
-  define void @shl_p0(i64* %addr) { ret void }
+  define void @strxrox(ptr %addr) { ret void }
+  define void @strxrox_p0(ptr %addr) { ret void }
+  define void @strdrox(ptr %addr) { ret void }
+  define void @strwrox(ptr %addr) { ret void }
+  define void @strsrox(ptr %addr) { ret void }
+  define void @strhrox(ptr %addr) { ret void }
+  define void @strqrox(ptr %addr) { ret void }
+  define void @shl(ptr %addr) { ret void }
+  define void @shl_p0(ptr %addr) { ret void }
 ...
 
 ---

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/store-merging.mir b/llvm/test/CodeGen/AArch64/GlobalISel/store-merging.mir
index e98e1ce599f2f..b0fc9b650187f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/store-merging.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/store-merging.mir
@@ -4,161 +4,161 @@
   target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64"
 
-  define void @test_simple_2xs8(i8* %ptr) {
-    %addr11 = bitcast i8* %ptr to i8*
-    store i8 4, i8* %addr11, align 1
-    %addr2 = getelementptr i8, i8* %ptr, i64 1
-    store i8 5, i8* %addr2, align 1
+  define void @test_simple_2xs8(ptr %ptr) {
+    %addr11 = bitcast ptr %ptr to ptr
+    store i8 4, ptr %addr11, align 1
+    %addr2 = getelementptr i8, ptr %ptr, i64 1
+    store i8 5, ptr %addr2, align 1
     ret void
   }
 
-  define void @test_simple_2xs16(i16* %ptr) {
-    %addr11 = bitcast i16* %ptr to i16*
-    store i16 4, i16* %addr11, align 2
-    %addr2 = getelementptr i16, i16* %ptr, i64 1
-    store i16 5, i16* %addr2, align 2
+  define void @test_simple_2xs16(ptr %ptr) {
+    %addr11 = bitcast ptr %ptr to ptr
+    store i16 4, ptr %addr11, align 2
+    %addr2 = getelementptr i16, ptr %ptr, i64 1
+    store i16 5, ptr %addr2, align 2
     ret void
   }
 
-  define void @test_simple_4xs16(i16* %ptr) {
-    %addr11 = bitcast i16* %ptr to i16*
-    store i16 4, i16* %addr11, align 2
-    %addr2 = getelementptr i16, i16* %ptr, i64 1
-    store i16 5, i16* %addr2, align 2
-    %addr3 = getelementptr i16, i16* %ptr, i64 2
-    store i16 9, i16* %addr3, align 2
-    %addr4 = getelementptr i16, i16* %ptr, i64 3
-    store i16 14, i16* %addr4, align 2
+  define void @test_simple_4xs16(ptr %ptr) {
+    %addr11 = bitcast ptr %ptr to ptr
+    store i16 4, ptr %addr11, align 2
+    %addr2 = getelementptr i16, ptr %ptr, i64 1
+    store i16 5, ptr %addr2, align 2
+    %addr3 = getelementptr i16, ptr %ptr, i64 2
+    store i16 9, ptr %addr3, align 2
+    %addr4 = getelementptr i16, ptr %ptr, i64 3
+    store i16 14, ptr %addr4, align 2
     ret void
   }
 
-  define void @test_simple_2xs32(i32* %ptr) {
-    %addr11 = bitcast i32* %ptr to i32*
-    store i32 4, i32* %addr11, align 4
-    %addr2 = getelementptr i32, i32* %ptr, i64 1
-    store i32 5, i32* %addr2, align 4
+  define void @test_simple_2xs32(ptr %ptr) {
+    %addr11 = bitcast ptr %ptr to ptr
+    store i32 4, ptr %addr11, align 4
+    %addr2 = getelementptr i32, ptr %ptr, i64 1
+    store i32 5, ptr %addr2, align 4
     ret void
   }
 
-  define void @test_simple_2xs64_illegal(i64* %ptr) {
-    %addr11 = bitcast i64* %ptr to i64*
-    store i64 4, i64* %addr11, align 8
-    %addr2 = getelementptr i64, i64* %ptr, i64 1
-    store i64 5, i64* %addr2, align 8
+  define void @test_simple_2xs64_illegal(ptr %ptr) {
+    %addr11 = bitcast ptr %ptr to ptr
+    store i64 4, ptr %addr11, align 8
+    %addr2 = getelementptr i64, ptr %ptr, i64 1
+    store i64 5, ptr %addr2, align 8
     ret void
   }
 
-  define void @test_simple_vector(<2 x i16>* %ptr) {
-    %addr11 = bitcast <2 x i16>* %ptr to <2 x i16>*
-    store <2 x i16> <i16 4, i16 7>, <2 x i16>* %addr11, align 4
-    %addr2 = getelementptr <2 x i16>, <2 x i16>* %ptr, i64 1
-    store <2 x i16> <i16 5, i16 8>, <2 x i16>* %addr2, align 4
+  define void @test_simple_vector(ptr %ptr) {
+    %addr11 = bitcast ptr %ptr to ptr
+    store <2 x i16> <i16 4, i16 7>, ptr %addr11, align 4
+    %addr2 = getelementptr <2 x i16>, ptr %ptr, i64 1
+    store <2 x i16> <i16 5, i16 8>, ptr %addr2, align 4
     ret void
   }
 
-  define i32 @test_unknown_alias(i32* %ptr, i32* %aliasptr) {
-    %addr11 = bitcast i32* %ptr to i32*
-    store i32 4, i32* %addr11, align 4
-    %ld = load i32, i32* %aliasptr, align 4
-    %addr2 = getelementptr i32, i32* %ptr, i64 1
-    store i32 5, i32* %addr2, align 4
+  define i32 @test_unknown_alias(ptr %ptr, ptr %aliasptr) {
+    %addr11 = bitcast ptr %ptr to ptr
+    store i32 4, ptr %addr11, align 4
+    %ld = load i32, ptr %aliasptr, align 4
+    %addr2 = getelementptr i32, ptr %ptr, i64 1
+    store i32 5, ptr %addr2, align 4
     ret i32 %ld
   }
 
-  define void @test_2x_2xs32(i32* %ptr, i32* %ptr2) {
-    %addr11 = bitcast i32* %ptr to i32*
-    store i32 4, i32* %addr11, align 4
-    %addr2 = getelementptr i32, i32* %ptr, i64 1
-    store i32 5, i32* %addr2, align 4
-    %addr32 = bitcast i32* %ptr2 to i32*
-    store i32 9, i32* %addr32, align 4
-    %addr4 = getelementptr i32, i32* %ptr2, i64 1
-    store i32 17, i32* %addr4, align 4
+  define void @test_2x_2xs32(ptr %ptr, ptr %ptr2) {
+    %addr11 = bitcast ptr %ptr to ptr
+    store i32 4, ptr %addr11, align 4
+    %addr2 = getelementptr i32, ptr %ptr, i64 1
+    store i32 5, ptr %addr2, align 4
+    %addr32 = bitcast ptr %ptr2 to ptr
+    store i32 9, ptr %addr32, align 4
+    %addr4 = getelementptr i32, ptr %ptr2, i64 1
+    store i32 17, ptr %addr4, align 4
     ret void
   }
 
-  define void @test_simple_var_2xs8(i8* %ptr, i8 %v1, i8 %v2) {
-    %addr11 = bitcast i8* %ptr to i8*
-    store i8 %v1, i8* %addr11, align 1
-    %addr2 = getelementptr i8, i8* %ptr, i64 1
-    store i8 %v2, i8* %addr2, align 1
+  define void @test_simple_var_2xs8(ptr %ptr, i8 %v1, i8 %v2) {
+    %addr11 = bitcast ptr %ptr to ptr
+    store i8 %v1, ptr %addr11, align 1
+    %addr2 = getelementptr i8, ptr %ptr, i64 1
+    store i8 %v2, ptr %addr2, align 1
     ret void
   }
 
-  define void @test_simple_var_2xs16(i16* %ptr, i16 %v1, i16 %v2) {
-    %addr11 = bitcast i16* %ptr to i16*
-    store i16 %v1, i16* %addr11, align 2
-    %addr2 = getelementptr i16, i16* %ptr, i64 1
-    store i16 %v2, i16* %addr2, align 2
+  define void @test_simple_var_2xs16(ptr %ptr, i16 %v1, i16 %v2) {
+    %addr11 = bitcast ptr %ptr to ptr
+    store i16 %v1, ptr %addr11, align 2
+    %addr2 = getelementptr i16, ptr %ptr, i64 1
+    store i16 %v2, ptr %addr2, align 2
     ret void
   }
 
-  define void @test_simple_var_2xs32(i32* %ptr, i32 %v1, i32 %v2) {
-    %addr11 = bitcast i32* %ptr to i32*
-    store i32 %v1, i32* %addr11, align 4
-    %addr2 = getelementptr i32, i32* %ptr, i64 1
-    store i32 %v2, i32* %addr2, align 4
+  define void @test_simple_var_2xs32(ptr %ptr, i32 %v1, i32 %v2) {
+    %addr11 = bitcast ptr %ptr to ptr
+    store i32 %v1, ptr %addr11, align 4
+    %addr2 = getelementptr i32, ptr %ptr, i64 1
+    store i32 %v2, ptr %addr2, align 4
     ret void
   }
 
-  define void @test_alias_4xs16(i16* %ptr, i16* %ptr2) {
-    %addr11 = bitcast i16* %ptr to i16*
-    store i16 4, i16* %addr11, align 2
-    %addr2 = getelementptr i16, i16* %ptr, i64 1
-    store i16 5, i16* %addr2, align 2
-    %addr3 = getelementptr i16, i16* %ptr, i64 2
-    store i16 9, i16* %addr3, align 2
-    store i16 0, i16* %ptr2, align 2
-    %addr4 = getelementptr i16, i16* %ptr, i64 3
-    store i16 14, i16* %addr4, align 2
+  define void @test_alias_4xs16(ptr %ptr, ptr %ptr2) {
+    %addr11 = bitcast ptr %ptr to ptr
+    store i16 4, ptr %addr11, align 2
+    %addr2 = getelementptr i16, ptr %ptr, i64 1
+    store i16 5, ptr %addr2, align 2
+    %addr3 = getelementptr i16, ptr %ptr, i64 2
+    store i16 9, ptr %addr3, align 2
+    store i16 0, ptr %ptr2, align 2
+    %addr4 = getelementptr i16, ptr %ptr, i64 3
+    store i16 14, ptr %addr4, align 2
     ret void
   }
 
-  define void @test_alias2_4xs16(i16* %ptr, i16* %ptr2, i16* %ptr3) {
-    %addr11 = bitcast i16* %ptr to i16*
-    store i16 4, i16* %addr11, align 2
-    %addr2 = getelementptr i16, i16* %ptr, i64 1
-    store i16 0, i16* %ptr3, align 2
-    store i16 5, i16* %addr2, align 2
-    %addr3 = getelementptr i16, i16* %ptr, i64 2
-    store i16 9, i16* %addr3, align 2
-    store i16 0, i16* %ptr2, align 2
-    %addr4 = getelementptr i16, i16* %ptr, i64 3
-    store i16 14, i16* %addr4, align 2
+  define void @test_alias2_4xs16(ptr %ptr, ptr %ptr2, ptr %ptr3) {
+    %addr11 = bitcast ptr %ptr to ptr
+    store i16 4, ptr %addr11, align 2
+    %addr2 = getelementptr i16, ptr %ptr, i64 1
+    store i16 0, ptr %ptr3, align 2
+    store i16 5, ptr %addr2, align 2
+    %addr3 = getelementptr i16, ptr %ptr, i64 2
+    store i16 9, ptr %addr3, align 2
+    store i16 0, ptr %ptr2, align 2
+    %addr4 = getelementptr i16, ptr %ptr, i64 3
+    store i16 14, ptr %addr4, align 2
     ret void
   }
 
-  define void @test_alias3_4xs16(i16* %ptr, i16* %ptr2, i16* %ptr3, i16* %ptr4) {
-    %addr11 = bitcast i16* %ptr to i16*
-    store i16 4, i16* %addr11, align 2
-    %addr2 = getelementptr i16, i16* %ptr, i64 1
-    store i16 0, i16* %ptr3, align 2
-    store i16 5, i16* %addr2, align 2
-    store i16 0, i16* %ptr4, align 2
-    %addr3 = getelementptr i16, i16* %ptr, i64 2
-    store i16 9, i16* %addr3, align 2
-    store i16 0, i16* %ptr2, align 2
-    %addr4 = getelementptr i16, i16* %ptr, i64 3
-    store i16 14, i16* %addr4, align 2
+  define void @test_alias3_4xs16(ptr %ptr, ptr %ptr2, ptr %ptr3, ptr %ptr4) {
+    %addr11 = bitcast ptr %ptr to ptr
+    store i16 4, ptr %addr11, align 2
+    %addr2 = getelementptr i16, ptr %ptr, i64 1
+    store i16 0, ptr %ptr3, align 2
+    store i16 5, ptr %addr2, align 2
+    store i16 0, ptr %ptr4, align 2
+    %addr3 = getelementptr i16, ptr %ptr, i64 2
+    store i16 9, ptr %addr3, align 2
+    store i16 0, ptr %ptr2, align 2
+    %addr4 = getelementptr i16, ptr %ptr, i64 3
+    store i16 14, ptr %addr4, align 2
     ret void
   }
 
-  define i32 @test_alias_allocas_2xs32(i32* %ptr) {
+  define i32 @test_alias_allocas_2xs32(ptr %ptr) {
     %a1 = alloca [6 x i32], align 4
     %a2 = alloca i32, align 4
-    %addr11 = bitcast [6 x i32]* %a1 to i32*
-    store i32 4, i32* %addr11, align 4
-    %ld = load i32, i32* %a2, align 4
-    %addr2 = getelementptr [6 x i32], [6 x i32]* %a1, i64 0, i32 1
-    store i32 5, i32* %addr2, align 4
+    %addr11 = bitcast ptr %a1 to ptr
+    store i32 4, ptr %addr11, align 4
+    %ld = load i32, ptr %a2, align 4
+    %addr2 = getelementptr [6 x i32], ptr %a1, i64 0, i32 1
+    store i32 5, ptr %addr2, align 4
     ret i32 %ld
   }
 
-  define void @test_simple_2xs32_with_align(i32* %ptr) {
-    %addr11 = bitcast i32* %ptr to i32*
-    store i32 4, i32* %addr11, align 4
-    %addr2 = getelementptr i32, i32* %ptr, i64 1
-    store i32 5, i32* %addr2, align 4
+  define void @test_simple_2xs32_with_align(ptr %ptr) {
+    %addr11 = bitcast ptr %ptr to ptr
+    store i32 4, ptr %addr11, align 4
+    %addr2 = getelementptr i32, ptr %ptr, i64 1
+    store i32 5, ptr %addr2, align 4
     ret void
   }
 

diff --git a/llvm/test/CodeGen/AArch64/PBQP-csr.ll b/llvm/test/CodeGen/AArch64/PBQP-csr.ll
index e071eda17e354..55dbc5720b47f 100644
--- a/llvm/test/CodeGen/AArch64/PBQP-csr.ll
+++ b/llvm/test/CodeGen/AArch64/PBQP-csr.ll
@@ -1,37 +1,37 @@
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a57 -mattr=+neon -fp-contract=fast -regalloc=pbqp -pbqp-coalescing | FileCheck %s
 
-%pl = type { i32, i32, i32, i32, %p*, %l*, double* }
-%p = type { i32, %ca*, [27 x %ca*], %v*, %v*, %v*, i32 }
+%pl = type { i32, i32, i32, i32, ptr, ptr, ptr }
+%p = type { i32, ptr, [27 x ptr], ptr, ptr, ptr, i32 }
 %ca = type { %v, float, i32 }
 %v = type { double, double, double }
 %l = type opaque
-%rs = type { i32, i32, i32, i32, %v*, %v*, [21 x double], %v, %v, %v, double, double, double }
+%rs = type { i32, i32, i32, i32, ptr, ptr, [21 x double], %v, %v, %v, double, double, double }
 
 ;CHECK-LABEL: test_csr
-define void @test_csr(%pl* nocapture readnone %this, %rs* nocapture %r) align 2 {
+define void @test_csr(ptr nocapture readnone %this, ptr nocapture %r) align 2 {
 ;CHECK-NOT: stp {{d[0-9]+}}, {{d[0-9]+}}
 entry:
-  %x.i = getelementptr inbounds %rs, %rs* %r, i64 0, i32 7, i32 0
-  %y.i = getelementptr inbounds %rs, %rs* %r, i64 0, i32 7, i32 1
-  %z.i = getelementptr inbounds %rs, %rs* %r, i64 0, i32 7, i32 2
-  %x.i61 = getelementptr inbounds %rs, %rs* %r, i64 0, i32 8, i32 0
-  %y.i62 = getelementptr inbounds %rs, %rs* %r, i64 0, i32 8, i32 1
-  %z.i63 = getelementptr inbounds %rs, %rs* %r, i64 0, i32 8, i32 2
-  %x.i58 = getelementptr inbounds %rs, %rs* %r, i64 0, i32 9, i32 0
-  %y.i59 = getelementptr inbounds %rs, %rs* %r, i64 0, i32 9, i32 1
-  %z.i60 = getelementptr inbounds %rs, %rs* %r, i64 0, i32 9, i32 2
-  %na = getelementptr inbounds %rs, %rs* %r, i64 0, i32 0
-  %0 = bitcast double* %x.i to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 72, i1 false)
-  %1 = load i32, i32* %na, align 4
+  %x.i = getelementptr inbounds %rs, ptr %r, i64 0, i32 7, i32 0
+  %y.i = getelementptr inbounds %rs, ptr %r, i64 0, i32 7, i32 1
+  %z.i = getelementptr inbounds %rs, ptr %r, i64 0, i32 7, i32 2
+  %x.i61 = getelementptr inbounds %rs, ptr %r, i64 0, i32 8, i32 0
+  %y.i62 = getelementptr inbounds %rs, ptr %r, i64 0, i32 8, i32 1
+  %z.i63 = getelementptr inbounds %rs, ptr %r, i64 0, i32 8, i32 2
+  %x.i58 = getelementptr inbounds %rs, ptr %r, i64 0, i32 9, i32 0
+  %y.i59 = getelementptr inbounds %rs, ptr %r, i64 0, i32 9, i32 1
+  %z.i60 = getelementptr inbounds %rs, ptr %r, i64 0, i32 9, i32 2
+  %na = getelementptr inbounds %rs, ptr %r, i64 0, i32 0
+  %0 = bitcast ptr %x.i to ptr
+  call void @llvm.memset.p0.i64(ptr align 8 %0, i8 0, i64 72, i1 false)
+  %1 = load i32, ptr %na, align 4
   %cmp70 = icmp sgt i32 %1, 0
   br i1 %cmp70, label %for.body.lr.ph, label %for.end
 
 for.body.lr.ph:                                   ; preds = %entry
-  %fn = getelementptr inbounds %rs, %rs* %r, i64 0, i32 4
-  %2 = load %v*, %v** %fn, align 8
-  %fs = getelementptr inbounds %rs, %rs* %r, i64 0, i32 5
-  %3 = load %v*, %v** %fs, align 8
+  %fn = getelementptr inbounds %rs, ptr %r, i64 0, i32 4
+  %2 = load ptr, ptr %fn, align 8
+  %fs = getelementptr inbounds %rs, ptr %r, i64 0, i32 5
+  %3 = load ptr, ptr %fs, align 8
   %4 = sext i32 %1 to i64
   br label %for.body
 
@@ -42,39 +42,39 @@ for.body:                                         ; preds = %for.body.lr.ph, %fo
   %7 = phi <2 x double> [ zeroinitializer, %for.body.lr.ph ], [ %22, %for.body ]
   %8 = phi <2 x double> [ zeroinitializer, %for.body.lr.ph ], [ %26, %for.body ]
   %9 = phi <2 x double> [ zeroinitializer, %for.body.lr.ph ], [ %28, %for.body ]
-  %x.i54 = getelementptr inbounds %v, %v* %2, i64 %indvars.iv, i32 0
-  %x1.i = getelementptr inbounds %v, %v* %3, i64 %indvars.iv, i32 0
-  %y.i56 = getelementptr inbounds %v, %v* %2, i64 %indvars.iv, i32 1
-  %10 = bitcast double* %x.i54 to <2 x double>*
-  %11 = load <2 x double>, <2 x double>* %10, align 8
-  %y2.i = getelementptr inbounds %v, %v* %3, i64 %indvars.iv, i32 1
-  %12 = bitcast double* %x1.i to <2 x double>*
-  %13 = load <2 x double>, <2 x double>* %12, align 8
+  %x.i54 = getelementptr inbounds %v, ptr %2, i64 %indvars.iv, i32 0
+  %x1.i = getelementptr inbounds %v, ptr %3, i64 %indvars.iv, i32 0
+  %y.i56 = getelementptr inbounds %v, ptr %2, i64 %indvars.iv, i32 1
+  %10 = bitcast ptr %x.i54 to ptr
+  %11 = load <2 x double>, ptr %10, align 8
+  %y2.i = getelementptr inbounds %v, ptr %3, i64 %indvars.iv, i32 1
+  %12 = bitcast ptr %x1.i to ptr
+  %13 = load <2 x double>, ptr %12, align 8
   %14 = fadd fast <2 x double> %13, %11
-  %z.i57 = getelementptr inbounds %v, %v* %2, i64 %indvars.iv, i32 2
-  %15 = load double, double* %z.i57, align 8
-  %z4.i = getelementptr inbounds %v, %v* %3, i64 %indvars.iv, i32 2
-  %16 = load double, double* %z4.i, align 8
+  %z.i57 = getelementptr inbounds %v, ptr %2, i64 %indvars.iv, i32 2
+  %15 = load double, ptr %z.i57, align 8
+  %z4.i = getelementptr inbounds %v, ptr %3, i64 %indvars.iv, i32 2
+  %16 = load double, ptr %z4.i, align 8
   %add5.i = fadd fast double %16, %15
   %17 = fadd fast <2 x double> %6, %11
-  %18 = bitcast double* %x.i to <2 x double>*
-  store <2 x double> %17, <2 x double>* %18, align 8
-  %19 = load double, double* %x1.i, align 8
+  %18 = bitcast ptr %x.i to ptr
+  store <2 x double> %17, ptr %18, align 8
+  %19 = load double, ptr %x1.i, align 8
   %20 = insertelement <2 x double> undef, double %15, i32 0
   %21 = insertelement <2 x double> %20, double %19, i32 1
   %22 = fadd fast <2 x double> %7, %21
-  %23 = bitcast double* %z.i to <2 x double>*
-  store <2 x double> %22, <2 x double>* %23, align 8
-  %24 = bitcast double* %y2.i to <2 x double>*
-  %25 = load <2 x double>, <2 x double>* %24, align 8
+  %23 = bitcast ptr %z.i to ptr
+  store <2 x double> %22, ptr %23, align 8
+  %24 = bitcast ptr %y2.i to ptr
+  %25 = load <2 x double>, ptr %24, align 8
   %26 = fadd fast <2 x double> %8, %25
-  %27 = bitcast double* %y.i62 to <2 x double>*
-  store <2 x double> %26, <2 x double>* %27, align 8
+  %27 = bitcast ptr %y.i62 to ptr
+  store <2 x double> %26, ptr %27, align 8
   %28 = fadd fast <2 x double> %14, %9
-  %29 = bitcast double* %x.i58 to <2 x double>*
-  store <2 x double> %28, <2 x double>* %29, align 8
+  %29 = bitcast ptr %x.i58 to ptr
+  store <2 x double> %28, ptr %29, align 8
   %add6.i = fadd fast double %add5.i, %5
-  store double %add6.i, double* %z.i60, align 8
+  store double %add6.i, ptr %z.i60, align 8
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %cmp = icmp slt i64 %indvars.iv.next, %4
   br i1 %cmp, label %for.body, label %for.end.loopexit
@@ -87,5 +87,5 @@ for.end:                                          ; preds = %for.end.loopexit, %
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
 

diff --git a/llvm/test/CodeGen/AArch64/a55-fuse-address.mir b/llvm/test/CodeGen/AArch64/a55-fuse-address.mir
index d4e66ec99afff..4edff043a7b3e 100644
--- a/llvm/test/CodeGen/AArch64/a55-fuse-address.mir
+++ b/llvm/test/CodeGen/AArch64/a55-fuse-address.mir
@@ -9,8 +9,8 @@
 
   define i32 @fuseaddress(i32 %num) #0 {
   entry:
-    %0 = load i32, i32* @a, align 4
-    %1 = load i32, i32* @b, align 4
+    %0 = load i32, ptr @a, align 4
+    %1 = load i32, ptr @b, align 4
     %mul = mul nsw i32 %0, %1
     ret i32 %mul
   }

diff --git a/llvm/test/CodeGen/AArch64/aarch64-dup-ext-crash.ll b/llvm/test/CodeGen/AArch64/aarch64-dup-ext-crash.ll
index ec118a50d56de..ef54cc4bbf718 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-dup-ext-crash.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-dup-ext-crash.ll
@@ -32,12 +32,12 @@ vector.ph:                                        ; preds = %vector.memcheck
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
-  %wide.load = load <2 x i32>, <2 x i32>* undef, align 4
+  %wide.load = load <2 x i32>, ptr undef, align 4
   %0 = zext <2 x i32> %wide.load to <2 x i64>
   %1 = mul nuw <2 x i64> %broadcast.splat, %0
   %2 = trunc <2 x i64> %1 to <2 x i32>
   %3 = select <2 x i1> undef, <2 x i32> undef, <2 x i32> %2
-  %4 = bitcast i32* undef to <2 x i32>*
-  store <2 x i32> %3, <2 x i32>* %4, align 4
+  %4 = bitcast ptr undef to ptr
+  store <2 x i32> %3, ptr %4, align 4
   br label %vector.body
 }

diff --git a/llvm/test/CodeGen/AArch64/aarch64-ldst-no-premature-sp-pop.mir b/llvm/test/CodeGen/AArch64/aarch64-ldst-no-premature-sp-pop.mir
index 7878bac06472b..ba621cf77f9ae 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-ldst-no-premature-sp-pop.mir
+++ b/llvm/test/CodeGen/AArch64/aarch64-ldst-no-premature-sp-pop.mir
@@ -7,16 +7,16 @@
 
   define hidden i32 @foo(i32 %0) {
     %2 = alloca [4 x i32], align 4
-    %3 = bitcast [4 x i32]* %2 to i8*
-    call void @llvm.memset.p0i8.i64(i8* nonnull align 4 dereferenceable(16) %3, i8 0, i64 16, i1 false)
+    %3 = bitcast ptr %2 to ptr
+    call void @llvm.memset.p0.i64(ptr nonnull align 4 dereferenceable(16) %3, i8 0, i64 16, i1 false)
     %4 = sext i32 %0 to i64
-    %5 = getelementptr inbounds [4 x i32], [4 x i32]* %2, i64 0, i64 %4
-    %6 = load i32, i32* %5, align 4
+    %5 = getelementptr inbounds [4 x i32], ptr %2, i64 0, i64 %4
+    %6 = load i32, ptr %5, align 4
     ret i32 %6
   }
 
-  declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #2
-  declare void @llvm.stackprotector(i8*, i8**) #3
+  declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #2
+  declare void @llvm.stackprotector(ptr, ptr) #3
 
   !llvm.module.flags = !{!0}
   !llvm.ident = !{!1}

diff --git a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
index 2eb1031cefe5b..f44f7863e78fc 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-none-linux-gnu < %s -o -| FileCheck %s
 
-define void @matrix_mul_unsigned(i32 %N, i32* nocapture %C, i16* nocapture readonly %A, i16 %val) {
+define void @matrix_mul_unsigned(i32 %N, ptr nocapture %C, ptr nocapture readonly %A, i16 %val) {
 ; CHECK-LABEL: matrix_mul_unsigned:
 ; CHECK:       // %bb.0: // %vector.header
 ; CHECK-NEXT:    and w8, w3, #0xffff
@@ -41,22 +41,22 @@ vector.body:                                      ; preds = %vector.header, %vec
   %3 = trunc i64 %index to i32
   %4 = add i32 %N, %3
   %5 = zext i32 %4 to i64
-  %6 = getelementptr inbounds i16, i16* %A, i64 %5
-  %7 = bitcast i16* %6 to <4 x i16>*
-  %wide.load = load <4 x i16>, <4 x i16>* %7, align 2
-  %8 = getelementptr inbounds i16, i16* %6, i64 4
-  %9 = bitcast i16* %8 to <4 x i16>*
-  %wide.load30 = load <4 x i16>, <4 x i16>* %9, align 2
+  %6 = getelementptr inbounds i16, ptr %A, i64 %5
+  %7 = bitcast ptr %6 to ptr
+  %wide.load = load <4 x i16>, ptr %7, align 2
+  %8 = getelementptr inbounds i16, ptr %6, i64 4
+  %9 = bitcast ptr %8 to ptr
+  %wide.load30 = load <4 x i16>, ptr %9, align 2
   %10 = zext <4 x i16> %wide.load to <4 x i32>
   %11 = zext <4 x i16> %wide.load30 to <4 x i32>
   %12 = mul nuw nsw <4 x i32> %broadcast.splat, %10
   %13 = mul nuw nsw <4 x i32> %broadcast.splat32, %11
-  %14 = getelementptr inbounds i32, i32* %C, i64 %5
-  %15 = bitcast i32* %14 to <4 x i32>*
-  store <4 x i32> %12, <4 x i32>* %15, align 4
-  %16 = getelementptr inbounds i32, i32* %14, i64 4
-  %17 = bitcast i32* %16 to <4 x i32>*
-  store <4 x i32> %13, <4 x i32>* %17, align 4
+  %14 = getelementptr inbounds i32, ptr %C, i64 %5
+  %15 = bitcast ptr %14 to ptr
+  store <4 x i32> %12, ptr %15, align 4
+  %16 = getelementptr inbounds i32, ptr %14, i64 4
+  %17 = bitcast ptr %16 to ptr
+  store <4 x i32> %13, ptr %17, align 4
   %index.next = add i64 %index, 8
   %18 = icmp eq i64 %index.next, %n.vec
   br i1 %18, label %for.end12, label %vector.body
@@ -65,7 +65,7 @@ for.end12:                                        ; preds = %vector.body
   ret void
 }
 
-define void @matrix_mul_signed(i32 %N, i32* nocapture %C, i16* nocapture readonly %A, i16 %val) {
+define void @matrix_mul_signed(i32 %N, ptr nocapture %C, ptr nocapture readonly %A, i16 %val) {
 ; CHECK-LABEL: matrix_mul_signed:
 ; CHECK:       // %bb.0: // %vector.header
 ; CHECK-NEXT:    sxth w8, w3
@@ -105,22 +105,22 @@ vector.body:                                      ; preds = %vector.header, %vec
   %3 = trunc i64 %index to i32
   %4 = add i32 %N, %3
   %5 = sext i32 %4 to i64
-  %6 = getelementptr inbounds i16, i16* %A, i64 %5
-  %7 = bitcast i16* %6 to <4 x i16>*
-  %wide.load = load <4 x i16>, <4 x i16>* %7, align 2
-  %8 = getelementptr inbounds i16, i16* %6, i64 4
-  %9 = bitcast i16* %8 to <4 x i16>*
-  %wide.load30 = load <4 x i16>, <4 x i16>* %9, align 2
+  %6 = getelementptr inbounds i16, ptr %A, i64 %5
+  %7 = bitcast ptr %6 to ptr
+  %wide.load = load <4 x i16>, ptr %7, align 2
+  %8 = getelementptr inbounds i16, ptr %6, i64 4
+  %9 = bitcast ptr %8 to ptr
+  %wide.load30 = load <4 x i16>, ptr %9, align 2
   %10 = sext <4 x i16> %wide.load to <4 x i32>
   %11 = sext <4 x i16> %wide.load30 to <4 x i32>
   %12 = mul nsw <4 x i32> %broadcast.splat, %10
   %13 = mul nsw <4 x i32> %broadcast.splat32, %11
-  %14 = getelementptr inbounds i32, i32* %C, i64 %5
-  %15 = bitcast i32* %14 to <4 x i32>*
-  store <4 x i32> %12, <4 x i32>* %15, align 4
-  %16 = getelementptr inbounds i32, i32* %14, i64 4
-  %17 = bitcast i32* %16 to <4 x i32>*
-  store <4 x i32> %13, <4 x i32>* %17, align 4
+  %14 = getelementptr inbounds i32, ptr %C, i64 %5
+  %15 = bitcast ptr %14 to ptr
+  store <4 x i32> %12, ptr %15, align 4
+  %16 = getelementptr inbounds i32, ptr %14, i64 4
+  %17 = bitcast ptr %16 to ptr
+  store <4 x i32> %13, ptr %17, align 4
   %index.next = add i64 %index, 8
   %18 = icmp eq i64 %index.next, %n.vec
   br i1 %18, label %for.end12, label %vector.body
@@ -130,7 +130,7 @@ for.end12:                                        ; preds = %vector.body
 }
 
 
-define void @matrix_mul_double_shuffle(i32 %N, i32* nocapture %C, i16* nocapture readonly %A, i16 %val) {
+define void @matrix_mul_double_shuffle(i32 %N, ptr nocapture %C, ptr nocapture readonly %A, i16 %val) {
 ; CHECK-LABEL: matrix_mul_double_shuffle:
 ; CHECK:       // %bb.0: // %vector.header
 ; CHECK-NEXT:    and w8, w3, #0xffff
@@ -165,8 +165,8 @@ vector.header:
 
 vector.body:                                      ; preds = %vector.header, %vector.body
   %index = phi i64 [ %index.next, %vector.body ], [ 0, %vector.header ]
-  %g = getelementptr inbounds i16, i16* %A, i64 %index
-  %val1 = load i16, i16* %g
+  %g = getelementptr inbounds i16, ptr %A, i64 %index
+  %val1 = load i16, ptr %g
   %splat.input.ext = zext i16 %val1 to i32
   %broadcast.splatinsert31 = insertelement <4 x i32> undef, i32 %splat.input.ext, i32 0
   %broadcast.splat32 = shufflevector <4 x i32> %broadcast.splatinsert31, <4 x i32> %broadcast.splat, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -174,9 +174,9 @@ vector.body:                                      ; preds = %vector.header, %vec
   %4 = add i32 %N, %3
   %5 = zext i32 %4 to i64
   %6 = mul nuw nsw <4 x i32> %broadcast.splat, %broadcast.splat32
-  %7 = getelementptr inbounds i32, i32* %C, i64 %5
-  %8 = bitcast i32* %7 to <4 x i32>*
-  store <4 x i32> %6, <4 x i32>* %8, align 4
+  %7 = getelementptr inbounds i32, ptr %C, i64 %5
+  %8 = bitcast ptr %7 to ptr
+  store <4 x i32> %6, ptr %8, align 4
   %index.next = add i64 %index, 8
   %9 = icmp eq i64 %index.next, %n.vec
   br i1 %9, label %for.end12, label %vector.body
@@ -186,7 +186,7 @@ for.end12:                                        ; preds = %vector.body
 }
 
 
-define void @larger_smull(i16* nocapture noundef readonly %x, i16 noundef %y, i32* noalias nocapture noundef writeonly %s, i32 noundef %n) {
+define void @larger_smull(ptr nocapture noundef readonly %x, i16 noundef %y, ptr noalias nocapture noundef writeonly %s, i32 noundef %n) {
 ; CHECK-LABEL: larger_smull:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cmp w3, #1
@@ -253,22 +253,22 @@ vector.ph:                                        ; preds = %for.body.preheader
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %x, i64 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = getelementptr inbounds i16, i16* %0, i64 8
-  %3 = bitcast i16* %2 to <8 x i16>*
-  %wide.load11 = load <8 x i16>, <8 x i16>* %3, align 2
+  %0 = getelementptr inbounds i16, ptr %x, i64 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <8 x i16>, ptr %1, align 2
+  %2 = getelementptr inbounds i16, ptr %0, i64 8
+  %3 = bitcast ptr %2 to ptr
+  %wide.load11 = load <8 x i16>, ptr %3, align 2
   %4 = sext <8 x i16> %wide.load to <8 x i32>
   %5 = sext <8 x i16> %wide.load11 to <8 x i32>
   %6 = mul nsw <8 x i32> %broadcast.splat, %4
   %7 = mul nsw <8 x i32> %broadcast.splat13, %5
-  %8 = getelementptr inbounds i32, i32* %s, i64 %index
-  %9 = bitcast i32* %8 to <8 x i32>*
-  store <8 x i32> %6, <8 x i32>* %9, align 4
-  %10 = getelementptr inbounds i32, i32* %8, i64 8
-  %11 = bitcast i32* %10 to <8 x i32>*
-  store <8 x i32> %7, <8 x i32>* %11, align 4
+  %8 = getelementptr inbounds i32, ptr %s, i64 %index
+  %9 = bitcast ptr %8 to ptr
+  store <8 x i32> %6, ptr %9, align 4
+  %10 = getelementptr inbounds i32, ptr %8, i64 8
+  %11 = bitcast ptr %10 to ptr
+  store <8 x i32> %7, ptr %11, align 4
   %index.next = add nuw i64 %index, 16
   %12 = icmp eq i64 %index.next, %n.vec
   br i1 %12, label %middle.block, label %vector.body
@@ -286,19 +286,19 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader14, %for.body
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader14 ]
-  %arrayidx = getelementptr inbounds i16, i16* %x, i64 %indvars.iv
-  %13 = load i16, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %x, i64 %indvars.iv
+  %13 = load i16, ptr %arrayidx, align 2
   %conv = sext i16 %13 to i32
   %mul = mul nsw i32 %conv, %conv1
-  %arrayidx3 = getelementptr inbounds i32, i32* %s, i64 %indvars.iv
-  store i32 %mul, i32* %arrayidx3, align 4
+  %arrayidx3 = getelementptr inbounds i32, ptr %s, i64 %indvars.iv
+  store i32 %mul, ptr %arrayidx3, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
   br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
 }
 
 
-define void @larger_umull(i16* nocapture noundef readonly %x, i16 noundef %y, i32* noalias nocapture noundef writeonly %s, i32 noundef %n) {
+define void @larger_umull(ptr nocapture noundef readonly %x, i16 noundef %y, ptr noalias nocapture noundef writeonly %s, i32 noundef %n) {
 ; CHECK-LABEL: larger_umull:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cmp w3, #1
@@ -365,22 +365,22 @@ vector.ph:                                        ; preds = %for.body.preheader
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %0 = getelementptr inbounds i16, i16* %x, i64 %index
-  %1 = bitcast i16* %0 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %1, align 2
-  %2 = getelementptr inbounds i16, i16* %0, i64 8
-  %3 = bitcast i16* %2 to <8 x i16>*
-  %wide.load11 = load <8 x i16>, <8 x i16>* %3, align 2
+  %0 = getelementptr inbounds i16, ptr %x, i64 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <8 x i16>, ptr %1, align 2
+  %2 = getelementptr inbounds i16, ptr %0, i64 8
+  %3 = bitcast ptr %2 to ptr
+  %wide.load11 = load <8 x i16>, ptr %3, align 2
   %4 = zext <8 x i16> %wide.load to <8 x i32>
   %5 = zext <8 x i16> %wide.load11 to <8 x i32>
   %6 = mul nuw <8 x i32> %broadcast.splat, %4
   %7 = mul nuw <8 x i32> %broadcast.splat13, %5
-  %8 = getelementptr inbounds i32, i32* %s, i64 %index
-  %9 = bitcast i32* %8 to <8 x i32>*
-  store <8 x i32> %6, <8 x i32>* %9, align 4
-  %10 = getelementptr inbounds i32, i32* %8, i64 8
-  %11 = bitcast i32* %10 to <8 x i32>*
-  store <8 x i32> %7, <8 x i32>* %11, align 4
+  %8 = getelementptr inbounds i32, ptr %s, i64 %index
+  %9 = bitcast ptr %8 to ptr
+  store <8 x i32> %6, ptr %9, align 4
+  %10 = getelementptr inbounds i32, ptr %8, i64 8
+  %11 = bitcast ptr %10 to ptr
+  store <8 x i32> %7, ptr %11, align 4
   %index.next = add nuw i64 %index, 16
   %12 = icmp eq i64 %index.next, %n.vec
   br i1 %12, label %middle.block, label %vector.body
@@ -398,19 +398,19 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader14, %for.body
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader14 ]
-  %arrayidx = getelementptr inbounds i16, i16* %x, i64 %indvars.iv
-  %13 = load i16, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %x, i64 %indvars.iv
+  %13 = load i16, ptr %arrayidx, align 2
   %conv = zext i16 %13 to i32
   %mul = mul nuw i32 %conv, %conv1
-  %arrayidx3 = getelementptr inbounds i32, i32* %s, i64 %indvars.iv
-  store i32 %mul, i32* %arrayidx3, align 4
+  %arrayidx3 = getelementptr inbounds i32, ptr %s, i64 %indvars.iv
+  store i32 %mul, ptr %arrayidx3, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
   br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
 }
 
 
-define i16 @red_mla_dup_ext_u8_s8_s16(i8* noalias nocapture noundef readonly %A, i8 noundef %B, i32 noundef %n) {
+define i16 @red_mla_dup_ext_u8_s8_s16(ptr noalias nocapture noundef readonly %A, i8 noundef %B, i32 noundef %n) {
 ; CHECK-LABEL: red_mla_dup_ext_u8_s8_s16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cbz w2, .LBB5_3
@@ -484,12 +484,12 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.phi = phi <8 x i16> [ zeroinitializer, %vector.ph ], [ %8, %vector.body ]
   %vec.phi13 = phi <8 x i16> [ zeroinitializer, %vector.ph ], [ %9, %vector.body ]
-  %0 = getelementptr inbounds i8, i8* %A, i64 %index
-  %1 = bitcast i8* %0 to <8 x i8>*
-  %wide.load = load <8 x i8>, <8 x i8>* %1, align 1
-  %2 = getelementptr inbounds i8, i8* %0, i64 8
-  %3 = bitcast i8* %2 to <8 x i8>*
-  %wide.load14 = load <8 x i8>, <8 x i8>* %3, align 1
+  %0 = getelementptr inbounds i8, ptr %A, i64 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <8 x i8>, ptr %1, align 1
+  %2 = getelementptr inbounds i8, ptr %0, i64 8
+  %3 = bitcast ptr %2 to ptr
+  %wide.load14 = load <8 x i8>, ptr %3, align 1
   %4 = zext <8 x i8> %wide.load to <8 x i16>
   %5 = zext <8 x i8> %wide.load14 to <8 x i16>
   %6 = mul nsw <8 x i16> %broadcast.splat, %4
@@ -518,8 +518,8 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 for.body:                                         ; preds = %for.body.preheader17, %for.body
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader17 ]
   %s.011 = phi i16 [ %add, %for.body ], [ %s.011.ph, %for.body.preheader17 ]
-  %arrayidx = getelementptr inbounds i8, i8* %A, i64 %indvars.iv
-  %12 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %A, i64 %indvars.iv
+  %12 = load i8, ptr %arrayidx, align 1
   %13 = zext i8 %12 to i16
   %mul = mul nsw i16 %13, %conv2
   %add = add i16 %mul, %s.011
@@ -528,7 +528,7 @@ for.body:                                         ; preds = %for.body.preheader1
   br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
 }
 
-define void @sink_v2z64_1(i32 *%p, i32 *%d, i64 %n, <2 x i32> %a) {
+define void @sink_v2z64_1(ptr %p, ptr %d, i64 %n, <2 x i32> %a) {
 ; CHECK-LABEL: sink_v2z64_1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov x8, xzr
@@ -551,16 +551,16 @@ entry:
 
 loop:
   %index = phi i64 [ 0, %entry ], [ %index.next, %loop ]
-  %g = getelementptr inbounds i32, i32 *%p, i64 %index
-  %gb = bitcast i32* %g to <2 x i32>*
-  %l = load <2 x i32>, <2 x i32> *%gb, align 4
+  %g = getelementptr inbounds i32, ptr %p, i64 %index
+  %gb = bitcast ptr %g to ptr
+  %l = load <2 x i32>, ptr %gb, align 4
   %e = zext <2 x i32> %l to <2 x i64>
   %m = mul <2 x i64> %e, %broadcast.splat
   %s = ashr <2 x i64> %m, <i64 15, i64 15>
   %t = trunc <2 x i64> %s to <2 x i32>
-  %h = getelementptr inbounds i32, i32 *%d, i64 %index
-  %hb = bitcast i32* %g to <2 x i32>*
-  store <2 x i32> %t, <2 x i32> *%hb, align 4
+  %h = getelementptr inbounds i32, ptr %d, i64 %index
+  %hb = bitcast ptr %g to ptr
+  store <2 x i32> %t, ptr %hb, align 4
   %index.next = add nuw i64 %index, 8
   %c = icmp eq i64 %index.next, %n
   br i1 %c, label %exit, label %loop
@@ -569,7 +569,7 @@ exit:
   ret void
 }
 
-define void @sink_v4i64_1(i32 *%p, i32 *%d, i64 %n, <2 x i32> %a) {
+define void @sink_v4i64_1(ptr %p, ptr %d, i64 %n, <2 x i32> %a) {
 ; CHECK-LABEL: sink_v4i64_1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov x8, xzr
@@ -594,16 +594,16 @@ entry:
 
 loop:
   %index = phi i64 [ 0, %entry ], [ %index.next, %loop ]
-  %g = getelementptr inbounds i32, i32 *%p, i64 %index
-  %gb = bitcast i32* %g to <4 x i32>*
-  %l = load <4 x i32>, <4 x i32> *%gb, align 4
+  %g = getelementptr inbounds i32, ptr %p, i64 %index
+  %gb = bitcast ptr %g to ptr
+  %l = load <4 x i32>, ptr %gb, align 4
   %e = sext <4 x i32> %l to <4 x i64>
   %m = mul <4 x i64> %e, %broadcast.splat
   %s = ashr <4 x i64> %m, <i64 15, i64 15, i64 15, i64 15>
   %t = trunc <4 x i64> %s to <4 x i32>
-  %h = getelementptr inbounds i32, i32 *%d, i64 %index
-  %hb = bitcast i32* %g to <4 x i32>*
-  store <4 x i32> %t, <4 x i32> *%hb, align 4
+  %h = getelementptr inbounds i32, ptr %d, i64 %index
+  %hb = bitcast ptr %g to ptr
+  store <4 x i32> %t, ptr %hb, align 4
   %index.next = add nuw i64 %index, 8
   %c = icmp eq i64 %index.next, %n
   br i1 %c, label %exit, label %loop
@@ -612,7 +612,7 @@ exit:
   ret void
 }
 
-define void @sink_v8z16_0(i32 *%p, i32 *%d, i64 %n, <16 x i8> %a) {
+define void @sink_v8z16_0(ptr %p, ptr %d, i64 %n, <16 x i8> %a) {
 ; CHECK-LABEL: sink_v8z16_0:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    dup v0.8b, v0.b[0]
@@ -636,16 +636,16 @@ entry:
 
 loop:
   %index = phi i64 [ 0, %entry ], [ %index.next, %loop ]
-  %g = getelementptr inbounds i32, i32 *%p, i64 %index
-  %gb = bitcast i32* %g to <8 x i8>*
-  %l = load <8 x i8>, <8 x i8> *%gb, align 4
+  %g = getelementptr inbounds i32, ptr %p, i64 %index
+  %gb = bitcast ptr %g to ptr
+  %l = load <8 x i8>, ptr %gb, align 4
   %e = zext <8 x i8> %l to <8 x i16>
   %m = mul <8 x i16> %e, %broadcast.splat
   %s = ashr <8 x i16> %m, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
   %t = trunc <8 x i16> %s to <8 x i8>
-  %h = getelementptr inbounds i32, i32 *%d, i64 %index
-  %hb = bitcast i32* %g to <8 x i8>*
-  store <8 x i8> %t, <8 x i8> *%hb, align 4
+  %h = getelementptr inbounds i32, ptr %d, i64 %index
+  %hb = bitcast ptr %g to ptr
+  store <8 x i8> %t, ptr %hb, align 4
   %index.next = add nuw i64 %index, 8
   %c = icmp eq i64 %index.next, %n
   br i1 %c, label %exit, label %loop
@@ -654,7 +654,7 @@ exit:
   ret void
 }
 
-define void @sink_v16s16_8(i32 *%p, i32 *%d, i64 %n, <16 x i8> %a) {
+define void @sink_v16s16_8(ptr %p, ptr %d, i64 %n, <16 x i8> %a) {
 ; CHECK-LABEL: sink_v16s16_8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    dup v0.16b, v0.b[10]
@@ -680,16 +680,16 @@ entry:
 
 loop:
   %index = phi i64 [ 0, %entry ], [ %index.next, %loop ]
-  %g = getelementptr inbounds i32, i32 *%p, i64 %index
-  %gb = bitcast i32* %g to <16 x i8>*
-  %l = load <16 x i8>, <16 x i8> *%gb, align 4
+  %g = getelementptr inbounds i32, ptr %p, i64 %index
+  %gb = bitcast ptr %g to ptr
+  %l = load <16 x i8>, ptr %gb, align 4
   %e = sext <16 x i8> %l to <16 x i16>
   %m = mul <16 x i16> %e, %broadcast.splat
   %s = ashr <16 x i16> %m, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
   %t = trunc <16 x i16> %s to <16 x i8>
-  %h = getelementptr inbounds i32, i32 *%d, i64 %index
-  %hb = bitcast i32* %g to <16 x i8>*
-  store <16 x i8> %t, <16 x i8> *%hb, align 4
+  %h = getelementptr inbounds i32, ptr %d, i64 %index
+  %hb = bitcast ptr %g to ptr
+  store <16 x i8> %t, ptr %hb, align 4
   %index.next = add nuw i64 %index, 8
   %c = icmp eq i64 %index.next, %n
   br i1 %c, label %exit, label %loop
@@ -698,7 +698,7 @@ exit:
   ret void
 }
 
-define void @matrix_mul_unsigned_and(i32 %N, i32* nocapture %C, i16* nocapture readonly %A, i32 %val) {
+define void @matrix_mul_unsigned_and(i32 %N, ptr nocapture %C, ptr nocapture readonly %A, i32 %val) {
 ; CHECK-LABEL: matrix_mul_unsigned_and:
 ; CHECK:       // %bb.0: // %vector.header
 ; CHECK-NEXT:    and w8, w3, #0xffff
@@ -738,22 +738,22 @@ vector.body:                                      ; preds = %vector.header, %vec
   %3 = trunc i64 %index to i32
   %4 = add i32 %N, %3
   %5 = zext i32 %4 to i64
-  %6 = getelementptr inbounds i16, i16* %A, i64 %5
-  %7 = bitcast i16* %6 to <4 x i16>*
-  %wide.load = load <4 x i16>, <4 x i16>* %7, align 2
-  %8 = getelementptr inbounds i16, i16* %6, i64 4
-  %9 = bitcast i16* %8 to <4 x i16>*
-  %wide.load30 = load <4 x i16>, <4 x i16>* %9, align 2
+  %6 = getelementptr inbounds i16, ptr %A, i64 %5
+  %7 = bitcast ptr %6 to ptr
+  %wide.load = load <4 x i16>, ptr %7, align 2
+  %8 = getelementptr inbounds i16, ptr %6, i64 4
+  %9 = bitcast ptr %8 to ptr
+  %wide.load30 = load <4 x i16>, ptr %9, align 2
   %10 = zext <4 x i16> %wide.load to <4 x i32>
   %11 = zext <4 x i16> %wide.load30 to <4 x i32>
   %12 = mul nuw nsw <4 x i32> %broadcast.splat, %10
   %13 = mul nuw nsw <4 x i32> %broadcast.splat32, %11
-  %14 = getelementptr inbounds i32, i32* %C, i64 %5
-  %15 = bitcast i32* %14 to <4 x i32>*
-  store <4 x i32> %12, <4 x i32>* %15, align 4
-  %16 = getelementptr inbounds i32, i32* %14, i64 4
-  %17 = bitcast i32* %16 to <4 x i32>*
-  store <4 x i32> %13, <4 x i32>* %17, align 4
+  %14 = getelementptr inbounds i32, ptr %C, i64 %5
+  %15 = bitcast ptr %14 to ptr
+  store <4 x i32> %12, ptr %15, align 4
+  %16 = getelementptr inbounds i32, ptr %14, i64 4
+  %17 = bitcast ptr %16 to ptr
+  store <4 x i32> %13, ptr %17, align 4
   %index.next = add i64 %index, 8
   %18 = icmp eq i64 %index.next, %n.vec
   br i1 %18, label %for.end12, label %vector.body
@@ -762,7 +762,7 @@ for.end12:                                        ; preds = %vector.body
   ret void
 }
 
-define void @matrix_mul_unsigned_and_double(i32 %N, i32* nocapture %C, i16* nocapture readonly %A, i32 %val) {
+define void @matrix_mul_unsigned_and_double(i32 %N, ptr nocapture %C, ptr nocapture readonly %A, i32 %val) {
 ; CHECK-LABEL: matrix_mul_unsigned_and_double:
 ; CHECK:       // %bb.0: // %vector.header
 ; CHECK-NEXT:    and w8, w3, #0xffff
@@ -806,22 +806,22 @@ vector.body:                                      ; preds = %vector.header, %vec
   %3 = trunc i64 %index to i32
   %4 = add i32 %N, %3
   %5 = zext i32 %4 to i64
-  %6 = getelementptr inbounds i16, i16* %A, i64 %5
-  %7 = bitcast i16* %6 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %7, align 2
-  %8 = getelementptr inbounds i16, i16* %6, i64 4
-  %9 = bitcast i16* %8 to <8 x i16>*
-  %wide.load30 = load <8 x i16>, <8 x i16>* %9, align 2
+  %6 = getelementptr inbounds i16, ptr %A, i64 %5
+  %7 = bitcast ptr %6 to ptr
+  %wide.load = load <8 x i16>, ptr %7, align 2
+  %8 = getelementptr inbounds i16, ptr %6, i64 4
+  %9 = bitcast ptr %8 to ptr
+  %wide.load30 = load <8 x i16>, ptr %9, align 2
   %10 = zext <8 x i16> %wide.load to <8 x i32>
   %11 = zext <8 x i16> %wide.load30 to <8 x i32>
   %12 = mul nuw nsw <8 x i32> %broadcast.splat, %10
   %13 = mul nuw nsw <8 x i32> %broadcast.splat32, %11
-  %14 = getelementptr inbounds i32, i32* %C, i64 %5
-  %15 = bitcast i32* %14 to <8 x i32>*
-  store <8 x i32> %12, <8 x i32>* %15, align 4
-  %16 = getelementptr inbounds i32, i32* %14, i64 8
-  %17 = bitcast i32* %16 to <8 x i32>*
-  store <8 x i32> %13, <8 x i32>* %17, align 4
+  %14 = getelementptr inbounds i32, ptr %C, i64 %5
+  %15 = bitcast ptr %14 to ptr
+  store <8 x i32> %12, ptr %15, align 4
+  %16 = getelementptr inbounds i32, ptr %14, i64 8
+  %17 = bitcast ptr %16 to ptr
+  store <8 x i32> %13, ptr %17, align 4
   %index.next = add i64 %index, 16
   %18 = icmp eq i64 %index.next, %n.vec
   br i1 %18, label %for.end12, label %vector.body
@@ -830,7 +830,7 @@ for.end12:                                        ; preds = %vector.body
   ret void
 }
 
-define void @matrix_mul_signed_and(i32 %N, i32* nocapture %C, i16* nocapture readonly %A, i32 %val) {
+define void @matrix_mul_signed_and(i32 %N, ptr nocapture %C, ptr nocapture readonly %A, i32 %val) {
 ; CHECK-LABEL: matrix_mul_signed_and:
 ; CHECK:       // %bb.0: // %vector.header
 ; CHECK-NEXT:    and w8, w3, #0xffff
@@ -872,22 +872,22 @@ vector.body:                                      ; preds = %vector.header, %vec
   %3 = trunc i64 %index to i32
   %4 = add i32 %N, %3
   %5 = zext i32 %4 to i64
-  %6 = getelementptr inbounds i16, i16* %A, i64 %5
-  %7 = bitcast i16* %6 to <4 x i16>*
-  %wide.load = load <4 x i16>, <4 x i16>* %7, align 2
-  %8 = getelementptr inbounds i16, i16* %6, i64 4
-  %9 = bitcast i16* %8 to <4 x i16>*
-  %wide.load30 = load <4 x i16>, <4 x i16>* %9, align 2
+  %6 = getelementptr inbounds i16, ptr %A, i64 %5
+  %7 = bitcast ptr %6 to ptr
+  %wide.load = load <4 x i16>, ptr %7, align 2
+  %8 = getelementptr inbounds i16, ptr %6, i64 4
+  %9 = bitcast ptr %8 to ptr
+  %wide.load30 = load <4 x i16>, ptr %9, align 2
   %10 = sext <4 x i16> %wide.load to <4 x i32>
   %11 = sext <4 x i16> %wide.load30 to <4 x i32>
   %12 = mul nuw nsw <4 x i32> %broadcast.splat, %10
   %13 = mul nuw nsw <4 x i32> %broadcast.splat32, %11
-  %14 = getelementptr inbounds i32, i32* %C, i64 %5
-  %15 = bitcast i32* %14 to <4 x i32>*
-  store <4 x i32> %12, <4 x i32>* %15, align 4
-  %16 = getelementptr inbounds i32, i32* %14, i64 4
-  %17 = bitcast i32* %16 to <4 x i32>*
-  store <4 x i32> %13, <4 x i32>* %17, align 4
+  %14 = getelementptr inbounds i32, ptr %C, i64 %5
+  %15 = bitcast ptr %14 to ptr
+  store <4 x i32> %12, ptr %15, align 4
+  %16 = getelementptr inbounds i32, ptr %14, i64 4
+  %17 = bitcast ptr %16 to ptr
+  store <4 x i32> %13, ptr %17, align 4
   %index.next = add i64 %index, 8
   %18 = icmp eq i64 %index.next, %n.vec
   br i1 %18, label %for.end12, label %vector.body
@@ -896,7 +896,7 @@ for.end12:                                        ; preds = %vector.body
   ret void
 }
 
-define void @matrix_mul_signed_and_double(i32 %N, i32* nocapture %C, i16* nocapture readonly %A, i32 %val) {
+define void @matrix_mul_signed_and_double(i32 %N, ptr nocapture %C, ptr nocapture readonly %A, i32 %val) {
 ; CHECK-LABEL: matrix_mul_signed_and_double:
 ; CHECK:       // %bb.0: // %vector.header
 ; CHECK-NEXT:    and w8, w3, #0xffff
@@ -944,22 +944,22 @@ vector.body:                                      ; preds = %vector.header, %vec
   %3 = trunc i64 %index to i32
   %4 = add i32 %N, %3
   %5 = zext i32 %4 to i64
-  %6 = getelementptr inbounds i16, i16* %A, i64 %5
-  %7 = bitcast i16* %6 to <8 x i16>*
-  %wide.load = load <8 x i16>, <8 x i16>* %7, align 2
-  %8 = getelementptr inbounds i16, i16* %6, i64 4
-  %9 = bitcast i16* %8 to <8 x i16>*
-  %wide.load30 = load <8 x i16>, <8 x i16>* %9, align 2
+  %6 = getelementptr inbounds i16, ptr %A, i64 %5
+  %7 = bitcast ptr %6 to ptr
+  %wide.load = load <8 x i16>, ptr %7, align 2
+  %8 = getelementptr inbounds i16, ptr %6, i64 4
+  %9 = bitcast ptr %8 to ptr
+  %wide.load30 = load <8 x i16>, ptr %9, align 2
   %10 = sext <8 x i16> %wide.load to <8 x i32>
   %11 = sext <8 x i16> %wide.load30 to <8 x i32>
   %12 = mul nuw nsw <8 x i32> %broadcast.splat, %10
   %13 = mul nuw nsw <8 x i32> %broadcast.splat32, %11
-  %14 = getelementptr inbounds i32, i32* %C, i64 %5
-  %15 = bitcast i32* %14 to <8 x i32>*
-  store <8 x i32> %12, <8 x i32>* %15, align 4
-  %16 = getelementptr inbounds i32, i32* %14, i64 8
-  %17 = bitcast i32* %16 to <8 x i32>*
-  store <8 x i32> %13, <8 x i32>* %17, align 4
+  %14 = getelementptr inbounds i32, ptr %C, i64 %5
+  %15 = bitcast ptr %14 to ptr
+  store <8 x i32> %12, ptr %15, align 4
+  %16 = getelementptr inbounds i32, ptr %14, i64 8
+  %17 = bitcast ptr %16 to ptr
+  store <8 x i32> %13, ptr %17, align 4
   %index.next = add i64 %index, 16
   %18 = icmp eq i64 %index.next, %n.vec
   br i1 %18, label %for.end12, label %vector.body

diff --git a/llvm/test/CodeGen/AArch64/aarch64-mov-debug-locs.mir b/llvm/test/CodeGen/AArch64/aarch64-mov-debug-locs.mir
index 3ff117f58aa77..16e2de751381a 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-mov-debug-locs.mir
+++ b/llvm/test/CodeGen/AArch64/aarch64-mov-debug-locs.mir
@@ -42,10 +42,10 @@
   @.str = private unnamed_addr constant [17 x i8] c"Argument %d: %s\0A\00", align 1
   
   ; Function Attrs: nounwind
-  define dso_local i32 @main(i32 %argc, i8** nocapture readonly %argv) local_unnamed_addr #0 !dbg !7 {
+  define dso_local i32 @main(i32 %argc, ptr nocapture readonly %argv) local_unnamed_addr #0 !dbg !7 {
   entry:
     call void @llvm.dbg.value(metadata i32 %argc, metadata !15, metadata !DIExpression()), !dbg !19
-    call void @llvm.dbg.value(metadata i8** %argv, metadata !16, metadata !DIExpression()), !dbg !19
+    call void @llvm.dbg.value(metadata ptr %argv, metadata !16, metadata !DIExpression()), !dbg !19
     call void @llvm.dbg.value(metadata i32 1, metadata !17, metadata !DIExpression()), !dbg !20
     %cmp5 = icmp sgt i32 %argc, 1, !dbg !21
     br i1 %cmp5, label %for.body.preheader, label %for.cond.cleanup, !dbg !23
@@ -60,10 +60,10 @@
   for.body:                                         ; preds = %for.body, %for.body.preheader
     %indvars.iv = phi i64 [ 1, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
     call void @llvm.dbg.value(metadata i64 %indvars.iv, metadata !17, metadata !DIExpression()), !dbg !20
-    %scevgep = getelementptr i8*, i8** %argv, i64 %indvars.iv, !dbg !25
-    %0 = load i8*, i8** %scevgep, align 8, !dbg !25, !tbaa !28
+    %scevgep = getelementptr ptr, ptr %argv, i64 %indvars.iv, !dbg !25
+    %0 = load ptr, ptr %scevgep, align 8, !dbg !25, !tbaa !28
     %tmp = trunc i64 %indvars.iv to i32
-    %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @.str, i64 0, i64 0), i32 %tmp, i8* %0), !dbg !32
+    %call = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %tmp, ptr %0), !dbg !32
     %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !33
     call void @llvm.dbg.value(metadata i32 undef, metadata !17, metadata !DIExpression(DW_OP_plus_uconst, 1, DW_OP_stack_value)), !dbg !20
     %exitcond = icmp eq i64 %wide.trip.count, %indvars.iv.next, !dbg !21
@@ -71,13 +71,13 @@
   }
   
   ; Function Attrs: nounwind
-  declare dso_local i32 @printf(i8* nocapture readonly, ...) local_unnamed_addr #1
+  declare dso_local i32 @printf(ptr nocapture readonly, ...) local_unnamed_addr #1
   
   ; Function Attrs: nounwind readnone speculatable
   declare void @llvm.dbg.value(metadata, metadata, metadata) #2
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #3
+  declare void @llvm.stackprotector(ptr, ptr) #3
 
   attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff --git a/llvm/test/CodeGen/AArch64/aarch64-p2align-max-bytes-neoverse.ll b/llvm/test/CodeGen/AArch64/aarch64-p2align-max-bytes-neoverse.ll
index ec8fd3aa38022..8df4b26a3b531 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-p2align-max-bytes-neoverse.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-p2align-max-bytes-neoverse.ll
@@ -12,7 +12,7 @@
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a75  < %s -o -| FileCheck %s --check-prefixes=CHECK,CHECK-8
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a710 < %s -o -| FileCheck %s --check-prefixes=CHECK,CHECK-16
 
-define i32 @a(i32 %x, i32* nocapture readonly %y, i32* nocapture readonly %z) {
+define i32 @a(i32 %x, ptr nocapture readonly %y, ptr nocapture readonly %z) {
 ; CHECK-DEFAULT:    .p2align 5
 ; CHECK-8:          .p2align 4, , 8
 ; CHECK-16:         .p2align 5, , 16
@@ -38,18 +38,18 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %10, %vector.body ]
   %vec.phi13 = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %11, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %y, i64 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = getelementptr inbounds i32, i32* %0, i64 4
-  %3 = bitcast i32* %2 to <4 x i32>*
-  %wide.load14 = load <4 x i32>, <4 x i32>* %3, align 4
-  %4 = getelementptr inbounds i32, i32* %z, i64 %index
-  %5 = bitcast i32* %4 to <4 x i32>*
-  %wide.load15 = load <4 x i32>, <4 x i32>* %5, align 4
-  %6 = getelementptr inbounds i32, i32* %4, i64 4
-  %7 = bitcast i32* %6 to <4 x i32>*
-  %wide.load16 = load <4 x i32>, <4 x i32>* %7, align 4
+  %0 = getelementptr inbounds i32, ptr %y, i64 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <4 x i32>, ptr %1, align 4
+  %2 = getelementptr inbounds i32, ptr %0, i64 4
+  %3 = bitcast ptr %2 to ptr
+  %wide.load14 = load <4 x i32>, ptr %3, align 4
+  %4 = getelementptr inbounds i32, ptr %z, i64 %index
+  %5 = bitcast ptr %4 to ptr
+  %wide.load15 = load <4 x i32>, ptr %5, align 4
+  %6 = getelementptr inbounds i32, ptr %4, i64 4
+  %7 = bitcast ptr %6 to ptr
+  %wide.load16 = load <4 x i32>, ptr %7, align 4
   %8 = add <4 x i32> %wide.load, %vec.phi
   %9 = add <4 x i32> %wide.load14, %vec.phi13
   %10 = add <4 x i32> %8, %wide.load15
@@ -76,10 +76,10 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 for.body:                                         ; preds = %for.body.preheader17, %for.body
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader17 ]
   %b.011 = phi i32 [ %add3, %for.body ], [ %b.011.ph, %for.body.preheader17 ]
-  %arrayidx = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
-  %14 = load i32, i32* %arrayidx, align 4
-  %arrayidx2 = getelementptr inbounds i32, i32* %z, i64 %indvars.iv
-  %15 = load i32, i32* %arrayidx2, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %y, i64 %indvars.iv
+  %14 = load i32, ptr %arrayidx, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %z, i64 %indvars.iv
+  %15 = load i32, ptr %arrayidx2, align 4
   %add = add i32 %14, %b.011
   %add3 = add i32 %add, %15
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1

diff --git a/llvm/test/CodeGen/AArch64/aarch64-p2align-max-bytes.ll b/llvm/test/CodeGen/AArch64/aarch64-p2align-max-bytes.ll
index a3dff36aba2da..99e0d06bf4218 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-p2align-max-bytes.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-p2align-max-bytes.ll
@@ -7,7 +7,7 @@
 ; is padded as expected. The key interest in the CHECK-OBJ-* sections is the size of the padding region (the nops),
 ; and not the exact instructions either side of them (But the last instruction of the EXPLICIT and IMPLICIT checks
 ; should be the same, at different locations)
-define i32 @a(i32 %x, i32* nocapture readonly %y, i32* nocapture readonly %z) {
+define i32 @a(i32 %x, ptr nocapture readonly %y, ptr nocapture readonly %z) {
 ; CHECK-LABEL: a:
 ; CHECK-EXPLICIT:    .p2align 5, , 8
 ; CHECK-IMPLICIT:    .p2align 5
@@ -41,18 +41,18 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %10, %vector.body ]
   %vec.phi13 = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %11, %vector.body ]
-  %0 = getelementptr inbounds i32, i32* %y, i64 %index
-  %1 = bitcast i32* %0 to <4 x i32>*
-  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
-  %2 = getelementptr inbounds i32, i32* %0, i64 4
-  %3 = bitcast i32* %2 to <4 x i32>*
-  %wide.load14 = load <4 x i32>, <4 x i32>* %3, align 4
-  %4 = getelementptr inbounds i32, i32* %z, i64 %index
-  %5 = bitcast i32* %4 to <4 x i32>*
-  %wide.load15 = load <4 x i32>, <4 x i32>* %5, align 4
-  %6 = getelementptr inbounds i32, i32* %4, i64 4
-  %7 = bitcast i32* %6 to <4 x i32>*
-  %wide.load16 = load <4 x i32>, <4 x i32>* %7, align 4
+  %0 = getelementptr inbounds i32, ptr %y, i64 %index
+  %1 = bitcast ptr %0 to ptr
+  %wide.load = load <4 x i32>, ptr %1, align 4
+  %2 = getelementptr inbounds i32, ptr %0, i64 4
+  %3 = bitcast ptr %2 to ptr
+  %wide.load14 = load <4 x i32>, ptr %3, align 4
+  %4 = getelementptr inbounds i32, ptr %z, i64 %index
+  %5 = bitcast ptr %4 to ptr
+  %wide.load15 = load <4 x i32>, ptr %5, align 4
+  %6 = getelementptr inbounds i32, ptr %4, i64 4
+  %7 = bitcast ptr %6 to ptr
+  %wide.load16 = load <4 x i32>, ptr %7, align 4
   %8 = add <4 x i32> %wide.load, %vec.phi
   %9 = add <4 x i32> %wide.load14, %vec.phi13
   %10 = add <4 x i32> %8, %wide.load15
@@ -79,10 +79,10 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 for.body:                                         ; preds = %for.body.preheader17, %for.body
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader17 ]
   %b.011 = phi i32 [ %add3, %for.body ], [ %b.011.ph, %for.body.preheader17 ]
-  %arrayidx = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
-  %14 = load i32, i32* %arrayidx, align 4
-  %arrayidx2 = getelementptr inbounds i32, i32* %z, i64 %indvars.iv
-  %15 = load i32, i32* %arrayidx2, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %y, i64 %indvars.iv
+  %14 = load i32, ptr %arrayidx, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %z, i64 %indvars.iv
+  %15 = load i32, ptr %arrayidx2, align 4
   %add = add i32 %14, %b.011
   %add3 = add i32 %add, %15
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1

diff --git a/llvm/test/CodeGen/AArch64/add-i256.ll b/llvm/test/CodeGen/AArch64/add-i256.ll
index e299d1672fe7a..46385eb7b4376 100644
--- a/llvm/test/CodeGen/AArch64/add-i256.ll
+++ b/llvm/test/CodeGen/AArch64/add-i256.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mcpu=neoverse-n1 < %s | FileCheck %s
 target triple = "aarch64-linux-unknown"
 
-define void @add_i256(i64 %x0, i64 %x1, i64 %x2, i64 %x3, i64 %y1, i64 %y2, i64 %y3, i8* %store_addr_ptr) {
+define void @add_i256(i64 %x0, i64 %x1, i64 %x2, i64 %x3, i64 %y1, i64 %y2, i64 %y3, ptr %store_addr_ptr) {
 ; CHECK-LABEL: add_i256:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adds x8, x0, #1
@@ -50,16 +50,16 @@ entry:
   %split_64bits78 = lshr i256 %z_256, 192
   %z3 = trunc i256 %split_64bits78 to i64
 
-  %outptr0 = bitcast i8* %store_addr_ptr to i64*
-  store i64 %z0, i64* %outptr0, align 4
-  %gep = getelementptr i8, i8* %store_addr_ptr, i64 8
-  %outptr1 = bitcast i8* %gep to i64*
-  store i64 %z1, i64* %outptr1, align 4
-  %store_addr_ofs = getelementptr i8, i8* %store_addr_ptr, i64 16
-  %outptr081 = bitcast i8* %store_addr_ofs to i64*
-  store i64 %z2, i64* %outptr081, align 4
-  %gep82 = getelementptr i8, i8* %store_addr_ofs, i64 8
-  %outptr183 = bitcast i8* %gep82 to i64*
-  store i64 %z3, i64* %outptr183, align 4
+  %outptr0 = bitcast ptr %store_addr_ptr to ptr
+  store i64 %z0, ptr %outptr0, align 4
+  %gep = getelementptr i8, ptr %store_addr_ptr, i64 8
+  %outptr1 = bitcast ptr %gep to ptr
+  store i64 %z1, ptr %outptr1, align 4
+  %store_addr_ofs = getelementptr i8, ptr %store_addr_ptr, i64 16
+  %outptr081 = bitcast ptr %store_addr_ofs to ptr
+  store i64 %z2, ptr %outptr081, align 4
+  %gep82 = getelementptr i8, ptr %store_addr_ofs, i64 8
+  %outptr183 = bitcast ptr %gep82 to ptr
+  store i64 %z3, ptr %outptr183, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/addrsig-macho.ll b/llvm/test/CodeGen/AArch64/addrsig-macho.ll
index 62bc764e0251b..910f635467bab 100644
--- a/llvm/test/CodeGen/AArch64/addrsig-macho.ll
+++ b/llvm/test/CodeGen/AArch64/addrsig-macho.ll
@@ -88,31 +88,31 @@ entry:
 }
 
 
-define void()* @f1() {
-  %f1 = bitcast void()* ()* @f1 to i8*
-  %f2 = bitcast void()* ()* @f2 to i8*
-  %f3 = bitcast void()* @f3 to i8*
-  %g1 = bitcast i32* @g1 to i8*
-  %g2 = bitcast i32* @g2 to i8*
-  %g3 = bitcast i32* @g3 to i8*
-  %dllimport = bitcast i32* @dllimport to i8*
-  %tls = bitcast i32* @tls to i8*
-  %a1 = bitcast i32* @a1 to i8*
-  %a2 = bitcast i32* @a2 to i8*
-  %i1 = bitcast void()* @i1 to i8*
-  %i2 = bitcast void()* @i2 to i8*
-  call void @llvm.dbg.value(metadata i8* bitcast (void()* @metadata_f1 to i8*), metadata !6, metadata !DIExpression()), !dbg !8
-  call void @llvm.dbg.value(metadata i8* bitcast (void()* @metadata_f2 to i8*), metadata !6, metadata !DIExpression()), !dbg !8
-  call void @f4(i8* bitcast (void()* @metadata_f2 to i8*))
+define ptr @f1() {
+  %f1 = bitcast ptr @f1 to ptr
+  %f2 = bitcast ptr @f2 to ptr
+  %f3 = bitcast ptr @f3 to ptr
+  %g1 = bitcast ptr @g1 to ptr
+  %g2 = bitcast ptr @g2 to ptr
+  %g3 = bitcast ptr @g3 to ptr
+  %dllimport = bitcast ptr @dllimport to ptr
+  %tls = bitcast ptr @tls to ptr
+  %a1 = bitcast ptr @a1 to ptr
+  %a2 = bitcast ptr @a2 to ptr
+  %i1 = bitcast ptr @i1 to ptr
+  %i2 = bitcast ptr @i2 to ptr
+  call void @llvm.dbg.value(metadata ptr @metadata_f1, metadata !6, metadata !DIExpression()), !dbg !8
+  call void @llvm.dbg.value(metadata ptr @metadata_f2, metadata !6, metadata !DIExpression()), !dbg !8
+  call void @f4(ptr @metadata_f2)
   unreachable
 }
 
-declare void @f4(i8*) unnamed_addr
+declare void @f4(ptr) unnamed_addr
 
 declare void @metadata_f1()
 declare void @metadata_f2()
 
-define internal void()* @f2() local_unnamed_addr {
+define internal ptr @f2() local_unnamed_addr {
   unreachable
 }
 
@@ -128,11 +128,11 @@ declare void @f3() unnamed_addr
 
 @tls = thread_local global i32 0
 
-@a1 = alias i32, i32* @g1
-@a2 = internal local_unnamed_addr alias i32, i32* @g2
+@a1 = alias i32, ptr @g1
+@a2 = internal local_unnamed_addr alias i32, ptr @g2
 
-@i1 = ifunc void(), void()* ()* @f1
-@i2 = internal local_unnamed_addr ifunc void(), void()* ()* @f2
+@i1 = ifunc void(), ptr @f1
+@i2 = internal local_unnamed_addr ifunc void(), ptr @f2
 
 declare void @llvm.dbg.value(metadata, metadata, metadata)
 

diff --git a/llvm/test/CodeGen/AArch64/align-down.ll b/llvm/test/CodeGen/AArch64/align-down.ll
index 767a1dff445d1..4b1cdfd2770f6 100644
--- a/llvm/test/CodeGen/AArch64/align-down.ll
+++ b/llvm/test/CodeGen/AArch64/align-down.ll
@@ -51,7 +51,7 @@ define i32 @t2_commutative(i32 %ptr, i32 %alignment) nounwind {
 
 ; Extra use tests
 
-define i32 @t3_extrause0(i32 %ptr, i32 %alignment, i32* %mask_storage) nounwind {
+define i32 @t3_extrause0(i32 %ptr, i32 %alignment, ptr %mask_storage) nounwind {
 ; CHECK-LABEL: t3_extrause0:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w8, w1
@@ -60,12 +60,12 @@ define i32 @t3_extrause0(i32 %ptr, i32 %alignment, i32* %mask_storage) nounwind
 ; CHECK-NEXT:    str w9, [x2]
 ; CHECK-NEXT:    ret
   %mask = add i32 %alignment, -1
-  store i32 %mask, i32* %mask_storage
+  store i32 %mask, ptr %mask_storage
   %bias = and i32 %ptr, %mask
   %r = sub i32 %ptr, %bias
   ret i32 %r
 }
-define i32 @n4_extrause1(i32 %ptr, i32 %alignment, i32* %bias_storage) nounwind {
+define i32 @n4_extrause1(i32 %ptr, i32 %alignment, ptr %bias_storage) nounwind {
 ; CHECK-LABEL: n4_extrause1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub w8, w1, #1
@@ -75,11 +75,11 @@ define i32 @n4_extrause1(i32 %ptr, i32 %alignment, i32* %bias_storage) nounwind
 ; CHECK-NEXT:    ret
   %mask = add i32 %alignment, -1
   %bias = and i32 %ptr, %mask ; has extra uses, can't fold
-  store i32 %bias, i32* %bias_storage
+  store i32 %bias, ptr %bias_storage
   %r = sub i32 %ptr, %bias
   ret i32 %r
 }
-define i32 @n5_extrause2(i32 %ptr, i32 %alignment, i32* %mask_storage, i32* %bias_storage) nounwind {
+define i32 @n5_extrause2(i32 %ptr, i32 %alignment, ptr %mask_storage, ptr %bias_storage) nounwind {
 ; CHECK-LABEL: n5_extrause2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub w8, w1, #1
@@ -89,9 +89,9 @@ define i32 @n5_extrause2(i32 %ptr, i32 %alignment, i32* %mask_storage, i32* %bia
 ; CHECK-NEXT:    str w9, [x3]
 ; CHECK-NEXT:    ret
   %mask = add i32 %alignment, -1
-  store i32 %mask, i32* %mask_storage
+  store i32 %mask, ptr %mask_storage
   %bias = and i32 %ptr, %mask ; has extra uses, can't fold
-  store i32 %bias, i32* %bias_storage
+  store i32 %bias, ptr %bias_storage
   %r = sub i32 %ptr, %bias
   ret i32 %r
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll b/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll
index 0b2acdf102c2c..1b9db8b70a1fb 100644
--- a/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll
@@ -13,9 +13,9 @@
 ; Function Attrs: noinline nounwind ssp
 define void @foo(i32 %t) {
 entry:
-  %tmp = load i32, i32* @a, align 4
+  %tmp = load i32, ptr @a, align 4
   %add = add nsw i32 %tmp, %t
-  store i32 %add, i32* @a, align 4
+  store i32 %add, ptr @a, align 4
   ret void
 }
 
@@ -33,22 +33,22 @@ entry:
   br i1 %cmp, label %if.then, label %if.end4
 
 if.then:                                          ; preds = %entry
-  %tmp = load i32, i32* @a, align 4
+  %tmp = load i32, ptr @a, align 4
   %add = add nsw i32 %tmp, %t
   %cmp1 = icmp sgt i32 %add, 12
   br i1 %cmp1, label %if.then2, label %if.end4
 
 if.then2:                                         ; preds = %if.then
   tail call void @foo(i32 %add)
-  %tmp1 = load i32, i32* @a, align 4
+  %tmp1 = load i32, ptr @a, align 4
   br label %if.end4
 
 if.end4:                                          ; preds = %if.then2, %if.then, %entry
   %t.addr.0 = phi i32 [ %tmp1, %if.then2 ], [ %t, %if.then ], [ %t, %entry ]
-  %tmp2 = load i32, i32* @b, align 4
+  %tmp2 = load i32, ptr @b, align 4
   %add5 = add nsw i32 %tmp2, %t.addr.0
   tail call void @foo(i32 %add5)
-  %tmp3 = load i32, i32* @b, align 4
+  %tmp3 = load i32, ptr @b, align 4
   %add6 = add nsw i32 %tmp3, %t.addr.0
   ret i32 %add6
 }
@@ -67,7 +67,7 @@ if.end4:                                          ; preds = %if.then2, %if.then,
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define i32 @getC() {
-  %res = load i32, i32* @C, align 4
+  %res = load i32, ptr @C, align 4
   ret i32 %res
 }
 
@@ -83,7 +83,7 @@ define i32 @getC() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define i64 @getSExtC() {
-  %res = load i32, i32* @C, align 4
+  %res = load i32, ptr @C, align 4
   %sextres = sext i32 %res to i64
   ret i64 %sextres
 }
@@ -103,9 +103,9 @@ define i64 @getSExtC() {
 ; CHECK: .loh AdrpLdrGot [[ADRP_LABEL]], [[LDRGOT_LABEL]]
 define void @getSeveralC(i32 %t) {
 entry:
-  %tmp = load i32, i32* @C, align 4
+  %tmp = load i32, ptr @C, align 4
   %add = add nsw i32 %tmp, %t
-  store i32 %add, i32* @C, align 4
+  store i32 %add, ptr @C, align 4
   ret void
 }
 
@@ -122,7 +122,7 @@ entry:
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define void @setC(i32 %t) {
 entry:
-  store i32 %t, i32* @C, align 4
+  store i32 %t, ptr @C, align 4
   ret void
 }
 
@@ -143,8 +143,8 @@ entry:
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpAddLdr [[ADRP_LABEL]], [[ADDGOT_LABEL]], [[LDR_LABEL]]
 define i32 @getInternalCPlus4() {
-  %addr = getelementptr inbounds i32, i32* @InternalC, i32 4
-  %res = load i32, i32* %addr, align 4
+  %addr = getelementptr inbounds i32, ptr @InternalC, i32 4
+  %res = load i32, ptr %addr, align 4
   ret i32 %res
 }
 
@@ -160,8 +160,8 @@ define i32 @getInternalCPlus4() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpAddLdr [[ADRP_LABEL]], [[ADDGOT_LABEL]], [[LDR_LABEL]]
 define i64 @getSExtInternalCPlus4() {
-  %addr = getelementptr inbounds i32, i32* @InternalC, i32 4
-  %res = load i32, i32* %addr, align 4
+  %addr = getelementptr inbounds i32, ptr @InternalC, i32 4
+  %res = load i32, ptr %addr, align 4
   %sextres = sext i32 %res to i64
   ret i64 %sextres
 }
@@ -181,10 +181,10 @@ define i64 @getSExtInternalCPlus4() {
 ; CHECK: .loh AdrpAdd [[ADRP_LABEL]], [[ADDGOT_LABEL]]
 define void @getSeveralInternalCPlus4(i32 %t) {
 entry:
-  %addr = getelementptr inbounds i32, i32* @InternalC, i32 4
-  %tmp = load i32, i32* %addr, align 4
+  %addr = getelementptr inbounds i32, ptr @InternalC, i32 4
+  %tmp = load i32, ptr %addr, align 4
   %add = add nsw i32 %tmp, %t
-  store i32 %add, i32* %addr, align 4
+  store i32 %add, ptr %addr, align 4
   ret void
 }
 
@@ -201,8 +201,8 @@ entry:
 ; CHECK: .loh AdrpAddStr [[ADRP_LABEL]], [[ADDGOT_LABEL]], [[LDR_LABEL]]
 define void @setInternalCPlus4(i32 %t) {
 entry:
-  %addr = getelementptr inbounds i32, i32* @InternalC, i32 4
-  store i32 %t, i32* %addr, align 4
+  %addr = getelementptr inbounds i32, ptr @InternalC, i32 4
+  store i32 %t, ptr %addr, align 4
   ret void
 }
 
@@ -216,7 +216,7 @@ entry:
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdr [[ADRP_LABEL]], [[LDR_LABEL]]
 define i32 @getInternalC() {
-  %res = load i32, i32* @InternalC, align 4
+  %res = load i32, ptr @InternalC, align 4
   ret i32 %res
 }
 
@@ -230,7 +230,7 @@ define i32 @getInternalC() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdr [[ADRP_LABEL]], [[LDR_LABEL]]
 define i64 @getSExtInternalC() {
-  %res = load i32, i32* @InternalC, align 4
+  %res = load i32, ptr @InternalC, align 4
   %sextres = sext i32 %res to i64
   ret i64 %sextres
 }
@@ -247,9 +247,9 @@ define i64 @getSExtInternalC() {
 ; CHECK-NEXT: ret
 define void @getSeveralInternalC(i32 %t) {
 entry:
-  %tmp = load i32, i32* @InternalC, align 4
+  %tmp = load i32, ptr @InternalC, align 4
   %add = add nsw i32 %tmp, %t
-  store i32 %add, i32* @InternalC, align 4
+  store i32 %add, ptr @InternalC, align 4
   ret void
 }
 
@@ -263,7 +263,7 @@ entry:
 ; CHECK-NEXT: ret
 define void @setInternalC(i32 %t) {
 entry:
-  store i32 %t, i32* @InternalC, align 4
+  store i32 %t, ptr @InternalC, align 4
   ret void
 }
 
@@ -282,7 +282,7 @@ entry:
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGot [[ADRP_LABEL]], [[LDRGOT_LABEL]]
 define i8 @getD() {
-  %res = load i8, i8* @D, align 4
+  %res = load i8, ptr @D, align 4
   ret i8 %res
 }
 
@@ -296,7 +296,7 @@ define i8 @getD() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[STR_LABEL]]
 define void @setD(i8 %t) {
-  store i8 %t, i8* @D, align 4
+  store i8 %t, ptr @D, align 4
   ret void
 }
 
@@ -312,7 +312,7 @@ define void @setD(i8 %t) {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define i32 @getSExtD() {
-  %res = load i8, i8* @D, align 4
+  %res = load i8, ptr @D, align 4
   %sextres = sext i8 %res to i32
   ret i32 %sextres
 }
@@ -329,7 +329,7 @@ define i32 @getSExtD() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define i64 @getSExt64D() {
-  %res = load i8, i8* @D, align 4
+  %res = load i8, ptr @D, align 4
   %sextres = sext i8 %res to i64
   ret i64 %sextres
 }
@@ -347,7 +347,7 @@ define i64 @getSExt64D() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGot [[ADRP_LABEL]], [[LDRGOT_LABEL]]
 define i16 @getE() {
-  %res = load i16, i16* @E, align 4
+  %res = load i16, ptr @E, align 4
   ret i16 %res
 }
 
@@ -363,7 +363,7 @@ define i16 @getE() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define i32 @getSExtE() {
-  %res = load i16, i16* @E, align 4
+  %res = load i16, ptr @E, align 4
   %sextres = sext i16 %res to i32
   ret i32 %sextres
 }
@@ -378,7 +378,7 @@ define i32 @getSExtE() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[STR_LABEL]]
 define void @setE(i16 %t) {
-  store i16 %t, i16* @E, align 4
+  store i16 %t, ptr @E, align 4
   ret void
 }
 
@@ -394,7 +394,7 @@ define void @setE(i16 %t) {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define i64 @getSExt64E() {
-  %res = load i16, i16* @E, align 4
+  %res = load i16, ptr @E, align 4
   %sextres = sext i16 %res to i64
   ret i64 %sextres
 }
@@ -413,7 +413,7 @@ define i64 @getSExt64E() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define i64 @getF() {
-  %res = load i64, i64* @F, align 4
+  %res = load i64, ptr @F, align 4
   ret i64 %res
 }
 
@@ -427,7 +427,7 @@ define i64 @getF() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[STR_LABEL]]
 define void @setF(i64 %t) {
-  store i64 %t, i64* @F, align 4
+  store i64 %t, ptr @F, align 4
   ret void
 }
 
@@ -445,7 +445,7 @@ define void @setF(i64 %t) {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define float @getG() {
-  %res = load float, float* @G, align 4
+  %res = load float, ptr @G, align 4
   ret float %res
 }
 
@@ -459,7 +459,7 @@ define float @getG() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[STR_LABEL]]
 define void @setG(float %t) {
-  store float %t, float* @G, align 4
+  store float %t, ptr @G, align 4
   ret void
 }
 
@@ -477,7 +477,7 @@ define void @setG(float %t) {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define half @getH() {
-  %res = load half, half* @H, align 4
+  %res = load half, ptr @H, align 4
   ret half %res
 }
 
@@ -491,7 +491,7 @@ define half @getH() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[STR_LABEL]]
 define void @setH(half %t) {
-  store half %t, half* @H, align 4
+  store half %t, ptr @H, align 4
   ret void
 }
 
@@ -509,7 +509,7 @@ define void @setH(half %t) {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define double @getI() {
-  %res = load double, double* @I, align 4
+  %res = load double, ptr @I, align 4
   ret double %res
 }
 
@@ -523,7 +523,7 @@ define double @getI() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[STR_LABEL]]
 define void @setI(double %t) {
-  store double %t, double* @I, align 4
+  store double %t, ptr @I, align 4
   ret void
 }
 
@@ -541,7 +541,7 @@ define void @setI(double %t) {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define <2 x i32> @getJ() {
-  %res = load <2 x i32>, <2 x i32>* @J, align 4
+  %res = load <2 x i32>, ptr @J, align 4
   ret <2 x i32> %res
 }
 
@@ -555,7 +555,7 @@ define <2 x i32> @getJ() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[STR_LABEL]]
 define void @setJ(<2 x i32> %t) {
-  store <2 x i32> %t, <2 x i32>* @J, align 4
+  store <2 x i32> %t, ptr @J, align 4
   ret void
 }
 
@@ -573,7 +573,7 @@ define void @setJ(<2 x i32> %t) {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define <4 x i32> @getK() {
-  %res = load <4 x i32>, <4 x i32>* @K, align 4
+  %res = load <4 x i32>, ptr @K, align 4
   ret <4 x i32> %res
 }
 
@@ -587,7 +587,7 @@ define <4 x i32> @getK() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[STR_LABEL]]
 define void @setK(<4 x i32> %t) {
-  store <4 x i32> %t, <4 x i32>* @K, align 4
+  store <4 x i32> %t, ptr @K, align 4
   ret void
 }
 
@@ -605,7 +605,7 @@ define void @setK(<4 x i32> %t) {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define <1 x i8> @getL() {
-  %res = load <1 x i8>, <1 x i8>* @L, align 4
+  %res = load <1 x i8>, ptr @L, align 4
   ret <1 x i8> %res
 }
 
@@ -621,7 +621,7 @@ define <1 x i8> @getL() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGot [[ADRP_LABEL]], [[LDRGOT_LABEL]]
 define void @setL(<1 x i8> %t) {
-  store <1 x i8> %t, <1 x i8>* @L, align 4
+  store <1 x i8> %t, ptr @L, align 4
   ret void
 }
 
@@ -642,20 +642,20 @@ define void @setL(<1 x i8> %t) {
 ; CHECK: ext.16b v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, #1
 ; CHECK: ret
 ; CHECK: .loh AdrpLdr [[LOH_LABEL0]], [[LOH_LABEL1]]
-define void @uninterestingSub(i8* nocapture %row) #0 {
-  %tmp = bitcast i8* %row to <16 x i8>*
-  %tmp1 = load <16 x i8>, <16 x i8>* %tmp, align 16
+define void @uninterestingSub(ptr nocapture %row) #0 {
+  %tmp = bitcast ptr %row to ptr
+  %tmp1 = load <16 x i8>, ptr %tmp, align 16
   %vext43 = shufflevector <16 x i8> <i8 undef, i8 16, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2>, <16 x i8> %tmp1, <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
   %add.i.414 = add <16 x i8> zeroinitializer, %vext43
-  store <16 x i8> %add.i.414, <16 x i8>* %tmp, align 16
-  %add.ptr51 = getelementptr inbounds i8, i8* %row, i64 16
-  %tmp2 = bitcast i8* %add.ptr51 to <16 x i8>*
-  %tmp3 = load <16 x i8>, <16 x i8>* %tmp2, align 16
-  %tmp4 = bitcast i8* undef to <16 x i8>*
-  %tmp5 = load <16 x i8>, <16 x i8>* %tmp4, align 16
+  store <16 x i8> %add.i.414, ptr %tmp, align 16
+  %add.ptr51 = getelementptr inbounds i8, ptr %row, i64 16
+  %tmp2 = bitcast ptr %add.ptr51 to ptr
+  %tmp3 = load <16 x i8>, ptr %tmp2, align 16
+  %tmp4 = bitcast ptr undef to ptr
+  %tmp5 = load <16 x i8>, ptr %tmp4, align 16
   %vext157 = shufflevector <16 x i8> %tmp3, <16 x i8> %tmp5, <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
   %add.i.402 = add <16 x i8> zeroinitializer, %vext157
-  store <16 x i8> %add.i.402, <16 x i8>* %tmp4, align 16
+  store <16 x i8> %add.i.402, ptr %tmp4, align 16
   ret void
 }
 
@@ -675,9 +675,9 @@ if.end.i:
   %mul = fmul double %x, 1.000000e-06
   %add = fadd double %mul, %mul
   %sub = fsub double %add, %add
-  call void (i8*, ...) @callee(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.89, i64 0, i64 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.90, i64 0, i64 0), double %sub)
+  call void (ptr, ...) @callee(ptr @.str.89, ptr @.str.90, double %sub)
   unreachable
 }
-declare void @callee(i8* nocapture readonly, ...) 
+declare void @callee(ptr nocapture readonly, ...) 
 
 attributes #0 = { "target-cpu"="cyclone" }

diff --git a/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog-bad-outline.mir b/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog-bad-outline.mir
index 94c27e1bf1aa7..ab4f0af97e324 100644
--- a/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog-bad-outline.mir
+++ b/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog-bad-outline.mir
@@ -2,7 +2,7 @@
 #
 # This test ensure no outlined epilog is formed when X16 is live across the helper.
 --- |
-  @FuncPtr = local_unnamed_addr global i32 (i32)* null, align 8
+  @FuncPtr = local_unnamed_addr global ptr null, align 8
 
   define i32 @_Z3fooi(i32) minsize "frame-pointer"="all" {
     ret i32 0

diff --git a/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog.ll b/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog.ll
index c414c7c2464f8..fed564afd1893 100644
--- a/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog.ll
@@ -74,6 +74,6 @@ declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 ; CHECK-LINUX-NEXT: ret     x16
 
 ; nothing to check - hit assert if not bailing out for swiftasync
-define void @swift_async(i8* swiftasync %ctx) minsize "frame-pointer"="all" {
+define void @swift_async(ptr swiftasync %ctx) minsize "frame-pointer"="all" {
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-ldp.ll b/llvm/test/CodeGen/AArch64/arm64-ldp.ll
index 03b7a8c8ba4f4..29d478d79b438 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ldp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ldp.ll
@@ -1,35 +1,35 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-eabi -verify-machineinstrs | FileCheck %s
 
-define i32 @ldp_int(i32* %p) nounwind {
+define i32 @ldp_int(ptr %p) nounwind {
 ; CHECK-LABEL: ldp_int:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp w8, w9, [x0]
 ; CHECK-NEXT:    add w0, w9, w8
 ; CHECK-NEXT:    ret
-  %tmp = load i32, i32* %p, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
-  %tmp1 = load i32, i32* %add.ptr, align 4
+  %tmp = load i32, ptr %p, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
+  %tmp1 = load i32, ptr %add.ptr, align 4
   %add = add nsw i32 %tmp1, %tmp
   ret i32 %add
 }
 
-define i64 @ldp_sext_int(i32* %p) nounwind {
+define i64 @ldp_sext_int(ptr %p) nounwind {
 ; CHECK-LABEL: ldp_sext_int:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldpsw x8, x9, [x0]
 ; CHECK-NEXT:    add x0, x9, x8
 ; CHECK-NEXT:    ret
-  %tmp = load i32, i32* %p, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
-  %tmp1 = load i32, i32* %add.ptr, align 4
+  %tmp = load i32, ptr %p, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
+  %tmp1 = load i32, ptr %add.ptr, align 4
   %sexttmp = sext i32 %tmp to i64
   %sexttmp1 = sext i32 %tmp1 to i64
   %add = add nsw i64 %sexttmp1, %sexttmp
   ret i64 %add
 }
 
-define i64 @ldp_half_sext_res0_int(i32* %p) nounwind {
+define i64 @ldp_half_sext_res0_int(ptr %p) nounwind {
 ; CHECK-LABEL: ldp_half_sext_res0_int:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp w8, w9, [x0]
@@ -37,16 +37,16 @@ define i64 @ldp_half_sext_res0_int(i32* %p) nounwind {
 ; CHECK-NEXT:    sxtw x8, w8
 ; CHECK-NEXT:    add x0, x9, x8
 ; CHECK-NEXT:    ret
-  %tmp = load i32, i32* %p, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
-  %tmp1 = load i32, i32* %add.ptr, align 4
+  %tmp = load i32, ptr %p, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
+  %tmp1 = load i32, ptr %add.ptr, align 4
   %sexttmp = sext i32 %tmp to i64
   %sexttmp1 = zext i32 %tmp1 to i64
   %add = add nsw i64 %sexttmp1, %sexttmp
   ret i64 %add
 }
 
-define i64 @ldp_half_sext_res1_int(i32* %p) nounwind {
+define i64 @ldp_half_sext_res1_int(ptr %p) nounwind {
 ; CHECK-LABEL: ldp_half_sext_res1_int:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp w8, w9, [x0]
@@ -54,9 +54,9 @@ define i64 @ldp_half_sext_res1_int(i32* %p) nounwind {
 ; CHECK-NEXT:    sxtw x9, w9
 ; CHECK-NEXT:    add x0, x9, x8
 ; CHECK-NEXT:    ret
-  %tmp = load i32, i32* %p, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
-  %tmp1 = load i32, i32* %add.ptr, align 4
+  %tmp = load i32, ptr %p, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
+  %tmp1 = load i32, ptr %add.ptr, align 4
   %sexttmp = zext i32 %tmp to i64
   %sexttmp1 = sext i32 %tmp1 to i64
   %add = add nsw i64 %sexttmp1, %sexttmp
@@ -64,90 +64,90 @@ define i64 @ldp_half_sext_res1_int(i32* %p) nounwind {
 }
 
 
-define i64 @ldp_long(i64* %p) nounwind {
+define i64 @ldp_long(ptr %p) nounwind {
 ; CHECK-LABEL: ldp_long:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp x8, x9, [x0]
 ; CHECK-NEXT:    add x0, x9, x8
 ; CHECK-NEXT:    ret
-  %tmp = load i64, i64* %p, align 8
-  %add.ptr = getelementptr inbounds i64, i64* %p, i64 1
-  %tmp1 = load i64, i64* %add.ptr, align 8
+  %tmp = load i64, ptr %p, align 8
+  %add.ptr = getelementptr inbounds i64, ptr %p, i64 1
+  %tmp1 = load i64, ptr %add.ptr, align 8
   %add = add nsw i64 %tmp1, %tmp
   ret i64 %add
 }
 
-define float @ldp_float(float* %p) nounwind {
+define float @ldp_float(ptr %p) nounwind {
 ; CHECK-LABEL: ldp_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp s0, s1, [x0]
 ; CHECK-NEXT:    fadd s0, s0, s1
 ; CHECK-NEXT:    ret
-  %tmp = load float, float* %p, align 4
-  %add.ptr = getelementptr inbounds float, float* %p, i64 1
-  %tmp1 = load float, float* %add.ptr, align 4
+  %tmp = load float, ptr %p, align 4
+  %add.ptr = getelementptr inbounds float, ptr %p, i64 1
+  %tmp1 = load float, ptr %add.ptr, align 4
   %add = fadd float %tmp, %tmp1
   ret float %add
 }
 
-define double @ldp_double(double* %p) nounwind {
+define double @ldp_double(ptr %p) nounwind {
 ; CHECK-LABEL: ldp_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp d0, d1, [x0]
 ; CHECK-NEXT:    fadd d0, d0, d1
 ; CHECK-NEXT:    ret
-  %tmp = load double, double* %p, align 8
-  %add.ptr = getelementptr inbounds double, double* %p, i64 1
-  %tmp1 = load double, double* %add.ptr, align 8
+  %tmp = load double, ptr %p, align 8
+  %add.ptr = getelementptr inbounds double, ptr %p, i64 1
+  %tmp1 = load double, ptr %add.ptr, align 8
   %add = fadd double %tmp, %tmp1
   ret double %add
 }
 
-define <2 x double> @ldp_doublex2(<2 x double>* %p) nounwind {
+define <2 x double> @ldp_doublex2(ptr %p) nounwind {
 ; CHECK-LABEL: ldp_doublex2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
 ; CHECK-NEXT:    fadd v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
-  %tmp = load <2 x double>, <2 x double>* %p, align 16
-  %add.ptr = getelementptr inbounds <2 x double>, <2 x double>* %p, i64 1
-  %tmp1 = load <2 x double>, <2 x double>* %add.ptr, align 16
+  %tmp = load <2 x double>, ptr %p, align 16
+  %add.ptr = getelementptr inbounds <2 x double>, ptr %p, i64 1
+  %tmp1 = load <2 x double>, ptr %add.ptr, align 16
   %add = fadd <2 x double> %tmp, %tmp1
   ret <2 x double> %add
 }
 
 ; Test the load/store optimizer---combine ldurs into a ldp, if appropriate
-define i32 @ldur_int(i32* %a) nounwind {
+define i32 @ldur_int(ptr %a) nounwind {
 ; CHECK-LABEL: ldur_int:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp w9, w8, [x0, #-8]
 ; CHECK-NEXT:    add w0, w8, w9
 ; CHECK-NEXT:    ret
-  %p1 = getelementptr inbounds i32, i32* %a, i32 -1
-  %tmp1 = load i32, i32* %p1, align 2
-  %p2 = getelementptr inbounds i32, i32* %a, i32 -2
-  %tmp2 = load i32, i32* %p2, align 2
+  %p1 = getelementptr inbounds i32, ptr %a, i32 -1
+  %tmp1 = load i32, ptr %p1, align 2
+  %p2 = getelementptr inbounds i32, ptr %a, i32 -2
+  %tmp2 = load i32, ptr %p2, align 2
   %tmp3 = add i32 %tmp1, %tmp2
   ret i32 %tmp3
 }
 
-define i64 @ldur_sext_int(i32* %a) nounwind {
+define i64 @ldur_sext_int(ptr %a) nounwind {
 ; CHECK-LABEL: ldur_sext_int:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldpsw x9, x8, [x0, #-8]
 ; CHECK-NEXT:    add x0, x8, x9
 ; CHECK-NEXT:    ret
-  %p1 = getelementptr inbounds i32, i32* %a, i32 -1
-  %tmp1 = load i32, i32* %p1, align 2
-  %p2 = getelementptr inbounds i32, i32* %a, i32 -2
-  %tmp2 = load i32, i32* %p2, align 2
+  %p1 = getelementptr inbounds i32, ptr %a, i32 -1
+  %tmp1 = load i32, ptr %p1, align 2
+  %p2 = getelementptr inbounds i32, ptr %a, i32 -2
+  %tmp2 = load i32, ptr %p2, align 2
   %sexttmp1 = sext i32 %tmp1 to i64
   %sexttmp2 = sext i32 %tmp2 to i64
   %tmp3 = add i64 %sexttmp1, %sexttmp2
   ret i64 %tmp3
 }
 
-define i64 @ldur_half_sext_int_res0(i32* %a) nounwind {
+define i64 @ldur_half_sext_int_res0(ptr %a) nounwind {
 ; CHECK-LABEL: ldur_half_sext_int_res0:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp w9, w8, [x0, #-8]
@@ -155,17 +155,17 @@ define i64 @ldur_half_sext_int_res0(i32* %a) nounwind {
 ; CHECK-NEXT:    sxtw x9, w9
 ; CHECK-NEXT:    add x0, x8, x9
 ; CHECK-NEXT:    ret
-  %p1 = getelementptr inbounds i32, i32* %a, i32 -1
-  %tmp1 = load i32, i32* %p1, align 2
-  %p2 = getelementptr inbounds i32, i32* %a, i32 -2
-  %tmp2 = load i32, i32* %p2, align 2
+  %p1 = getelementptr inbounds i32, ptr %a, i32 -1
+  %tmp1 = load i32, ptr %p1, align 2
+  %p2 = getelementptr inbounds i32, ptr %a, i32 -2
+  %tmp2 = load i32, ptr %p2, align 2
   %sexttmp1 = zext i32 %tmp1 to i64
   %sexttmp2 = sext i32 %tmp2 to i64
   %tmp3 = add i64 %sexttmp1, %sexttmp2
   ret i64 %tmp3
 }
 
-define i64 @ldur_half_sext_int_res1(i32* %a) nounwind {
+define i64 @ldur_half_sext_int_res1(ptr %a) nounwind {
 ; CHECK-LABEL: ldur_half_sext_int_res1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp w9, w8, [x0, #-8]
@@ -173,10 +173,10 @@ define i64 @ldur_half_sext_int_res1(i32* %a) nounwind {
 ; CHECK-NEXT:    sxtw x8, w8
 ; CHECK-NEXT:    add x0, x8, x9
 ; CHECK-NEXT:    ret
-  %p1 = getelementptr inbounds i32, i32* %a, i32 -1
-  %tmp1 = load i32, i32* %p1, align 2
-  %p2 = getelementptr inbounds i32, i32* %a, i32 -2
-  %tmp2 = load i32, i32* %p2, align 2
+  %p1 = getelementptr inbounds i32, ptr %a, i32 -1
+  %tmp1 = load i32, ptr %p1, align 2
+  %p2 = getelementptr inbounds i32, ptr %a, i32 -2
+  %tmp2 = load i32, ptr %p2, align 2
   %sexttmp1 = sext i32 %tmp1 to i64
   %sexttmp2 = zext i32 %tmp2 to i64
   %tmp3 = add i64 %sexttmp1, %sexttmp2
@@ -184,94 +184,94 @@ define i64 @ldur_half_sext_int_res1(i32* %a) nounwind {
 }
 
 
-define i64 @ldur_long(i64* %a) nounwind ssp {
+define i64 @ldur_long(ptr %a) nounwind ssp {
 ; CHECK-LABEL: ldur_long:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp x9, x8, [x0, #-16]
 ; CHECK-NEXT:    add x0, x8, x9
 ; CHECK-NEXT:    ret
-  %p1 = getelementptr inbounds i64, i64* %a, i64 -1
-  %tmp1 = load i64, i64* %p1, align 2
-  %p2 = getelementptr inbounds i64, i64* %a, i64 -2
-  %tmp2 = load i64, i64* %p2, align 2
+  %p1 = getelementptr inbounds i64, ptr %a, i64 -1
+  %tmp1 = load i64, ptr %p1, align 2
+  %p2 = getelementptr inbounds i64, ptr %a, i64 -2
+  %tmp2 = load i64, ptr %p2, align 2
   %tmp3 = add i64 %tmp1, %tmp2
   ret i64 %tmp3
 }
 
-define float @ldur_float(float* %a) {
+define float @ldur_float(ptr %a) {
 ; CHECK-LABEL: ldur_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp s1, s0, [x0, #-8]
 ; CHECK-NEXT:    fadd s0, s0, s1
 ; CHECK-NEXT:    ret
-  %p1 = getelementptr inbounds float, float* %a, i64 -1
-  %tmp1 = load float, float* %p1, align 2
-  %p2 = getelementptr inbounds float, float* %a, i64 -2
-  %tmp2 = load float, float* %p2, align 2
+  %p1 = getelementptr inbounds float, ptr %a, i64 -1
+  %tmp1 = load float, ptr %p1, align 2
+  %p2 = getelementptr inbounds float, ptr %a, i64 -2
+  %tmp2 = load float, ptr %p2, align 2
   %tmp3 = fadd float %tmp1, %tmp2
   ret float %tmp3
 }
 
-define double @ldur_double(double* %a) {
+define double @ldur_double(ptr %a) {
 ; CHECK-LABEL: ldur_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp d1, d0, [x0, #-16]
 ; CHECK-NEXT:    fadd d0, d0, d1
 ; CHECK-NEXT:    ret
-  %p1 = getelementptr inbounds double, double* %a, i64 -1
-  %tmp1 = load double, double* %p1, align 2
-  %p2 = getelementptr inbounds double, double* %a, i64 -2
-  %tmp2 = load double, double* %p2, align 2
+  %p1 = getelementptr inbounds double, ptr %a, i64 -1
+  %tmp1 = load double, ptr %p1, align 2
+  %p2 = getelementptr inbounds double, ptr %a, i64 -2
+  %tmp2 = load double, ptr %p2, align 2
   %tmp3 = fadd double %tmp1, %tmp2
   ret double %tmp3
 }
 
-define <2 x double> @ldur_doublex2(<2 x double>* %a) {
+define <2 x double> @ldur_doublex2(ptr %a) {
 ; CHECK-LABEL: ldur_doublex2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0, #-32]
 ; CHECK-NEXT:    fadd v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
-  %p1 = getelementptr inbounds <2 x double>, <2 x double>* %a, i64 -1
-  %tmp1 = load <2 x double>, <2 x double>* %p1, align 2
-  %p2 = getelementptr inbounds <2 x double>, <2 x double>* %a, i64 -2
-  %tmp2 = load <2 x double>, <2 x double>* %p2, align 2
+  %p1 = getelementptr inbounds <2 x double>, ptr %a, i64 -1
+  %tmp1 = load <2 x double>, ptr %p1, align 2
+  %p2 = getelementptr inbounds <2 x double>, ptr %a, i64 -2
+  %tmp2 = load <2 x double>, ptr %p2, align 2
   %tmp3 = fadd <2 x double> %tmp1, %tmp2
   ret <2 x double> %tmp3
 }
 
 ; Now check some boundary conditions
-define i64 @pairUpBarelyIn(i64* %a) nounwind ssp {
+define i64 @pairUpBarelyIn(ptr %a) nounwind ssp {
 ; CHECK-LABEL: pairUpBarelyIn:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp x9, x8, [x0, #-256]
 ; CHECK-NEXT:    add x0, x8, x9
 ; CHECK-NEXT:    ret
-  %p1 = getelementptr inbounds i64, i64* %a, i64 -31
-  %tmp1 = load i64, i64* %p1, align 2
-  %p2 = getelementptr inbounds i64, i64* %a, i64 -32
-  %tmp2 = load i64, i64* %p2, align 2
+  %p1 = getelementptr inbounds i64, ptr %a, i64 -31
+  %tmp1 = load i64, ptr %p1, align 2
+  %p2 = getelementptr inbounds i64, ptr %a, i64 -32
+  %tmp2 = load i64, ptr %p2, align 2
   %tmp3 = add i64 %tmp1, %tmp2
   ret i64 %tmp3
 }
 
-define i64 @pairUpBarelyInSext(i32* %a) nounwind ssp {
+define i64 @pairUpBarelyInSext(ptr %a) nounwind ssp {
 ; CHECK-LABEL: pairUpBarelyInSext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldpsw x9, x8, [x0, #-256]
 ; CHECK-NEXT:    add x0, x8, x9
 ; CHECK-NEXT:    ret
-  %p1 = getelementptr inbounds i32, i32* %a, i64 -63
-  %tmp1 = load i32, i32* %p1, align 2
-  %p2 = getelementptr inbounds i32, i32* %a, i64 -64
-  %tmp2 = load i32, i32* %p2, align 2
+  %p1 = getelementptr inbounds i32, ptr %a, i64 -63
+  %tmp1 = load i32, ptr %p1, align 2
+  %p2 = getelementptr inbounds i32, ptr %a, i64 -64
+  %tmp2 = load i32, ptr %p2, align 2
   %sexttmp1 = sext i32 %tmp1 to i64
   %sexttmp2 = sext i32 %tmp2 to i64
   %tmp3 = add i64 %sexttmp1, %sexttmp2
   ret i64 %tmp3
 }
 
-define i64 @pairUpBarelyInHalfSextRes0(i32* %a) nounwind ssp {
+define i64 @pairUpBarelyInHalfSextRes0(ptr %a) nounwind ssp {
 ; CHECK-LABEL: pairUpBarelyInHalfSextRes0:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp w9, w8, [x0, #-256]
@@ -279,17 +279,17 @@ define i64 @pairUpBarelyInHalfSextRes0(i32* %a) nounwind ssp {
 ; CHECK-NEXT:    sxtw x9, w9
 ; CHECK-NEXT:    add x0, x8, x9
 ; CHECK-NEXT:    ret
-  %p1 = getelementptr inbounds i32, i32* %a, i64 -63
-  %tmp1 = load i32, i32* %p1, align 2
-  %p2 = getelementptr inbounds i32, i32* %a, i64 -64
-  %tmp2 = load i32, i32* %p2, align 2
+  %p1 = getelementptr inbounds i32, ptr %a, i64 -63
+  %tmp1 = load i32, ptr %p1, align 2
+  %p2 = getelementptr inbounds i32, ptr %a, i64 -64
+  %tmp2 = load i32, ptr %p2, align 2
   %sexttmp1 = zext i32 %tmp1 to i64
   %sexttmp2 = sext i32 %tmp2 to i64
   %tmp3 = add i64 %sexttmp1, %sexttmp2
   ret i64 %tmp3
 }
 
-define i64 @pairUpBarelyInHalfSextRes1(i32* %a) nounwind ssp {
+define i64 @pairUpBarelyInHalfSextRes1(ptr %a) nounwind ssp {
 ; CHECK-LABEL: pairUpBarelyInHalfSextRes1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp w9, w8, [x0, #-256]
@@ -297,17 +297,17 @@ define i64 @pairUpBarelyInHalfSextRes1(i32* %a) nounwind ssp {
 ; CHECK-NEXT:    sxtw x8, w8
 ; CHECK-NEXT:    add x0, x8, x9
 ; CHECK-NEXT:    ret
-  %p1 = getelementptr inbounds i32, i32* %a, i64 -63
-  %tmp1 = load i32, i32* %p1, align 2
-  %p2 = getelementptr inbounds i32, i32* %a, i64 -64
-  %tmp2 = load i32, i32* %p2, align 2
+  %p1 = getelementptr inbounds i32, ptr %a, i64 -63
+  %tmp1 = load i32, ptr %p1, align 2
+  %p2 = getelementptr inbounds i32, ptr %a, i64 -64
+  %tmp2 = load i32, ptr %p2, align 2
   %sexttmp1 = sext i32 %tmp1 to i64
   %sexttmp2 = zext i32 %tmp2 to i64
   %tmp3 = add i64 %sexttmp1, %sexttmp2
   ret i64 %tmp3
 }
 
-define i64 @pairUpBarelyOut(i64* %a) nounwind ssp {
+define i64 @pairUpBarelyOut(ptr %a) nounwind ssp {
 ; Don't be fragile about which loads or manipulations of the base register
 ; are used---just check that there isn't an ldp before the add
 ; CHECK-LABEL: pairUpBarelyOut:
@@ -317,15 +317,15 @@ define i64 @pairUpBarelyOut(i64* %a) nounwind ssp {
 ; CHECK-NEXT:    ldr x8, [x8]
 ; CHECK-NEXT:    add x0, x9, x8
 ; CHECK-NEXT:    ret
-  %p1 = getelementptr inbounds i64, i64* %a, i64 -32
-  %tmp1 = load i64, i64* %p1, align 2
-  %p2 = getelementptr inbounds i64, i64* %a, i64 -33
-  %tmp2 = load i64, i64* %p2, align 2
+  %p1 = getelementptr inbounds i64, ptr %a, i64 -32
+  %tmp1 = load i64, ptr %p1, align 2
+  %p2 = getelementptr inbounds i64, ptr %a, i64 -33
+  %tmp2 = load i64, ptr %p2, align 2
   %tmp3 = add i64 %tmp1, %tmp2
   ret i64 %tmp3
 }
 
-define i64 @pairUpBarelyOutSext(i32* %a) nounwind ssp {
+define i64 @pairUpBarelyOutSext(ptr %a) nounwind ssp {
 ; Don't be fragile about which loads or manipulations of the base register
 ; are used---just check that there isn't an ldp before the add
 ; CHECK-LABEL: pairUpBarelyOutSext:
@@ -335,57 +335,57 @@ define i64 @pairUpBarelyOutSext(i32* %a) nounwind ssp {
 ; CHECK-NEXT:    ldrsw x8, [x8]
 ; CHECK-NEXT:    add x0, x9, x8
 ; CHECK-NEXT:    ret
-  %p1 = getelementptr inbounds i32, i32* %a, i64 -64
-  %tmp1 = load i32, i32* %p1, align 2
-  %p2 = getelementptr inbounds i32, i32* %a, i64 -65
-  %tmp2 = load i32, i32* %p2, align 2
+  %p1 = getelementptr inbounds i32, ptr %a, i64 -64
+  %tmp1 = load i32, ptr %p1, align 2
+  %p2 = getelementptr inbounds i32, ptr %a, i64 -65
+  %tmp2 = load i32, ptr %p2, align 2
   %sexttmp1 = sext i32 %tmp1 to i64
   %sexttmp2 = sext i32 %tmp2 to i64
   %tmp3 = add i64 %sexttmp1, %sexttmp2
   ret i64 %tmp3
 }
 
-define i64 @pairUpNotAligned(i64* %a) nounwind ssp {
+define i64 @pairUpNotAligned(ptr %a) nounwind ssp {
 ; CHECK-LABEL: pairUpNotAligned:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur x8, [x0, #-143]
 ; CHECK-NEXT:    ldur x9, [x0, #-135]
 ; CHECK-NEXT:    add x0, x8, x9
 ; CHECK-NEXT:    ret
-  %p1 = getelementptr inbounds i64, i64* %a, i64 -18
-  %bp1 = bitcast i64* %p1 to i8*
-  %bp1p1 = getelementptr inbounds i8, i8* %bp1, i64 1
-  %dp1 = bitcast i8* %bp1p1 to i64*
-  %tmp1 = load i64, i64* %dp1, align 1
-
-  %p2 = getelementptr inbounds i64, i64* %a, i64 -17
-  %bp2 = bitcast i64* %p2 to i8*
-  %bp2p1 = getelementptr inbounds i8, i8* %bp2, i64 1
-  %dp2 = bitcast i8* %bp2p1 to i64*
-  %tmp2 = load i64, i64* %dp2, align 1
+  %p1 = getelementptr inbounds i64, ptr %a, i64 -18
+  %bp1 = bitcast ptr %p1 to ptr
+  %bp1p1 = getelementptr inbounds i8, ptr %bp1, i64 1
+  %dp1 = bitcast ptr %bp1p1 to ptr
+  %tmp1 = load i64, ptr %dp1, align 1
+
+  %p2 = getelementptr inbounds i64, ptr %a, i64 -17
+  %bp2 = bitcast ptr %p2 to ptr
+  %bp2p1 = getelementptr inbounds i8, ptr %bp2, i64 1
+  %dp2 = bitcast ptr %bp2p1 to ptr
+  %tmp2 = load i64, ptr %dp2, align 1
 
   %tmp3 = add i64 %tmp1, %tmp2
   ret i64 %tmp3
 }
 
-define i64 @pairUpNotAlignedSext(i32* %a) nounwind ssp {
+define i64 @pairUpNotAlignedSext(ptr %a) nounwind ssp {
 ; CHECK-LABEL: pairUpNotAlignedSext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldursw x8, [x0, #-71]
 ; CHECK-NEXT:    ldursw x9, [x0, #-67]
 ; CHECK-NEXT:    add x0, x8, x9
 ; CHECK-NEXT:    ret
-  %p1 = getelementptr inbounds i32, i32* %a, i64 -18
-  %bp1 = bitcast i32* %p1 to i8*
-  %bp1p1 = getelementptr inbounds i8, i8* %bp1, i64 1
-  %dp1 = bitcast i8* %bp1p1 to i32*
-  %tmp1 = load i32, i32* %dp1, align 1
-
-  %p2 = getelementptr inbounds i32, i32* %a, i64 -17
-  %bp2 = bitcast i32* %p2 to i8*
-  %bp2p1 = getelementptr inbounds i8, i8* %bp2, i64 1
-  %dp2 = bitcast i8* %bp2p1 to i32*
-  %tmp2 = load i32, i32* %dp2, align 1
+  %p1 = getelementptr inbounds i32, ptr %a, i64 -18
+  %bp1 = bitcast ptr %p1 to ptr
+  %bp1p1 = getelementptr inbounds i8, ptr %bp1, i64 1
+  %dp1 = bitcast ptr %bp1p1 to ptr
+  %tmp1 = load i32, ptr %dp1, align 1
+
+  %p2 = getelementptr inbounds i32, ptr %a, i64 -17
+  %bp2 = bitcast ptr %p2 to ptr
+  %bp2p1 = getelementptr inbounds i8, ptr %bp2, i64 1
+  %dp2 = bitcast ptr %bp2p1 to ptr
+  %tmp2 = load i32, ptr %dp2, align 1
 
   %sexttmp1 = sext i32 %tmp1 to i64
   %sexttmp2 = sext i32 %tmp2 to i64
@@ -393,9 +393,9 @@ define i64 @pairUpNotAlignedSext(i32* %a) nounwind ssp {
  ret i64 %tmp3
 }
 
-declare void @use-ptr(i32*)
+declare void @use-ptr(ptr)
 
-define i64 @ldp_sext_int_pre(i32* %p) nounwind {
+define i64 @ldp_sext_int_pre(ptr %p) nounwind {
 ; CHECK-LABEL: ldp_sext_int_pre:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
@@ -406,19 +406,19 @@ define i64 @ldp_sext_int_pre(i32* %p) nounwind {
 ; CHECK-NEXT:    add x0, x9, x8
 ; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i32, i32* %p, i64 2
-  call void @use-ptr(i32* %ptr)
-  %add.ptr = getelementptr inbounds i32, i32* %ptr, i64 0
-  %tmp = load i32, i32* %add.ptr, align 4
-  %add.ptr1 = getelementptr inbounds i32, i32* %ptr, i64 1
-  %tmp1 = load i32, i32* %add.ptr1, align 4
+  %ptr = getelementptr inbounds i32, ptr %p, i64 2
+  call void @use-ptr(ptr %ptr)
+  %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 0
+  %tmp = load i32, ptr %add.ptr, align 4
+  %add.ptr1 = getelementptr inbounds i32, ptr %ptr, i64 1
+  %tmp1 = load i32, ptr %add.ptr1, align 4
   %sexttmp = sext i32 %tmp to i64
   %sexttmp1 = sext i32 %tmp1 to i64
   %add = add nsw i64 %sexttmp1, %sexttmp
   ret i64 %add
 }
 
-define i64 @ldp_sext_int_post(i32* %p) nounwind {
+define i64 @ldp_sext_int_post(ptr %p) nounwind {
 ; CHECK-LABEL: ldp_sext_int_post:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x30, [sp, #-32]! // 8-byte Folded Spill
@@ -429,13 +429,13 @@ define i64 @ldp_sext_int_post(i32* %p) nounwind {
 ; CHECK-NEXT:    ldp x20, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp], #32 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %tmp = load i32, i32* %p, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
-  %tmp1 = load i32, i32* %add.ptr, align 4
+  %tmp = load i32, ptr %p, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
+  %tmp1 = load i32, ptr %add.ptr, align 4
   %sexttmp = sext i32 %tmp to i64
   %sexttmp1 = sext i32 %tmp1 to i64
-  %ptr = getelementptr inbounds i32, i32* %add.ptr, i64 1
-  call void @use-ptr(i32* %ptr)
+  %ptr = getelementptr inbounds i32, ptr %add.ptr, i64 1
+  call void @use-ptr(ptr %ptr)
   %add = add nsw i64 %sexttmp1, %sexttmp
   ret i64 %add
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
index a4efc4d360ed1..4cdc6cc117bb8 100644
--- a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
@@ -24,55 +24,55 @@ entry:
   %i = alloca i32, align 4
   %xx = alloca i32, align 4
   %yy = alloca i32, align 4
-  store i32 0, i32* %retval
-  %0 = bitcast [8 x i32]* %x to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 bitcast ([8 x i32]* @main.x to i8*), i64 32, i1 false)
-  %1 = bitcast [8 x i32]* %y to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 bitcast ([8 x i32]* @main.y to i8*), i64 32, i1 false)
-  store i32 0, i32* %xx, align 4
-  store i32 0, i32* %yy, align 4
-  store i32 0, i32* %i, align 4
+  store i32 0, ptr %retval
+  %0 = bitcast ptr %x to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 @main.x, i64 32, i1 false)
+  %1 = bitcast ptr %y to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %1, ptr align 4 @main.y, i64 32, i1 false)
+  store i32 0, ptr %xx, align 4
+  store i32 0, ptr %yy, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc, %entry
-  %2 = load i32, i32* %i, align 4
+  %2 = load i32, ptr %i, align 4
   %cmp = icmp slt i32 %2, 8
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  %3 = load i32, i32* %i, align 4
+  %3 = load i32, ptr %i, align 4
   %idxprom = sext i32 %3 to i64
-  %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %x, i32 0, i64 %idxprom
-  %4 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [8 x i32], ptr %x, i32 0, i64 %idxprom
+  %4 = load i32, ptr %arrayidx, align 4
   %add = add nsw i32 %4, 1
-  store i32 %add, i32* %xx, align 4
-  %5 = load i32, i32* %xx, align 4
+  store i32 %add, ptr %xx, align 4
+  %5 = load i32, ptr %xx, align 4
   %add1 = add nsw i32 %5, 12
-  store i32 %add1, i32* %xx, align 4
-  %6 = load i32, i32* %xx, align 4
+  store i32 %add1, ptr %xx, align 4
+  %6 = load i32, ptr %xx, align 4
   %add2 = add nsw i32 %6, 23
-  store i32 %add2, i32* %xx, align 4
-  %7 = load i32, i32* %xx, align 4
+  store i32 %add2, ptr %xx, align 4
+  %7 = load i32, ptr %xx, align 4
   %add3 = add nsw i32 %7, 34
-  store i32 %add3, i32* %xx, align 4
-  %8 = load i32, i32* %i, align 4
+  store i32 %add3, ptr %xx, align 4
+  %8 = load i32, ptr %i, align 4
   %idxprom4 = sext i32 %8 to i64
-  %arrayidx5 = getelementptr inbounds [8 x i32], [8 x i32]* %y, i32 0, i64 %idxprom4
-  %9 = load i32, i32* %arrayidx5, align 4
-  %10 = load i32, i32* %yy, align 4
+  %arrayidx5 = getelementptr inbounds [8 x i32], ptr %y, i32 0, i64 %idxprom4
+  %9 = load i32, ptr %arrayidx5, align 4
+  %10 = load i32, ptr %yy, align 4
   %mul = mul nsw i32 %10, %9
-  store i32 %mul, i32* %yy, align 4
+  store i32 %mul, ptr %yy, align 4
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body
-  %11 = load i32, i32* %i, align 4
+  %11 = load i32, ptr %i, align 4
   %inc = add nsw i32 %11, 1
-  store i32 %inc, i32* %i, align 4
+  store i32 %inc, ptr %i, align 4
   br label %for.cond
 
 for.end:                                          ; preds = %for.cond
-  %12 = load i32, i32* %xx, align 4
-  %13 = load i32, i32* %yy, align 4
+  %12 = load i32, ptr %xx, align 4
+  %13 = load i32, ptr %yy, align 4
   %add6 = add nsw i32 %12, %13
   ret i32 %add6
 }
@@ -105,7 +105,7 @@ define <4 x float> @neon4xfloat(<4 x float> %A, <4 x float> %B) {
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #1
 
 attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { nounwind }
@@ -115,14 +115,14 @@ attributes #1 = { nounwind }
 ;   [ARM64] Cortex-a53 schedule mode can't handle NEON post-increment load
 ;
 ; Nothing explicit to check other than llc not crashing.
-define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld2(i8* %A, i8** %ptr) {
-  %ld2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 32
-  store i8* %tmp, i8** %ptr
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld2(ptr %A, ptr %ptr) {
+  %ld2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 32
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8> } %ld2
 }
 
-declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8*)
+declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0(ptr)
 
 ; Regression Test for PR20057.
 ;
@@ -133,7 +133,7 @@ declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8*)
 ; CHECK: *** Final schedule for %bb.0 ***
 ; CHECK: BRK
 ; CHECK: ********** INTERVALS **********
-define void @testResourceConflict(float* %ptr) {
+define void @testResourceConflict(ptr %ptr) {
 entry:
   %add1 = fadd float undef, undef
   %mul2 = fmul float undef, undef
@@ -152,22 +152,22 @@ entry:
   %mul13 = fmul float %add1, %mul9
   %mul21 = fmul float %add5, %mul11
   %add22 = fadd float %mul13, %mul21
-  store float %add22, float* %ptr, align 4
+  store float %add22, ptr %ptr, align 4
   %mul28 = fmul float %add1, %mul10
   %mul33 = fmul float %add5, %mul12
   %add34 = fadd float %mul33, %mul28
-  store float %add34, float* %ptr, align 4
+  store float %add34, ptr %ptr, align 4
   %mul240 = fmul float undef, %mul9
   %add246 = fadd float %mul240, undef
-  store float %add246, float* %ptr, align 4
+  store float %add246, ptr %ptr, align 4
   %mul52 = fmul float undef, %mul10
   %mul57 = fmul float undef, %mul12
   %add58 = fadd float %mul57, %mul52
-  store float %add58, float* %ptr, align 4
+  store float %add58, ptr %ptr, align 4
   %mul27 = fmul float 0.000000e+00, %mul9
   %mul81 = fmul float undef, %mul10
   %add82 = fadd float %mul27, %mul81
-  store float %add82, float* %ptr, align 4
+  store float %add82, ptr %ptr, align 4
   call void @llvm.trap()
   unreachable
 }
@@ -187,18 +187,18 @@ entry:
   br label %loop
 
 loop:
-  %0 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0i8(i8* null)
-  %ptr = bitcast i8* undef to <2 x i64>*
-  store <2 x i64> %v, <2 x i64>* %ptr, align 4
-  %ptr1 = bitcast i8* undef to <2 x i64>*
-  store <2 x i64> %v, <2 x i64>* %ptr1, align 4
-  %ptr2 = bitcast i8* undef to <2 x i64>*
-  store <2 x i64> %v, <2 x i64>* %ptr2, align 4
-  %ptr3 = bitcast i8* undef to <2 x i64>*
-  store <2 x i64> %v, <2 x i64>* %ptr3, align 4
-  %ptr4 = bitcast i8* undef to <2 x i64>*
-  store <2 x i64> %v, <2 x i64>* %ptr4, align 4
+  %0 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0(ptr null)
+  %ptr = bitcast ptr undef to ptr
+  store <2 x i64> %v, ptr %ptr, align 4
+  %ptr1 = bitcast ptr undef to ptr
+  store <2 x i64> %v, ptr %ptr1, align 4
+  %ptr2 = bitcast ptr undef to ptr
+  store <2 x i64> %v, ptr %ptr2, align 4
+  %ptr3 = bitcast ptr undef to ptr
+  store <2 x i64> %v, ptr %ptr3, align 4
+  %ptr4 = bitcast ptr undef to ptr
+  store <2 x i64> %v, ptr %ptr4, align 4
   br label %loop
 }
 
-declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0i8(i8*)
+declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0(ptr)

diff  --git a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
index fc497233881df..82b34efab6834 100644
--- a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
@@ -30,42 +30,42 @@ entry:
   %i = alloca i32, align 4
   %xx = alloca i32, align 4
   %yy = alloca i32, align 4
-  store i32 0, i32* %retval
-  %0 = bitcast [8 x i32]* %x to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 bitcast ([8 x i32]* @main.x to i8*), i64 32, i1 false)
-  %1 = bitcast [8 x i32]* %y to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 bitcast ([8 x i32]* @main.y to i8*), i64 32, i1 false)
-  store i32 0, i32* %xx, align 4
-  store i32 0, i32* %yy, align 4
-  store i32 0, i32* %i, align 4
+  store i32 0, ptr %retval
+  %0 = bitcast ptr %x to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 @main.x, i64 32, i1 false)
+  %1 = bitcast ptr %y to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %1, ptr align 4 @main.y, i64 32, i1 false)
+  store i32 0, ptr %xx, align 4
+  store i32 0, ptr %yy, align 4
+  store i32 0, ptr %i, align 4
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc, %entry
-  %2 = load i32, i32* %i, align 4
+  %2 = load i32, ptr %i, align 4
   %cmp = icmp slt i32 %2, 8
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  %3 = load i32, i32* %yy, align 4
-  %4 = load i32, i32* %i, align 4
+  %3 = load i32, ptr %yy, align 4
+  %4 = load i32, ptr %i, align 4
   %idxprom = sext i32 %4 to i64
-  %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %x, i32 0, i64 %idxprom
-  %5 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [8 x i32], ptr %x, i32 0, i64 %idxprom
+  %5 = load i32, ptr %arrayidx, align 4
   %add = add nsw i32 %5, 1
-  store i32 %add, i32* %xx, align 4
-  %6 = load i32, i32* %xx, align 4
+  store i32 %add, ptr %xx, align 4
+  %6 = load i32, ptr %xx, align 4
   %add1 = add nsw i32 %6, 12
-  store i32 %add1, i32* %xx, align 4
-  %7 = load i32, i32* %xx, align 4
+  store i32 %add1, ptr %xx, align 4
+  %7 = load i32, ptr %xx, align 4
   %add2 = add nsw i32 %7, 23
-  store i32 %add2, i32* %xx, align 4
-  %8 = load i32, i32* %xx, align 4
+  store i32 %add2, ptr %xx, align 4
+  %8 = load i32, ptr %xx, align 4
   %add3 = add nsw i32 %8, 34
-  store i32 %add3, i32* %xx, align 4
-  %9 = load i32, i32* %i, align 4
+  store i32 %add3, ptr %xx, align 4
+  %9 = load i32, ptr %i, align 4
   %idxprom4 = sext i32 %9 to i64
-  %arrayidx5 = getelementptr inbounds [8 x i32], [8 x i32]* %y, i32 0, i64 %idxprom4
-  %10 = load i32, i32* %arrayidx5, align 4
+  %arrayidx5 = getelementptr inbounds [8 x i32], ptr %y, i32 0, i64 %idxprom4
+  %10 = load i32, ptr %arrayidx5, align 4
 
   %add4 = add nsw i32 %9, %add
   %add5 = add nsw i32 %10, %add1
@@ -83,30 +83,30 @@ for.body:                                         ; preds = %for.cond
   %add14 = add nsw i32 %10, %add10
   %add15 = add nsw i32 %add13, %add14
 
-  store i32 %add15, i32* %xx, align 4
+  store i32 %add15, ptr %xx, align 4
 
   %div = sdiv i32 %4, %5
 
-  store i32 %div, i32* %yy, align 4
+  store i32 %div, ptr %yy, align 4
 
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body
-  %11 = load i32, i32* %i, align 4
+  %11 = load i32, ptr %i, align 4
   %inc = add nsw i32 %11, 1
-  store i32 %inc, i32* %i, align 4
+  store i32 %inc, ptr %i, align 4
   br label %for.cond
 
 for.end:                                          ; preds = %for.cond
-  %12 = load i32, i32* %xx, align 4
-  %13 = load i32, i32* %yy, align 4
+  %12 = load i32, ptr %xx, align 4
+  %13 = load i32, ptr %yy, align 4
   %add67 = add nsw i32 %12, %13
   ret i32 %add67
 }
 
 
 ; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #1
 
 attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-non-pow2-ldst.ll b/llvm/test/CodeGen/AArch64/arm64-non-pow2-ldst.ll
index d48c3d437d620..c2ec0502d83bd 100644
--- a/llvm/test/CodeGen/AArch64/arm64-non-pow2-ldst.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-non-pow2-ldst.ll
@@ -8,7 +8,7 @@ define i24 @ldi24(ptr %p) nounwind {
 ; CHECK-NEXT:    ldrh w9, [x0]
 ; CHECK-NEXT:    orr w0, w9, w8, lsl #16
 ; CHECK-NEXT:    ret
-    %r = load i24, i24* %p
+    %r = load i24, ptr %p
     ret i24 %r
 }
 
@@ -21,7 +21,7 @@ define i56 @ldi56(ptr %p) nounwind {
 ; CHECK-NEXT:    ldr w9, [x0]
 ; CHECK-NEXT:    orr x0, x9, x8, lsl #32
 ; CHECK-NEXT:    ret
-    %r = load i56, i56* %p
+    %r = load i56, ptr %p
     ret i56 %r
 }
 
@@ -32,7 +32,7 @@ define i80 @ldi80(ptr %p) nounwind {
 ; CHECK-NEXT:    ldrh w1, [x0, #8]
 ; CHECK-NEXT:    mov x0, x8
 ; CHECK-NEXT:    ret
-    %r = load i80, i80* %p
+    %r = load i80, ptr %p
     ret i80 %r
 }
 
@@ -46,7 +46,7 @@ define i120 @ldi120(ptr %p) nounwind {
 ; CHECK-NEXT:    ldr x0, [x0]
 ; CHECK-NEXT:    orr x1, x9, x8, lsl #32
 ; CHECK-NEXT:    ret
-    %r = load i120, i120* %p
+    %r = load i120, ptr %p
     ret i120 %r
 }
 
@@ -60,7 +60,7 @@ define i280 @ldi280(ptr %p) nounwind {
 ; CHECK-NEXT:    orr x4, x10, x9, lsl #16
 ; CHECK-NEXT:    mov x0, x8
 ; CHECK-NEXT:    ret
-    %r = load i280, i280* %p
+    %r = load i280, ptr %p
     ret i280 %r
 }
 
@@ -71,7 +71,7 @@ define void @sti24(ptr %p, i24 %a) nounwind {
 ; CHECK-NEXT:    strh w1, [x0]
 ; CHECK-NEXT:    strb w8, [x0, #2]
 ; CHECK-NEXT:    ret
-    store i24 %a, i24* %p
+    store i24 %a, ptr %p
     ret void
 }
 
@@ -84,7 +84,7 @@ define void @sti56(ptr %p, i56 %a) nounwind {
 ; CHECK-NEXT:    strb w8, [x0, #6]
 ; CHECK-NEXT:    strh w9, [x0, #4]
 ; CHECK-NEXT:    ret
-    store i56 %a, i56* %p
+    store i56 %a, ptr %p
     ret void
 }
 
@@ -94,7 +94,7 @@ define void @sti80(ptr %p, i80 %a) nounwind {
 ; CHECK-NEXT:    str x2, [x0]
 ; CHECK-NEXT:    strh w3, [x0, #8]
 ; CHECK-NEXT:    ret
-    store i80 %a, i80* %p
+    store i80 %a, ptr %p
     ret void
 }
 
@@ -108,7 +108,7 @@ define void @sti120(ptr %p, i120 %a) nounwind {
 ; CHECK-NEXT:    strb w8, [x0, #14]
 ; CHECK-NEXT:    strh w9, [x0, #12]
 ; CHECK-NEXT:    ret
-    store i120 %a, i120* %p
+    store i120 %a, ptr %p
     ret void
 }
 
@@ -121,7 +121,7 @@ define void @sti280(ptr %p, i280 %a) nounwind {
 ; CHECK-NEXT:    strh w6, [x0, #32]
 ; CHECK-NEXT:    strb w8, [x0, #34]
 ; CHECK-NEXT:    ret
-    store i280 %a, i280* %p
+    store i280 %a, ptr %p
     ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-preserve-all.ll b/llvm/test/CodeGen/AArch64/arm64-preserve-all.ll
index 9fc29f20c2471..778f4e2f9ec01 100644
--- a/llvm/test/CodeGen/AArch64/arm64-preserve-all.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-preserve-all.ll
@@ -48,9 +48,9 @@ entry:
 
 
   call preserve_allcc void @preserve_all()
-  %0 = load i32, i32* %v, align 4
+  %0 = load i32, ptr %v, align 4
   %1 = call i32 asm sideeffect "mov ${0:w}, w9", "=r,r"(i32 %0) #2
   %2 = call i32 asm sideeffect "fneg v9.4s, v9.4s", "=r,~{v9}"() #2
-  store i32 %1, i32* %v, align 4
+  store i32 %1, ptr %v, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-zip.ll b/llvm/test/CodeGen/AArch64/arm64-zip.ll
index c6e3c3540f6e9..c707265c06c5c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-zip.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-zip.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @vzipi8(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: vzipi8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -10,15 +10,15 @@ define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ; CHECK-NEXT:    zip2.8b v0, v0, v1
 ; CHECK-NEXT:    add.8b v0, v2, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
   %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
   %tmp5 = add <8 x i8> %tmp3, %tmp4
   ret <8 x i8> %tmp5
 }
 
-define <4 x i16> @vzipi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @vzipi16(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: vzipi16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -27,15 +27,15 @@ define <4 x i16> @vzipi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ; CHECK-NEXT:    zip2.4h v0, v0, v1
 ; CHECK-NEXT:    add.4h v0, v2, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
   %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
   %tmp5 = add <4 x i16> %tmp3, %tmp4
   ret <4 x i16> %tmp5
 }
 
-define <16 x i8> @vzipQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @vzipQi8(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: vzipQi8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -44,15 +44,15 @@ define <16 x i8> @vzipQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ; CHECK-NEXT:    zip2.16b v0, v0, v1
 ; CHECK-NEXT:    add.16b v0, v2, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <16 x i8>, <16 x i8>* %A
-  %tmp2 = load <16 x i8>, <16 x i8>* %B
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
   %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
   %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
   %tmp5 = add <16 x i8> %tmp3, %tmp4
   ret <16 x i8> %tmp5
 }
 
-define <8 x i16> @vzipQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @vzipQi16(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: vzipQi16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -61,15 +61,15 @@ define <8 x i16> @vzipQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ; CHECK-NEXT:    zip2.8h v0, v0, v1
 ; CHECK-NEXT:    add.8h v0, v2, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i16>, <8 x i16>* %B
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
   %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
   %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
   %tmp5 = add <8 x i16> %tmp3, %tmp4
   ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @vzipQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @vzipQi32(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: vzipQi32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -78,15 +78,15 @@ define <4 x i32> @vzipQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ; CHECK-NEXT:    zip2.4s v0, v0, v1
 ; CHECK-NEXT:    add.4s v0, v2, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i32>, <4 x i32>* %B
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
   %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
   %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
   %tmp5 = add <4 x i32> %tmp3, %tmp4
   ret <4 x i32> %tmp5
 }
 
-define <4 x float> @vzipQf(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @vzipQf(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: vzipQf:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -95,8 +95,8 @@ define <4 x float> @vzipQf(<4 x float>* %A, <4 x float>* %B) nounwind {
 ; CHECK-NEXT:    zip2.4s v0, v0, v1
 ; CHECK-NEXT:    fadd.4s v0, v2, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x float>, <4 x float>* %A
-  %tmp2 = load <4 x float>, <4 x float>* %B
+  %tmp1 = load <4 x float>, ptr %A
+  %tmp2 = load <4 x float>, ptr %B
   %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
   %tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
   %tmp5 = fadd <4 x float> %tmp3, %tmp4
@@ -105,7 +105,7 @@ define <4 x float> @vzipQf(<4 x float>* %A, <4 x float>* %B) nounwind {
 
 ; Undef shuffle indices should not prevent matching to VZIP:
 
-define <8 x i8> @vzipi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @vzipi8_undef(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: vzipi8_undef:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -114,15 +114,15 @@ define <8 x i8> @vzipi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ; CHECK-NEXT:    zip2.8b v0, v0, v1
 ; CHECK-NEXT:    add.8b v0, v2, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 undef, i32 1, i32 9, i32 undef, i32 10, i32 3, i32 11>
   %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 undef, i32 undef, i32 15>
   %tmp5 = add <8 x i8> %tmp3, %tmp4
   ret <8 x i8> %tmp5
 }
 
-define <16 x i8> @vzipQi8_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @vzipQi8_undef(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: vzipQi8_undef:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -131,8 +131,8 @@ define <16 x i8> @vzipQi8_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ; CHECK-NEXT:    zip2.16b v0, v0, v1
 ; CHECK-NEXT:    add.16b v0, v2, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <16 x i8>, <16 x i8>* %A
-  %tmp2 = load <16 x i8>, <16 x i8>* %B
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
   %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 undef, i32 undef, i32 undef, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
   %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 undef, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 undef, i32 14, i32 30, i32 undef, i32 31>
   %tmp5 = add <16 x i8> %tmp3, %tmp4

diff  --git a/llvm/test/CodeGen/AArch64/branch-relax-block-size.mir b/llvm/test/CodeGen/AArch64/branch-relax-block-size.mir
index ec6900cc970c7..2684a550d9215 100644
--- a/llvm/test/CodeGen/AArch64/branch-relax-block-size.mir
+++ b/llvm/test/CodeGen/AArch64/branch-relax-block-size.mir
@@ -13,14 +13,14 @@
   target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64--linux-gnu"
 
-  define i32 @test(i32* %a) #0 {
+  define i32 @test(ptr %a) #0 {
   entry:
-    %call = tail call i32 @validate(i32* %a)
+    %call = tail call i32 @validate(ptr %a)
     %tobool = icmp eq i32 %call, 0
     br i1 %tobool, label %return, label %if.then
 
   if.then:                                          ; preds = %entry
-    %0 = load i32, i32* %a, align 4
+    %0 = load i32, ptr %a, align 4
     br label %return
 
   return:                                           ; preds = %entry, %if.then
@@ -28,7 +28,7 @@
     ret i32 %retval.0
   }
 
-  declare i32 @validate(i32*)
+  declare i32 @validate(ptr)
 
   attributes #0 = { nounwind uwtable "disable-tail-calls"="false" "frame-pointer"="all" }
 

diff  --git a/llvm/test/CodeGen/AArch64/compute-call-frame-size-unreachable-pass.ll b/llvm/test/CodeGen/AArch64/compute-call-frame-size-unreachable-pass.ll
index 6e2bc6e856008..68d66b0c914c2 100644
--- a/llvm/test/CodeGen/AArch64/compute-call-frame-size-unreachable-pass.ll
+++ b/llvm/test/CodeGen/AArch64/compute-call-frame-size-unreachable-pass.ll
@@ -3,8 +3,8 @@
 ; This tests that the MFI assert in unreachableblockelim pass
 ; does not trigger
 
-%struct.ngtcp2_crypto_aead = type { i8*, i64 }
-%struct.ngtcp2_crypto_aead_ctx = type { i8* }
+%struct.ngtcp2_crypto_aead = type { ptr, i64 }
+%struct.ngtcp2_crypto_aead_ctx = type { ptr }
 
 ; Function Attrs: noinline optnone
 define internal fastcc void @decrypt_pkt() unnamed_addr #0 !type !0 !type !1 {
@@ -15,7 +15,7 @@ trap:                                             ; preds = %entry
   unreachable, !nosanitize !2
 
 cont:                                             ; preds = %entry
-  %call = call i32 undef(i8* undef, %struct.ngtcp2_crypto_aead* undef, %struct.ngtcp2_crypto_aead_ctx* undef, i8* undef, i64 undef, i8* undef, i64 undef, i8* undef, i64 undef)
+  %call = call i32 undef(ptr undef, ptr undef, ptr undef, ptr undef, i64 undef, ptr undef, i64 undef, ptr undef, i64 undef)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/concat_vector-truncate-combine.ll b/llvm/test/CodeGen/AArch64/concat_vector-truncate-combine.ll
index e7c85281459fa..1c254f9ed935d 100644
--- a/llvm/test/CodeGen/AArch64/concat_vector-truncate-combine.ll
+++ b/llvm/test/CodeGen/AArch64/concat_vector-truncate-combine.ll
@@ -93,7 +93,7 @@ entry:
 
 ; The concat_vectors operation in this test is introduced when splitting
 ; the fptrunc operation due to the split <vscale x 4 x double> input operand.
-define void @test_concat_fptrunc_v4f64_to_v4f32(<vscale x 4 x float>* %ptr) #1 {
+define void @test_concat_fptrunc_v4f64_to_v4f32(ptr %ptr) #1 {
 ; CHECK-LABEL: test_concat_fptrunc_v4f64_to_v4f32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
@@ -103,7 +103,7 @@ define void @test_concat_fptrunc_v4f64_to_v4f32(<vscale x 4 x float>* %ptr) #1 {
 entry:
   %0 = shufflevector <vscale x 4 x double> insertelement (<vscale x 4 x double> poison, double 1.000000e+00, i32 0), <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
   %1 = fptrunc <vscale x 4 x double> %0 to <vscale x 4 x float>
-  store <vscale x 4 x float> %1, <vscale x 4 x float>* %ptr, align 4
+  store <vscale x 4 x float> %1, ptr %ptr, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/dag-combine-lifetime-end-store-typesize.ll b/llvm/test/CodeGen/AArch64/dag-combine-lifetime-end-store-typesize.ll
index ac6fd776106c8..75156225f1d86 100644
--- a/llvm/test/CodeGen/AArch64/dag-combine-lifetime-end-store-typesize.ll
+++ b/llvm/test/CodeGen/AArch64/dag-combine-lifetime-end-store-typesize.ll
@@ -7,12 +7,12 @@
 declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
 declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
 
-define void @foo(<vscale x 4 x i32>* nocapture dereferenceable(16) %ptr) {
+define void @foo(ptr nocapture dereferenceable(16) %ptr) {
 entry:
   %tmp = alloca <vscale x 4 x i32>, align 8
-  %tmp_ptr = bitcast <vscale x 4 x i32>* %tmp to ptr
+  %tmp_ptr = bitcast ptr %tmp to ptr
   call void @llvm.lifetime.start.p0(i64 32, ptr %tmp_ptr)
-  store <vscale x 4 x i32> undef, <vscale x 4 x i32>* %ptr
+  store <vscale x 4 x i32> undef, ptr %ptr
   call void @llvm.lifetime.end.p0(i64 32, ptr %tmp_ptr)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/dag-combine-trunc-build-vec.ll b/llvm/test/CodeGen/AArch64/dag-combine-trunc-build-vec.ll
index f1bca142bd18a..5763e4f43c1d2 100644
--- a/llvm/test/CodeGen/AArch64/dag-combine-trunc-build-vec.ll
+++ b/llvm/test/CodeGen/AArch64/dag-combine-trunc-build-vec.ll
@@ -25,8 +25,8 @@ define void @no_combine(i32 %p) local_unnamed_addr {
   %2 = shufflevector <16 x i32> %1, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 0, i32 0, i32 0, i32 undef, i32 undef, i32 undef, i32 undef>
   %3 = shufflevector <16 x i32> %2, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 4, i32 4, i32 4, i32 4, i32 undef, i32 undef, i32 undef, i32 undef, i32 4, i32 4, i32 4, i32 4>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 28, i32 29, i32 30, i32 31>
   %4 = trunc <16 x i32> %3 to <16 x i8>
-  %5 = bitcast i8* undef to <16 x i8>*
-  store <16 x i8> %4, <16 x i8>* %5, align 1
+  %5 = bitcast ptr undef to ptr
+  store <16 x i8> %4, ptr %5, align 1
   ret void
 }
 
@@ -40,7 +40,7 @@ define void @do_combine(i32 %p) local_unnamed_addr {
   %1 = insertelement <16 x i32> undef, i32 %p, i32 0
   %2 = shufflevector <16 x i32> %1, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 0, i32 0, i32 0, i32 undef, i32 undef, i32 undef, i32 undef>
   %3 = trunc <16 x i32> %2 to <16 x i8>
-  %4 = bitcast i8* undef to <16 x i8>*
-  store <16 x i8> %3, <16 x i8>* %4, align 1
+  %4 = bitcast ptr undef to ptr
+  store <16 x i8> %3, ptr %4, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
index 22cff36afaf7f..d44d45ea03b6b 100644
--- a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
+++ b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
@@ -43,39 +43,39 @@
     %localv1 = alloca <vscale x 4 x i32>, align 16
     %localp0 = alloca <vscale x 16 x i1>, align 2
     %localp1 = alloca <vscale x 16 x i1>, align 2
-    store <vscale x 4 x i32> %z0, <vscale x 4 x i32>* %z0.addr, align 16
-    call void @llvm.dbg.declare(metadata <vscale x 4 x i32>* %z0.addr, metadata !29, metadata !DIExpression()), !dbg !30
-    store <vscale x 4 x i32> %z1, <vscale x 4 x i32>* %z1.addr, align 16
-    call void @llvm.dbg.declare(metadata <vscale x 4 x i32>* %z1.addr, metadata !31, metadata !DIExpression()), !dbg !32
-    store <vscale x 16 x i1> %p0, <vscale x 16 x i1>* %p0.addr, align 2
-    call void @llvm.dbg.declare(metadata <vscale x 16 x i1>* %p0.addr, metadata !33, metadata !DIExpression()), !dbg !34
-    store <vscale x 16 x i1> %p1, <vscale x 16 x i1>* %p1.addr, align 2
-    call void @llvm.dbg.declare(metadata <vscale x 16 x i1>* %p1.addr, metadata !35, metadata !DIExpression()), !dbg !36
-    store i32 %w0, i32* %w0.addr, align 4
-    call void @llvm.dbg.declare(metadata i32* %w0.addr, metadata !37, metadata !DIExpression()), !dbg !38
-    call void @llvm.dbg.declare(metadata i32* %local_gpr0, metadata !39, metadata !DIExpression()), !dbg !40
-    %0 = load i32, i32* %w0.addr, align 4, !dbg !41
-    store i32 %0, i32* %local_gpr0, align 4, !dbg !40
-    call void @llvm.dbg.declare(metadata <vscale x 4 x i32>* %localv0, metadata !42, metadata !DIExpression()), !dbg !43
-    %1 = load <vscale x 4 x i32>, <vscale x 4 x i32>* %z0.addr, align 16, !dbg !44
-    store <vscale x 4 x i32> %1, <vscale x 4 x i32>* %localv0, align 16, !dbg !43
-    call void @llvm.dbg.declare(metadata <vscale x 4 x i32>* %localv1, metadata !45, metadata !DIExpression()), !dbg !46
-    %2 = load <vscale x 4 x i32>, <vscale x 4 x i32>* %z1.addr, align 16, !dbg !47
-    store <vscale x 4 x i32> %2, <vscale x 4 x i32>* %localv1, align 16, !dbg !46
-    call void @llvm.dbg.declare(metadata <vscale x 16 x i1>* %localp0, metadata !48, metadata !DIExpression()), !dbg !49
-    %3 = load <vscale x 16 x i1>, <vscale x 16 x i1>* %p0.addr, align 2, !dbg !50
-    store <vscale x 16 x i1> %3, <vscale x 16 x i1>* %localp0, align 2, !dbg !49
-    call void @llvm.dbg.declare(metadata <vscale x 16 x i1>* %localp1, metadata !51, metadata !DIExpression()), !dbg !52
-    %4 = load <vscale x 16 x i1>, <vscale x 16 x i1>* %p1.addr, align 2, !dbg !53
-    store <vscale x 16 x i1> %4, <vscale x 16 x i1>* %localp1, align 2, !dbg !52
-    %call = call <vscale x 4 x i32> @bar(i32* %local_gpr0, <vscale x 4 x i32>* %localv0, <vscale x 4 x i32>* %localv1, <vscale x 16 x i1>* %localp0, <vscale x 16 x i1>* %localp1), !dbg !54
+    store <vscale x 4 x i32> %z0, ptr %z0.addr, align 16
+    call void @llvm.dbg.declare(metadata ptr %z0.addr, metadata !29, metadata !DIExpression()), !dbg !30
+    store <vscale x 4 x i32> %z1, ptr %z1.addr, align 16
+    call void @llvm.dbg.declare(metadata ptr %z1.addr, metadata !31, metadata !DIExpression()), !dbg !32
+    store <vscale x 16 x i1> %p0, ptr %p0.addr, align 2
+    call void @llvm.dbg.declare(metadata ptr %p0.addr, metadata !33, metadata !DIExpression()), !dbg !34
+    store <vscale x 16 x i1> %p1, ptr %p1.addr, align 2
+    call void @llvm.dbg.declare(metadata ptr %p1.addr, metadata !35, metadata !DIExpression()), !dbg !36
+    store i32 %w0, ptr %w0.addr, align 4
+    call void @llvm.dbg.declare(metadata ptr %w0.addr, metadata !37, metadata !DIExpression()), !dbg !38
+    call void @llvm.dbg.declare(metadata ptr %local_gpr0, metadata !39, metadata !DIExpression()), !dbg !40
+    %0 = load i32, ptr %w0.addr, align 4, !dbg !41
+    store i32 %0, ptr %local_gpr0, align 4, !dbg !40
+    call void @llvm.dbg.declare(metadata ptr %localv0, metadata !42, metadata !DIExpression()), !dbg !43
+    %1 = load <vscale x 4 x i32>, ptr %z0.addr, align 16, !dbg !44
+    store <vscale x 4 x i32> %1, ptr %localv0, align 16, !dbg !43
+    call void @llvm.dbg.declare(metadata ptr %localv1, metadata !45, metadata !DIExpression()), !dbg !46
+    %2 = load <vscale x 4 x i32>, ptr %z1.addr, align 16, !dbg !47
+    store <vscale x 4 x i32> %2, ptr %localv1, align 16, !dbg !46
+    call void @llvm.dbg.declare(metadata ptr %localp0, metadata !48, metadata !DIExpression()), !dbg !49
+    %3 = load <vscale x 16 x i1>, ptr %p0.addr, align 2, !dbg !50
+    store <vscale x 16 x i1> %3, ptr %localp0, align 2, !dbg !49
+    call void @llvm.dbg.declare(metadata ptr %localp1, metadata !51, metadata !DIExpression()), !dbg !52
+    %4 = load <vscale x 16 x i1>, ptr %p1.addr, align 2, !dbg !53
+    store <vscale x 16 x i1> %4, ptr %localp1, align 2, !dbg !52
+    %call = call <vscale x 4 x i32> @bar(ptr %local_gpr0, ptr %localv0, ptr %localv1, ptr %localp0, ptr %localp1), !dbg !54
     ret <vscale x 4 x i32> %call, !dbg !55
   }
 
   ; Function Attrs: nounwind readnone speculatable willreturn
   declare void @llvm.dbg.declare(metadata, metadata, metadata)
 
-  declare dso_local <vscale x 4 x i32> @bar(i32*, <vscale x 4 x i32>*, <vscale x 4 x i32>*, <vscale x 16 x i1>*, <vscale x 16 x i1>*)
+  declare dso_local <vscale x 4 x i32> @bar(ptr, ptr, ptr, ptr, ptr)
 
   attributes #0 = { "frame-pointer"="non-leaf" "target-features"="+neon,+sve" }
 

diff  --git a/llvm/test/CodeGen/AArch64/divrem.ll b/llvm/test/CodeGen/AArch64/divrem.ll
index 9f648eb63eac1..5cd7e098d00bb 100644
--- a/llvm/test/CodeGen/AArch64/divrem.ll
+++ b/llvm/test/CodeGen/AArch64/divrem.ll
@@ -7,16 +7,16 @@ define <2 x i32> @test_udivrem(<2 x i32> %x, < 2 x i32> %y, < 2 x i32>* %z) {
 ; CHECK-DAG: udivrem
 ; CHECK-NOT: LLVM ERROR: Cannot select
   %div = udiv <2 x i32> %x, %y
-  store <2 x i32> %div, <2 x i32>* %z
+  store <2 x i32> %div, ptr %z
   %1 = urem <2 x i32> %x, %y
   ret <2 x i32> %1
 }
 
-define <4 x i32> @test_sdivrem(<4 x i32> %x,  <4 x i32>* %y) {
+define <4 x i32> @test_sdivrem(<4 x i32> %x,  ptr %y) {
 ; CHECK-LABEL: test_sdivrem
 ; CHECK-DAG: sdivrem
   %div = sdiv <4 x i32> %x,  < i32 20, i32 20, i32 20, i32 20 >
-  store <4 x i32> %div, <4 x i32>* %y
+  store <4 x i32> %div, ptr %y
   %1 = srem <4 x i32> %x, < i32 20, i32 20, i32 20, i32 20 >
   ret <4 x i32> %1
 }

diff  --git a/llvm/test/CodeGen/AArch64/dont-shrink-wrap-stack-mayloadorstore.mir b/llvm/test/CodeGen/AArch64/dont-shrink-wrap-stack-mayloadorstore.mir
index 25e8c109c3995..1c4447bffd872 100644
--- a/llvm/test/CodeGen/AArch64/dont-shrink-wrap-stack-mayloadorstore.mir
+++ b/llvm/test/CodeGen/AArch64/dont-shrink-wrap-stack-mayloadorstore.mir
@@ -46,7 +46,7 @@
 
 
  ; Test from: https://bugs.llvm.org/show_bug.cgi?id=37472
-  define i32 @f(%struct.S* nocapture %arg, i32 %arg1) {
+  define i32 @f(ptr nocapture %arg, i32 %arg1) {
   bb:
     %tmp = alloca [4 x i8], align 1
     %tmp6 = getelementptr inbounds [4 x i8], ptr %tmp, i64 0, i64 3

diff  --git a/llvm/test/CodeGen/AArch64/early-ifcvt-regclass-mismatch.mir b/llvm/test/CodeGen/AArch64/early-ifcvt-regclass-mismatch.mir
index 66b70b15ae141..318bdceeaef41 100644
--- a/llvm/test/CodeGen/AArch64/early-ifcvt-regclass-mismatch.mir
+++ b/llvm/test/CodeGen/AArch64/early-ifcvt-regclass-mismatch.mir
@@ -7,7 +7,7 @@
     br i1 undef, label %if.then139.i, label %if.else142.i
 
   if.then139.i:                                     ; preds = %entry
-    %0 = load double, double* undef, align 8
+    %0 = load double, ptr undef, align 8
     br label %if.end161.i
 
   if.else142.i:                                     ; preds = %entry
@@ -35,7 +35,7 @@
     ret void
   }
   declare double @llvm.fabs.f64(double) #1
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
 
   attributes #0 = { "target-cpu"="apple-a7" }
   attributes #1 = { nounwind readnone speculatable willreturn }
@@ -128,7 +128,7 @@ body:             |
   bb.2.if.then139.i:
     successors: %bb.5(0x80000000)
 
-    %7:gpr64 = LDRXui %8, 0 :: (load (s64) from `double* undef`)
+    %7:gpr64 = LDRXui %8, 0 :: (load (s64) from `ptr undef`)
     B %bb.5
 
   bb.3.if.else142.i:

diff  --git a/llvm/test/CodeGen/AArch64/elim-dead-mi.mir b/llvm/test/CodeGen/AArch64/elim-dead-mi.mir
index 0510a8798a49d..0542b46f2e393 100644
--- a/llvm/test/CodeGen/AArch64/elim-dead-mi.mir
+++ b/llvm/test/CodeGen/AArch64/elim-dead-mi.mir
@@ -5,7 +5,7 @@
   @d = common dso_local local_unnamed_addr global i32 0, align 4
 
   define dso_local i32 @main() local_unnamed_addr {
-  %scevgep = getelementptr i8, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @c, i64 0, i64 1), i64 0
+  %scevgep = getelementptr i8, ptr getelementptr inbounds ([3 x i8], ptr @c, i64 0, i64 1), i64 0
   ret i32 0
   }
 ...

diff  --git a/llvm/test/CodeGen/AArch64/expand-blr-rvmarker-pseudo.mir b/llvm/test/CodeGen/AArch64/expand-blr-rvmarker-pseudo.mir
index 37d1623b2cd8d..b1e48346c2746 100644
--- a/llvm/test/CodeGen/AArch64/expand-blr-rvmarker-pseudo.mir
+++ b/llvm/test/CodeGen/AArch64/expand-blr-rvmarker-pseudo.mir
@@ -29,7 +29,7 @@
     ret void
   }
 
-  declare i8* @attachedcall()
+  declare ptr @attachedcall()
 
   declare ptr @objc_retainAutoreleasedReturnValue()
 ...

diff  --git a/llvm/test/CodeGen/AArch64/fmov-imm-licm.ll b/llvm/test/CodeGen/AArch64/fmov-imm-licm.ll
index 29061840c96bf..23a1013767d80 100644
--- a/llvm/test/CodeGen/AArch64/fmov-imm-licm.ll
+++ b/llvm/test/CodeGen/AArch64/fmov-imm-licm.ll
@@ -6,28 +6,28 @@
 ; and also by checking that we're not spilling any FP callee-saved
 ; registers.
 
-%struct.Node = type { %struct.Node*, i8* }
+%struct.Node = type { ptr, ptr }
 
-define void @process_nodes(%struct.Node* %0) {
+define void @process_nodes(ptr %0) {
 ; CHECK-LABEL: process_nodes:
 ; CHECK-NOT:   stp {{d[0-9]+}}
 ; CHECK-LABEL: .LBB0_2:
 ; CHECK:       fmov s0, #1.00000000
 ; CHECK:       bl do_it
 entry:
-  %1 = icmp eq %struct.Node* %0, null
+  %1 = icmp eq ptr %0, null
   br i1 %1, label %exit, label %loop
 
 loop:
-  %2 = phi %struct.Node* [ %4, %loop ], [ %0, %entry ]
-  tail call void @do_it(float 1.000000e+00, %struct.Node* nonnull %2)
-  %3 = getelementptr inbounds %struct.Node, %struct.Node* %2, i64 0, i32 0
-  %4 = load %struct.Node*, %struct.Node** %3, align 8
-  %5 = icmp eq %struct.Node* %4, null
+  %2 = phi ptr [ %4, %loop ], [ %0, %entry ]
+  tail call void @do_it(float 1.000000e+00, ptr nonnull %2)
+  %3 = getelementptr inbounds %struct.Node, ptr %2, i64 0, i32 0
+  %4 = load ptr, ptr %3, align 8
+  %5 = icmp eq ptr %4, null
   br i1 %5, label %exit, label %loop
 
 exit:
   ret void
 }
 
-declare void @do_it(float, %struct.Node*)
+declare void @do_it(float, ptr)

diff  --git a/llvm/test/CodeGen/AArch64/inline-asm-constraints-bad-sve.ll b/llvm/test/CodeGen/AArch64/inline-asm-constraints-bad-sve.ll
index 78a4af02d7e0d..e600bbaee6cba 100644
--- a/llvm/test/CodeGen/AArch64/inline-asm-constraints-bad-sve.ll
+++ b/llvm/test/CodeGen/AArch64/inline-asm-constraints-bad-sve.ll
@@ -15,24 +15,24 @@ entry:
   ret <vscale x 16 x i1> %1
 }
 
-define <vscale x 4 x float> @foo2(<vscale x 4 x i32> *%in) {
+define <vscale x 4 x float> @foo2(ptr %in) {
 entry:
-  %0 = load <vscale x 4 x i32>, <vscale x 4 x i32>* %in, align 16
+  %0 = load <vscale x 4 x i32>, ptr %in, align 16
   %1 = call <vscale x 4 x float> asm sideeffect "ptrue p0.s, #1 \0Afabs $0.s, p0/m, $1.s \0A", "=w,r"(<vscale x 4 x i32> %0)
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 16 x i1> @foo3(<vscale x 16 x i1> *%in) {
+define <vscale x 16 x i1> @foo3(ptr %in) {
 entry:
-  %0 = load <vscale x 16 x i1>, <vscale x 16 x i1>* %in, align 2
+  %0 = load <vscale x 16 x i1>, ptr %in, align 2
   %1 = call <vscale x 16 x i1> asm sideeffect "mov $0.b, $1.b \0A", "=&w,w"(<vscale x 16 x i1> %0)
   ret <vscale x 16 x i1> %1
 }
 
-define half @foo4(<vscale x 16 x i1> *%inp, <vscale x 8 x half> *%inv) {
+define half @foo4(ptr %inp, ptr %inv) {
 entry:
-  %0 = load <vscale x 16 x i1>, <vscale x 16 x i1>* %inp, align 2
-  %1 = load <vscale x 8 x half>, <vscale x 8 x half>* %inv, align 16
+  %0 = load <vscale x 16 x i1>, ptr %inp, align 2
+  %1 = load <vscale x 8 x half>, ptr %inv, align 16
+  %2 = call half asm "fminv ${0:h}, $1, $2.h", "=r,@3Upl,w"(<vscale x 16 x i1> %0, <vscale x 8 x half> %1)
   ret half %2
 }

diff  --git a/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll b/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
index b80ea04823e9f..29f9c0336bbcc 100644
--- a/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
@@ -5,7 +5,7 @@ target triple = "aarch64-unknown-linux-gnu"
 
 ; SCALABLE INSERTED INTO SCALABLE TESTS
 
-define <vscale x 8 x i8> @vec_scalable_subvec_scalable_idx_zero_i8(<vscale x 8 x i8>* %a, <vscale x 4 x i8>* %b) #0 {
+define <vscale x 8 x i8> @vec_scalable_subvec_scalable_idx_zero_i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: vec_scalable_subvec_scalable_idx_zero_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
@@ -15,13 +15,13 @@ define <vscale x 8 x i8> @vec_scalable_subvec_scalable_idx_zero_i8(<vscale x 8 x
 ; CHECK-NEXT:    uunpkhi z0.s, z0.h
 ; CHECK-NEXT:    uzp1 z0.h, z1.h, z0.h
 ; CHECK-NEXT:    ret
-  %vec = load <vscale x 8 x i8>, <vscale x 8 x i8>* %a
-  %subvec = load <vscale x 4 x i8>, <vscale x 4 x i8>* %b
+  %vec = load <vscale x 8 x i8>, ptr %a
+  %subvec = load <vscale x 4 x i8>, ptr %b
   %ins = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> %vec, <vscale x 4 x i8> %subvec, i64 0)
   ret <vscale x 8 x i8> %ins
 }
 
-define <vscale x 8 x i8> @vec_scalable_subvec_scalable_idx_nonzero_i8(<vscale x 8 x i8>* %a, <vscale x 4 x i8>* %b) #0 {
+define <vscale x 8 x i8> @vec_scalable_subvec_scalable_idx_nonzero_i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: vec_scalable_subvec_scalable_idx_nonzero_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
@@ -31,13 +31,13 @@ define <vscale x 8 x i8> @vec_scalable_subvec_scalable_idx_nonzero_i8(<vscale x
 ; CHECK-NEXT:    uunpklo z0.s, z0.h
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %vec = load <vscale x 8 x i8>, <vscale x 8 x i8>* %a
-  %subvec = load <vscale x 4 x i8>, <vscale x 4 x i8>* %b
+  %vec = load <vscale x 8 x i8>, ptr %a
+  %subvec = load <vscale x 4 x i8>, ptr %b
   %ins = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> %vec, <vscale x 4 x i8> %subvec, i64 4)
   ret <vscale x 8 x i8> %ins
 }
 
-define <vscale x 4 x i16> @vec_scalable_subvec_scalable_idx_zero_i16(<vscale x 4 x i16>* %a, <vscale x 2 x i16>* %b) #0 {
+define <vscale x 4 x i16> @vec_scalable_subvec_scalable_idx_zero_i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: vec_scalable_subvec_scalable_idx_zero_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -47,13 +47,13 @@ define <vscale x 4 x i16> @vec_scalable_subvec_scalable_idx_zero_i16(<vscale x 4
 ; CHECK-NEXT:    uunpkhi z0.d, z0.s
 ; CHECK-NEXT:    uzp1 z0.s, z1.s, z0.s
 ; CHECK-NEXT:    ret
-  %vec = load <vscale x 4 x i16>, <vscale x 4 x i16>* %a
-  %subvec = load <vscale x 2 x i16>, <vscale x 2 x i16>* %b
+  %vec = load <vscale x 4 x i16>, ptr %a
+  %subvec = load <vscale x 2 x i16>, ptr %b
   %ins = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> %vec, <vscale x 2 x i16> %subvec, i64 0)
   ret <vscale x 4 x i16> %ins
 }
 
-define <vscale x 4 x i16> @vec_scalable_subvec_scalable_idx_nonzero_i16(<vscale x 4 x i16>* %a, <vscale x 2 x i16>* %b) #0 {
+define <vscale x 4 x i16> @vec_scalable_subvec_scalable_idx_nonzero_i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: vec_scalable_subvec_scalable_idx_nonzero_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -63,15 +63,15 @@ define <vscale x 4 x i16> @vec_scalable_subvec_scalable_idx_nonzero_i16(<vscale
 ; CHECK-NEXT:    uunpklo z0.d, z0.s
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %vec = load <vscale x 4 x i16>, <vscale x 4 x i16>* %a
-  %subvec = load <vscale x 2 x i16>, <vscale x 2 x i16>* %b
+  %vec = load <vscale x 4 x i16>, ptr %a
+  %subvec = load <vscale x 2 x i16>, ptr %b
   %ins = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> %vec, <vscale x 2 x i16> %subvec, i64 2)
   ret <vscale x 4 x i16> %ins
 }
 
 ; FIXED INSERTED INTO SCALABLE TESTS
 
-define <vscale x 8 x i8> @vec_scalable_subvec_fixed_idx_zero_i8(<vscale x 8 x i8>* %a, <8 x i8>* %b) #0 {
+define <vscale x 8 x i8> @vec_scalable_subvec_fixed_idx_zero_i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: vec_scalable_subvec_fixed_idx_zero_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
@@ -81,13 +81,13 @@ define <vscale x 8 x i8> @vec_scalable_subvec_fixed_idx_zero_i8(<vscale x 8 x i8
 ; CHECK-NEXT:    ld1b { z1.h }, p0/z, [x0]
 ; CHECK-NEXT:    sel z0.h, p1, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %vec = load <vscale x 8 x i8>, <vscale x 8 x i8>* %a
-  %subvec = load <8 x i8>, <8 x i8>* %b
+  %vec = load <vscale x 8 x i8>, ptr %a
+  %subvec = load <8 x i8>, ptr %b
   %ins = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v8i8(<vscale x 8 x i8> %vec, <8 x i8> %subvec, i64 0)
   ret <vscale x 8 x i8> %ins
 }
 
-define <vscale x 8 x i8> @vec_scalable_subvec_fixed_idx_nonzero_i8(<vscale x 8 x i8>* %a, <8 x i8>* %b) #0 {
+define <vscale x 8 x i8> @vec_scalable_subvec_fixed_idx_nonzero_i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: vec_scalable_subvec_fixed_idx_nonzero_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -109,13 +109,13 @@ define <vscale x 8 x i8> @vec_scalable_subvec_fixed_idx_nonzero_i8(<vscale x 8 x
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %vec = load <vscale x 8 x i8>, <vscale x 8 x i8>* %a
-  %subvec = load <8 x i8>, <8 x i8>* %b
+  %vec = load <vscale x 8 x i8>, ptr %a
+  %subvec = load <8 x i8>, ptr %b
   %ins = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v8i8(<vscale x 8 x i8> %vec, <8 x i8> %subvec, i64 8)
   ret <vscale x 8 x i8> %ins
 }
 
-define <vscale x 4 x i16> @vec_scalable_subvec_fixed_idx_zero_i16(<vscale x 4 x i16>* %a, <4 x i16>* %b) #0 {
+define <vscale x 4 x i16> @vec_scalable_subvec_fixed_idx_zero_i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: vec_scalable_subvec_fixed_idx_zero_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -125,13 +125,13 @@ define <vscale x 4 x i16> @vec_scalable_subvec_fixed_idx_zero_i16(<vscale x 4 x
 ; CHECK-NEXT:    ld1h { z1.s }, p0/z, [x0]
 ; CHECK-NEXT:    sel z0.s, p1, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %vec = load <vscale x 4 x i16>, <vscale x 4 x i16>* %a
-  %subvec = load <4 x i16>, <4 x i16>* %b
+  %vec = load <vscale x 4 x i16>, ptr %a
+  %subvec = load <4 x i16>, ptr %b
   %ins = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v4i16(<vscale x 4 x i16> %vec, <4 x i16> %subvec, i64 0)
   ret <vscale x 4 x i16> %ins
 }
 
-define <vscale x 4 x i16> @vec_scalable_subvec_fixed_idx_nonzero_i16(<vscale x 4 x i16>* %a, <4 x i16>* %b) #0 {
+define <vscale x 4 x i16> @vec_scalable_subvec_fixed_idx_nonzero_i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: vec_scalable_subvec_fixed_idx_nonzero_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -153,13 +153,13 @@ define <vscale x 4 x i16> @vec_scalable_subvec_fixed_idx_nonzero_i16(<vscale x 4
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %vec = load <vscale x 4 x i16>, <vscale x 4 x i16>* %a
-  %subvec = load <4 x i16>, <4 x i16>* %b
+  %vec = load <vscale x 4 x i16>, ptr %a
+  %subvec = load <4 x i16>, ptr %b
   %ins = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v4i16(<vscale x 4 x i16> %vec, <4 x i16> %subvec, i64 4)
   ret <vscale x 4 x i16> %ins
 }
 
-define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_zero_i32(<vscale x 2 x i32>* %a, <2 x i32>* %b) #0 {
+define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_zero_i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: vec_scalable_subvec_fixed_idx_zero_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -169,13 +169,13 @@ define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_zero_i32(<vscale x 2 x
 ; CHECK-NEXT:    ld1w { z1.d }, p0/z, [x0]
 ; CHECK-NEXT:    sel z0.d, p1, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %vec = load <vscale x 2 x i32>, <vscale x 2 x i32>* %a
-  %subvec = load <2 x i32>, <2 x i32>* %b
+  %vec = load <vscale x 2 x i32>, ptr %a
+  %subvec = load <2 x i32>, ptr %b
   %ins = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v2i32(<vscale x 2 x i32> %vec, <2 x i32> %subvec, i64 0)
   ret <vscale x 2 x i32> %ins
 }
 
-define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_nonzero_i32(<vscale x 2 x i32>* %a, <2 x i32>* %b) #0 {
+define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_nonzero_i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: vec_scalable_subvec_fixed_idx_nonzero_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -197,13 +197,13 @@ define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_nonzero_i32(<vscale x 2
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %vec = load <vscale x 2 x i32>, <vscale x 2 x i32>* %a
-  %subvec = load <2 x i32>, <2 x i32>* %b
+  %vec = load <vscale x 2 x i32>, ptr %a
+  %subvec = load <2 x i32>, ptr %b
   %ins = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v2i32(<vscale x 2 x i32> %vec, <2 x i32> %subvec, i64 2)
   ret <vscale x 2 x i32> %ins
 }
 
-define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_nonzero_large_i32(<vscale x 2 x i32>* %a, <8 x i32>* %b) #1 {
+define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_nonzero_large_i32(ptr %a, ptr %b) #1 {
 ; CHECK-LABEL: vec_scalable_subvec_fixed_idx_nonzero_large_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -218,8 +218,8 @@ define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_nonzero_large_i32(<vsca
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %vec = load <vscale x 2 x i32>, <vscale x 2 x i32>* %a
-  %subvec = load <8 x i32>, <8 x i32>* %b
+  %vec = load <vscale x 2 x i32>, ptr %a
+  %subvec = load <8 x i32>, ptr %b
   %ins = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> %vec, <8 x i32> %subvec, i64 8)
   ret <vscale x 2 x i32> %ins
 }

diff  --git a/llvm/test/CodeGen/AArch64/irg-nomem.mir b/llvm/test/CodeGen/AArch64/irg-nomem.mir
index bc247b0dbf9e3..3b000fafbed46 100644
--- a/llvm/test/CodeGen/AArch64/irg-nomem.mir
+++ b/llvm/test/CodeGen/AArch64/irg-nomem.mir
@@ -4,18 +4,18 @@
   target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64-unknown-linux-android"
 
-  define void @f(i64* nocapture %x) "target-features"="+mte" {
+  define void @f(ptr nocapture %x) "target-features"="+mte" {
   entry:
-    store i64 1, i64* %x, align 8
-    %0 = tail call i8* @llvm.aarch64.irg(i8* null, i64 0)
-    %1 = tail call i8* @llvm.aarch64.irg.sp(i64 0)
-    %arrayidx1 = getelementptr inbounds i64, i64* %x, i64 1
-    store i64 1, i64* %arrayidx1, align 8
+    store i64 1, ptr %x, align 8
+    %0 = tail call ptr @llvm.aarch64.irg(ptr null, i64 0)
+    %1 = tail call ptr @llvm.aarch64.irg.sp(i64 0)
+    %arrayidx1 = getelementptr inbounds i64, ptr %x, i64 1
+    store i64 1, ptr %arrayidx1, align 8
     ret void
   }
 
-  declare i8* @llvm.aarch64.irg(i8*, i64) nounwind
-  declare i8* @llvm.aarch64.irg.sp(i64) nounwind
+  declare ptr @llvm.aarch64.irg(ptr, i64) nounwind
+  declare ptr @llvm.aarch64.irg.sp(i64) nounwind
 ...
 ---
 name:            f

diff  --git a/llvm/test/CodeGen/AArch64/ldradr.ll b/llvm/test/CodeGen/AArch64/ldradr.ll
index e5f3fd3d770ba..8a20f5d5d0f73 100644
--- a/llvm/test/CodeGen/AArch64/ldradr.ll
+++ b/llvm/test/CodeGen/AArch64/ldradr.ll
@@ -3,7 +3,7 @@
 
 %struct.T = type <{ i32, i64, i8, i32 }>
 
-@ptr = external dso_local local_unnamed_addr global i32*, align 8
+@ptr = external dso_local local_unnamed_addr global ptr, align 8
 @ch = external dso_local local_unnamed_addr global i32, align 4
 @ch8 = external dso_local local_unnamed_addr global i8, align 4
 @t = external dso_local local_unnamed_addr global %struct.T, align 4
@@ -17,8 +17,8 @@ define i32 @barp() {
 ; CHECK-NEXT:    ldr w0, [x8]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32*, i32** @ptr, align 8
-  %1 = load i32, i32* %0, align 4
+  %0 = load ptr, ptr @ptr, align 8
+  %1 = load i32, ptr %0, align 4
   ret i32 %1
 }
 
@@ -28,7 +28,7 @@ define i32 @barch() {
 ; CHECK-NEXT:    ldr w0, ch
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* @ch, align 4
+  %0 = load i32, ptr @ch, align 4
   ret i32 %0
 }
 
@@ -38,7 +38,7 @@ define i32 @barta() {
 ; CHECK-NEXT:    ldr w0, t
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* getelementptr inbounds (%struct.T, %struct.T* @t, i64 0, i32 0), align 4
+  %0 = load i32, ptr @t, align 4
   ret i32 %0
 }
 
@@ -48,7 +48,7 @@ define i64 @bartb() {
 ; CHECK-NEXT:    ldr x0, t+4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i64, i64* getelementptr inbounds (%struct.T, %struct.T* @t, i64 0, i32 1), align 8
+  %0 = load i64, ptr getelementptr inbounds (%struct.T, ptr @t, i64 0, i32 1), align 8
   ret i64 %0
 }
 
@@ -59,7 +59,7 @@ define i32 @bartc() {
 ; CHECK-NEXT:    ldr w0, [x8]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* getelementptr inbounds (%struct.T, %struct.T* @t, i64 0, i32 3), align 1
+  %0 = load i32, ptr getelementptr inbounds (%struct.T, ptr @t, i64 0, i32 3), align 1
   ret i32 %0
 }
 
@@ -70,7 +70,7 @@ define i32 @bart2a() {
 ; CHECK-NEXT:    ldr w0, [x8]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* getelementptr inbounds (%struct.T, %struct.T* @t2, i64 0, i32 0), align 2
+  %0 = load i32, ptr @t2, align 2
   ret i32 %0
 }
 
@@ -80,7 +80,7 @@ define i64 @zextload() {
 ; CHECK-NEXT:    ldr w0, ch
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* @ch, align 4
+  %0 = load i32, ptr @ch, align 4
   %1 = zext i32 %0 to i64
   ret i64 %1
 }
@@ -92,7 +92,7 @@ define i64 @zextload8() {
 ; CHECK-NEXT:    ldrb w0, [x8]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i8, i8* @ch8, align 4
+  %0 = load i8, ptr @ch8, align 4
   %1 = zext i8 %0 to i64
   ret i64 %1
 }
@@ -103,7 +103,7 @@ define i64 @sextload() {
 ; CHECK-NEXT:    ldrsw x0, ch
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* @ch, align 4
+  %0 = load i32, ptr @ch, align 4
   %1 = sext i32 %0 to i64
   ret i64 %1
 }
@@ -115,7 +115,7 @@ define i64 @sextload8() {
 ; CHECK-NEXT:    ldrsb x0, [x8]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i8, i8* @ch8, align 4
+  %0 = load i8, ptr @ch8, align 4
   %1 = sext i8 %0 to i64
   ret i64 %1
 }
@@ -126,7 +126,7 @@ define float @floatload() {
 ; CHECK-NEXT:    ldr s0, f
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load float, float* @f, align 4
+  %0 = load float, ptr @f, align 4
   ret float %0
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/ldst-nopreidx-sp-redzone.mir b/llvm/test/CodeGen/AArch64/ldst-nopreidx-sp-redzone.mir
index bf2043d07f9e4..f1f9e5fbc9b08 100644
--- a/llvm/test/CodeGen/AArch64/ldst-nopreidx-sp-redzone.mir
+++ b/llvm/test/CodeGen/AArch64/ldst-nopreidx-sp-redzone.mir
@@ -4,7 +4,7 @@
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
   target triple = "arm64e-apple-ios13.0"
   
-  %struct.widget = type { i64, i64, i32, i32, i64, i64, i64, i64, i64, i32, i32, i32, i16, i32, %struct.snork*, %struct.zot, %struct.zot, %struct.zot, %struct.zot, %struct.zot, i64, i64, i64, i32, i64, i32, i32, i32, i8*, %struct.baz, %struct.baz, i64, i64, %struct.snork*, %struct.zot, i32, i32, i32, i32, i32, i32, i32, [32 x i8], i64, i64, %struct.wombat, i32, i64, i64, i64, i64 }
+  %struct.widget = type { i64, i64, i32, i32, i64, i64, i64, i64, i64, i32, i32, i32, i16, i32, ptr, %struct.zot, %struct.zot, %struct.zot, %struct.zot, %struct.zot, i64, i64, i64, i32, i64, i32, i32, i32, ptr, %struct.baz, %struct.baz, i64, i64, ptr, %struct.zot, i32, i32, i32, i32, i32, i32, i32, [32 x i8], i64, i64, %struct.wombat, i32, i64, i64, i64, i64 }
   %struct.baz = type { [4 x i32] }
   %struct.snork = type { i32, i32, [1 x %struct.spam] }
   %struct.spam = type { %struct.baz, i32, i32 }
@@ -12,59 +12,59 @@
   %struct.wombat = type { [2 x i32] }
   %struct.wombat.0 = type { [200 x i32] }
   
-  @__stack_chk_guard = external global i8*
+  @__stack_chk_guard = external global ptr
   
   ; Function Attrs: noredzone ssp
   define hidden void @with_noredzone_80bytes() #0 {
   bb:
-    %StackGuardSlot = alloca i8*, align 8
-    %0 = call i8* @llvm.stackguard()
-    call void @llvm.stackprotector(i8* %0, i8** %StackGuardSlot)
+    %StackGuardSlot = alloca ptr, align 8
+    %0 = call ptr @llvm.stackguard()
+    call void @llvm.stackprotector(ptr %0, ptr %StackGuardSlot)
     %tmp = alloca %struct.widget, align 16
-    %tmp1 = alloca %struct.wombat.0*, align 8
-    %tmp2 = alloca %struct.wombat.0*, align 8
-    %tmp3 = alloca %struct.wombat.0*, align 8
-    %tmp4 = alloca %struct.wombat.0*, align 8
-    store %struct.wombat.0* null, %struct.wombat.0** %tmp3, align 8
-    store %struct.wombat.0* null, %struct.wombat.0** %tmp4, align 8
+    %tmp1 = alloca ptr, align 8
+    %tmp2 = alloca ptr, align 8
+    %tmp3 = alloca ptr, align 8
+    %tmp4 = alloca ptr, align 8
+    store ptr null, ptr %tmp3, align 8
+    store ptr null, ptr %tmp4, align 8
     ret void
   }
 
   define hidden void @with_redzone_480bytes() #2 {
   bb:
-    %StackGuardSlot = alloca i8*, align 8
-    %0 = call i8* @llvm.stackguard()
-    call void @llvm.stackprotector(i8* %0, i8** %StackGuardSlot)
+    %StackGuardSlot = alloca ptr, align 8
+    %0 = call ptr @llvm.stackguard()
+    call void @llvm.stackprotector(ptr %0, ptr %StackGuardSlot)
     %tmp = alloca %struct.widget, align 16
-    %tmp1 = alloca %struct.wombat.0*, align 8
-    %tmp2 = alloca %struct.wombat.0*, align 8
-    %tmp3 = alloca %struct.wombat.0*, align 8
-    %tmp4 = alloca %struct.wombat.0*, align 8
-    store %struct.wombat.0* null, %struct.wombat.0** %tmp3, align 8
-    store %struct.wombat.0* null, %struct.wombat.0** %tmp4, align 8
+    %tmp1 = alloca ptr, align 8
+    %tmp2 = alloca ptr, align 8
+    %tmp3 = alloca ptr, align 8
+    %tmp4 = alloca ptr, align 8
+    store ptr null, ptr %tmp3, align 8
+    store ptr null, ptr %tmp4, align 8
     ret void
   }
 
   define hidden void @with_noredzone_no_mem_between() #0 {
   bb:
-    %StackGuardSlot = alloca i8*, align 8
-    %0 = call i8* @llvm.stackguard()
-    call void @llvm.stackprotector(i8* %0, i8** %StackGuardSlot)
+    %StackGuardSlot = alloca ptr, align 8
+    %0 = call ptr @llvm.stackguard()
+    call void @llvm.stackprotector(ptr %0, ptr %StackGuardSlot)
     %tmp = alloca %struct.widget, align 16
-    %tmp1 = alloca %struct.wombat.0*, align 8
-    %tmp2 = alloca %struct.wombat.0*, align 8
-    %tmp3 = alloca %struct.wombat.0*, align 8
-    %tmp4 = alloca %struct.wombat.0*, align 8
-    store %struct.wombat.0* null, %struct.wombat.0** %tmp3, align 8
-    store %struct.wombat.0* null, %struct.wombat.0** %tmp4, align 8
+    %tmp1 = alloca ptr, align 8
+    %tmp2 = alloca ptr, align 8
+    %tmp3 = alloca ptr, align 8
+    %tmp4 = alloca ptr, align 8
+    store ptr null, ptr %tmp3, align 8
+    store ptr null, ptr %tmp4, align 8
     ret void
   }
 
   ; Function Attrs: nofree nosync nounwind willreturn
-  declare i8* @llvm.stackguard() #1
+  declare ptr @llvm.stackguard() #1
   
   ; Function Attrs: nofree nosync nounwind willreturn
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
   
   attributes #0 = { noredzone ssp "frame-pointer"="non-leaf" }
   attributes #1 = { nofree nosync nounwind willreturn }

diff  --git a/llvm/test/CodeGen/AArch64/ldst-opt-aa.mir b/llvm/test/CodeGen/AArch64/ldst-opt-aa.mir
index 16369af9ccc6d..914f09ccb6a84 100644
--- a/llvm/test/CodeGen/AArch64/ldst-opt-aa.mir
+++ b/llvm/test/CodeGen/AArch64/ldst-opt-aa.mir
@@ -1,13 +1,13 @@
 # RUN: llc -mtriple=aarch64--linux-gnu -run-pass=aarch64-ldst-opt %s -verify-machineinstrs -o - | FileCheck %s
 --- |
-  define void @ldr_str_aa(i32* noalias nocapture %x, i32* noalias nocapture readonly %y) {
+  define void @ldr_str_aa(ptr noalias nocapture %x, ptr noalias nocapture readonly %y) {
   entry:
-    %0 = load i32, i32* %y, align 4
-    store i32 %0, i32* %x, align 4
-    %arrayidx2 = getelementptr inbounds i32, i32* %y, i32 1
-    %1 = load i32, i32* %arrayidx2, align 4
-    %arrayidx3 = getelementptr inbounds i32, i32* %x, i32 1
-    store i32 %1, i32* %arrayidx3, align 4
+    %0 = load i32, ptr %y, align 4
+    store i32 %0, ptr %x, align 4
+    %arrayidx2 = getelementptr inbounds i32, ptr %y, i32 1
+    %1 = load i32, ptr %arrayidx2, align 4
+    %arrayidx3 = getelementptr inbounds i32, ptr %x, i32 1
+    store i32 %1, ptr %arrayidx3, align 4
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/AArch64/ldst-opt-non-imm-offset.mir b/llvm/test/CodeGen/AArch64/ldst-opt-non-imm-offset.mir
index a39f001f44b21..a3e154a7c1573 100644
--- a/llvm/test/CodeGen/AArch64/ldst-opt-non-imm-offset.mir
+++ b/llvm/test/CodeGen/AArch64/ldst-opt-non-imm-offset.mir
@@ -4,8 +4,8 @@
 
   define i32 @test() {
   entry:
-    store i32 0, i32* @g, align 4
-    %0 = load i32, i32* undef, align 4
+    store i32 0, ptr @g, align 4
+    %0 = load i32, ptr undef, align 4
     ret i32 %0
   }
 
@@ -21,7 +21,7 @@ body:             |
   bb.0.entry:
     renamable $x8 = ADRP target-flags(aarch64-page) @g
     STRWui $wzr, killed renamable $x8, target-flags(aarch64-pageoff, aarch64-nc) @g :: (store (s32) into @g)
-    renamable $w0 = LDRWui undef renamable $x8, 0 :: (load (s32) from `i32* undef`)
+    renamable $w0 = LDRWui undef renamable $x8, 0 :: (load (s32) from `ptr undef`)
     RET_ReallyLR implicit $w0
 
 ...

diff  --git a/llvm/test/CodeGen/AArch64/ldst-opt-zr-clobber.mir b/llvm/test/CodeGen/AArch64/ldst-opt-zr-clobber.mir
index dfd5b0da7a683..c0cff21d5fc81 100644
--- a/llvm/test/CodeGen/AArch64/ldst-opt-zr-clobber.mir
+++ b/llvm/test/CodeGen/AArch64/ldst-opt-zr-clobber.mir
@@ -2,7 +2,7 @@
 # RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass aarch64-ldst-opt  -verify-machineinstrs  -o - %s | FileCheck %s
 
 --- |
-  define i1 @no-clobber-zr(i64* %p, i64 %x) { ret i1 0 }
+  define i1 @no-clobber-zr(ptr %p, i64 %x) { ret i1 0 }
 ...
 ---
 # Check that write of xzr doesn't inhibit pairing of xzr stores since

diff  --git a/llvm/test/CodeGen/AArch64/machine-combiner-fmul-dup.mir b/llvm/test/CodeGen/AArch64/machine-combiner-fmul-dup.mir
index 3f08307ff8539..ae5a7401649d4 100644
--- a/llvm/test/CodeGen/AArch64/machine-combiner-fmul-dup.mir
+++ b/llvm/test/CodeGen/AArch64/machine-combiner-fmul-dup.mir
@@ -6,7 +6,7 @@
   target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64-unknown-linux-gnu"
 
-  define void @indexed_2s(<2 x float> %shuf, <2 x float> %mu, <2 x float> %ad, <2 x float>* %ret) #0 {
+  define void @indexed_2s(<2 x float> %shuf, <2 x float> %mu, <2 x float> %ad, ptr %ret) #0 {
   entry:
     %shuffle = shufflevector <2 x float> %shuf, <2 x float> undef, <2 x i32> zeroinitializer
     br label %for.cond
@@ -14,11 +14,11 @@
   for.cond:                                         ; preds = %for.cond, %entry
     %mul = fmul <2 x float> %mu, %shuffle
     %add = fadd <2 x float> %mul, %ad
-    store <2 x float> %add, <2 x float>* %ret, align 16
+    store <2 x float> %add, ptr %ret, align 16
     br label %for.cond
   }
 
-  define void @indexed_2s_rev(<2 x float> %shuf, <2 x float> %mu, <2 x float> %ad, <2 x float>* %ret) #0 {
+  define void @indexed_2s_rev(<2 x float> %shuf, <2 x float> %mu, <2 x float> %ad, ptr %ret) #0 {
   entry:
     %shuffle = shufflevector <2 x float> %shuf, <2 x float> undef, <2 x i32> zeroinitializer
     br label %for.cond
@@ -26,11 +26,11 @@
   for.cond:                                         ; preds = %for.cond, %entry
     %mul = fmul <2 x float> %shuffle, %mu
     %add = fadd <2 x float> %mul, %ad
-    store <2 x float> %add, <2 x float>* %ret, align 16
+    store <2 x float> %add, ptr %ret, align 16
     br label %for.cond
   }
 
-  define void @indexed_2d(<2 x double> %shuf, <2 x double> %mu, <2 x double> %ad, <2 x double>* %ret) #0 {
+  define void @indexed_2d(<2 x double> %shuf, <2 x double> %mu, <2 x double> %ad, ptr %ret) #0 {
   entry:
     %shuffle = shufflevector <2 x double> %shuf, <2 x double> undef, <2 x i32> zeroinitializer
     br label %for.cond
@@ -38,11 +38,11 @@
   for.cond:                                         ; preds = %for.cond, %entry
     %mul = fmul <2 x double> %mu, %shuffle
     %add = fadd <2 x double> %mul, %ad
-    store <2 x double> %add, <2 x double>* %ret, align 16
+    store <2 x double> %add, ptr %ret, align 16
     br label %for.cond
   }
 
-  define void @indexed_4s(<4 x float> %shuf, <4 x float> %mu, <4 x float> %ad, <4 x float>* %ret) #0 {
+  define void @indexed_4s(<4 x float> %shuf, <4 x float> %mu, <4 x float> %ad, ptr %ret) #0 {
   entry:
     %shuffle = shufflevector <4 x float> %shuf, <4 x float> undef, <4 x i32> zeroinitializer
     br label %for.cond
@@ -50,11 +50,11 @@
   for.cond:                                         ; preds = %for.cond, %entry
     %mul = fmul <4 x float> %mu, %shuffle
     %add = fadd <4 x float> %mul, %ad
-    store <4 x float> %add, <4 x float>* %ret, align 16
+    store <4 x float> %add, ptr %ret, align 16
     br label %for.cond
   }
 
-  define void @indexed_4h(<4 x half> %shuf, <4 x half> %mu, <4 x half> %ad, <4 x half>* %ret) #0 {
+  define void @indexed_4h(<4 x half> %shuf, <4 x half> %mu, <4 x half> %ad, ptr %ret) #0 {
   entry:
     %shuffle = shufflevector <4 x half> %shuf, <4 x half> undef, <4 x i32> zeroinitializer
     br label %for.cond
@@ -62,11 +62,11 @@
   for.cond:
     %mul = fmul <4 x half> %mu, %shuffle
     %add = fadd <4 x half> %mul, %ad
-    store <4 x half> %add, <4 x half>* %ret, align 16
+    store <4 x half> %add, ptr %ret, align 16
     br label %for.cond
   }
 
-  define void @indexed_8h(<8 x half> %shuf, <8 x half> %mu, <8 x half> %ad, <8 x half>* %ret) #0 {
+  define void @indexed_8h(<8 x half> %shuf, <8 x half> %mu, <8 x half> %ad, ptr %ret) #0 {
   entry:
     %shuffle = shufflevector <8 x half> %shuf, <8 x half> undef, <8 x i32> zeroinitializer
     br label %for.cond
@@ -74,28 +74,28 @@
   for.cond:
     %mul = fmul <8 x half> %mu, %shuffle
     %add = fadd <8 x half> %mul, %ad
-    store <8 x half> %add, <8 x half>* %ret, align 16
+    store <8 x half> %add, ptr %ret, align 16
     br label %for.cond
   }
 
   define void @kill_state(<2 x float> %shuf, <2 x float> %mu, <2 x float> %ad,
-                          <2 x float>* %ret, <2 x float>* %ret2, float %f) #0 {
+                          ptr %ret, ptr %ret2, float %f) #0 {
   entry:
     %zero_elem = extractelement <2 x float> %shuf, i32 0
     %ins = insertelement <2 x float> undef, float %zero_elem, i32 0
     %shuffle = shufflevector <2 x float> %ins, <2 x float> undef, <2 x i32> zeroinitializer
     %ins2 = insertelement <2 x float> %ins, float %f, i32 1
-    store <2 x float> %ins2, <2 x float>* %ret2, align 8
+    store <2 x float> %ins2, ptr %ret2, align 8
     br label %for.cond
 
   for.cond:                                         ; preds = %for.cond, %entry
     %mul = fmul <2 x float> %mu, %shuffle
     %add = fadd <2 x float> %mul, %ad
-    store <2 x float> %add, <2 x float>* %ret, align 16
+    store <2 x float> %add, ptr %ret, align 16
     br label %for.cond
   }
 
-  define void @extracopy(<2 x float> %shuf, <2 x float> %mu, <2 x float> %ad, <2 x float>* %ret) #0 {
+  define void @extracopy(<2 x float> %shuf, <2 x float> %mu, <2 x float> %ad, ptr %ret) #0 {
     unreachable
   }
 

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-bti.mir b/llvm/test/CodeGen/AArch64/machine-outliner-bti.mir
index 3a6cd273eac57..ac31440ed7b6b 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-bti.mir
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-bti.mir
@@ -15,7 +15,7 @@
 --- |
   @g = hidden local_unnamed_addr global i32 0, align 4
 
-  define hidden void @bar(void ()* nocapture %f) "branch-target-enforcement"="true" {
+  define hidden void @bar(ptr nocapture %f) "branch-target-enforcement"="true" {
   entry:
     ret void
   }

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-sp-mod.mir b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-sp-mod.mir
index 7d5a7e247087f..c1c2720dec6ad 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-sp-mod.mir
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-sp-mod.mir
@@ -4,57 +4,57 @@
   target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64-arm-linux-gnu"
 
-  @v = common dso_local global i32* null, align 8
+  @v = common dso_local global ptr null, align 8
 
   ; Function Attrs: nounwind
   define dso_local void @legal0() #0 {
     %1 = alloca i32, align 4
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
     ret void
   }
 
   ; Function Attrs: nounwind
   define dso_local void @legal1() #0 {
     %1 = alloca i32, align 4
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
     ret void
   }
 
   ; Function Attrs: nounwind
   define dso_local void @illegal0() #0 {
     %1 = alloca i32, align 4
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
     ret void
   }
 
   ; Function Attrs: nounwind
   define dso_local void @illegal1() #0 {
     %1 = alloca i32, align 4
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
-    store volatile i32* %1, i32** @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
+    store volatile ptr %1, ptr @v, align 8
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/AArch64/machine-scheduler.mir b/llvm/test/CodeGen/AArch64/machine-scheduler.mir
index 09f30337fdd26..6c0222f4fdd78 100644
--- a/llvm/test/CodeGen/AArch64/machine-scheduler.mir
+++ b/llvm/test/CodeGen/AArch64/machine-scheduler.mir
@@ -1,15 +1,15 @@
 # RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass machine-scheduler  -verify-machineinstrs  -o - %s | FileCheck %s
 
 --- |
-  define i64 @load_imp-def(i64* nocapture %P, i32 %v) {
+  define i64 @load_imp-def(ptr nocapture %P, i32 %v) {
   entry:
-    %0 = bitcast i64* %P to i32*
-    %1 = load i32, i32* %0
+    %0 = bitcast ptr %P to ptr
+    %1 = load i32, ptr %0
     %conv = zext i32 %1 to i64
-    %arrayidx19 = getelementptr inbounds i64, i64* %P, i64 1
-    %arrayidx1 = bitcast i64* %arrayidx19 to i32*
-    store i32 %v, i32* %arrayidx1
-    %2 = load i64, i64* %arrayidx19
+    %arrayidx19 = getelementptr inbounds i64, ptr %P, i64 1
+    %arrayidx1 = bitcast ptr %arrayidx19 to ptr
+    store i32 %v, ptr %arrayidx1
+    %2 = load i64, ptr %arrayidx19
     %and = and i64 %2, 4294967295
     %add = add nuw nsw i64 %and, %conv
     ret i64 %add

diff  --git a/llvm/test/CodeGen/AArch64/memcpy-scoped-aa.ll b/llvm/test/CodeGen/AArch64/memcpy-scoped-aa.ll
index f122c94d5cffa..2271190963d16 100644
--- a/llvm/test/CodeGen/AArch64/memcpy-scoped-aa.ll
+++ b/llvm/test/CodeGen/AArch64/memcpy-scoped-aa.ll
@@ -11,7 +11,7 @@
 ; MIR-LABEL: name: test_memcpy
 ; MIR:      %2:fpr128 = LDRQui %0, 1 :: (load (s128) from %ir.p1, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
 ; MIR-NEXT: STRQui killed %2, %0, 0 :: (store (s128) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-define i32 @test_memcpy(i32* nocapture %p, i32* nocapture readonly %q) {
+define i32 @test_memcpy(ptr nocapture %p, ptr nocapture readonly %q) {
 ; CHECK-LABEL: test_memcpy:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp w9, w10, [x1]
@@ -20,13 +20,13 @@ define i32 @test_memcpy(i32* nocapture %p, i32* nocapture readonly %q) {
 ; CHECK-NEXT:    add w0, w9, w10
 ; CHECK-NEXT:    str q0, [x8]
 ; CHECK-NEXT:    ret
-  %p0 = bitcast i32* %p to i8*
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
-  %p1 = bitcast i32* %add.ptr to i8*
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
-  %v0 = load i32, i32* %q, align 4, !alias.scope !4, !noalias !2
-  %q1 = getelementptr inbounds i32, i32* %q, i64 1
-  %v1 = load i32, i32* %q1, align 4, !alias.scope !4, !noalias !2
+  %p0 = bitcast ptr %p to ptr
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
+  %p1 = bitcast ptr %add.ptr to ptr
+  tail call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
+  %v0 = load i32, ptr %q, align 4, !alias.scope !4, !noalias !2
+  %q1 = getelementptr inbounds i32, ptr %q, i64 1
+  %v1 = load i32, ptr %q1, align 4, !alias.scope !4, !noalias !2
   %add = add i32 %v0, %v1
   ret i32 %add
 }
@@ -34,7 +34,7 @@ define i32 @test_memcpy(i32* nocapture %p, i32* nocapture readonly %q) {
 ; MIR-LABEL: name: test_memcpy_inline
 ; MIR:      %2:fpr128 = LDRQui %0, 1 :: (load (s128) from %ir.p1, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
 ; MIR-NEXT: STRQui killed %2, %0, 0 :: (store (s128) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-define i32 @test_memcpy_inline(i32* nocapture %p, i32* nocapture readonly %q) {
+define i32 @test_memcpy_inline(ptr nocapture %p, ptr nocapture readonly %q) {
 ; CHECK-LABEL: test_memcpy_inline:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp w9, w10, [x1]
@@ -43,13 +43,13 @@ define i32 @test_memcpy_inline(i32* nocapture %p, i32* nocapture readonly %q) {
 ; CHECK-NEXT:    add w0, w9, w10
 ; CHECK-NEXT:    str q0, [x8]
 ; CHECK-NEXT:    ret
-  %p0 = bitcast i32* %p to i8*
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
-  %p1 = bitcast i32* %add.ptr to i8*
-  tail call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
-  %v0 = load i32, i32* %q, align 4, !alias.scope !4, !noalias !2
-  %q1 = getelementptr inbounds i32, i32* %q, i64 1
-  %v1 = load i32, i32* %q1, align 4, !alias.scope !4, !noalias !2
+  %p0 = bitcast ptr %p to ptr
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
+  %p1 = bitcast ptr %add.ptr to ptr
+  tail call void @llvm.memcpy.inline.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
+  %v0 = load i32, ptr %q, align 4, !alias.scope !4, !noalias !2
+  %q1 = getelementptr inbounds i32, ptr %q, i64 1
+  %v1 = load i32, ptr %q1, align 4, !alias.scope !4, !noalias !2
   %add = add i32 %v0, %v1
   ret i32 %add
 }
@@ -57,7 +57,7 @@ define i32 @test_memcpy_inline(i32* nocapture %p, i32* nocapture readonly %q) {
 ; MIR-LABEL: name: test_memmove
 ; MIR:      %2:fpr128 = LDRQui %0, 1 :: (load (s128) from %ir.p1, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
 ; MIR-NEXT: STRQui killed %2, %0, 0 :: (store (s128) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-define i32 @test_memmove(i32* nocapture %p, i32* nocapture readonly %q) {
+define i32 @test_memmove(ptr nocapture %p, ptr nocapture readonly %q) {
 ; CHECK-LABEL: test_memmove:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp w9, w10, [x1]
@@ -66,13 +66,13 @@ define i32 @test_memmove(i32* nocapture %p, i32* nocapture readonly %q) {
 ; CHECK-NEXT:    add w0, w9, w10
 ; CHECK-NEXT:    str q0, [x8]
 ; CHECK-NEXT:    ret
-  %p0 = bitcast i32* %p to i8*
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
-  %p1 = bitcast i32* %add.ptr to i8*
-  tail call void @llvm.memmove.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
-  %v0 = load i32, i32* %q, align 4, !alias.scope !4, !noalias !2
-  %q1 = getelementptr inbounds i32, i32* %q, i64 1
-  %v1 = load i32, i32* %q1, align 4, !alias.scope !4, !noalias !2
+  %p0 = bitcast ptr %p to ptr
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
+  %p1 = bitcast ptr %add.ptr to ptr
+  tail call void @llvm.memmove.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !2, !noalias !4
+  %v0 = load i32, ptr %q, align 4, !alias.scope !4, !noalias !2
+  %q1 = getelementptr inbounds i32, ptr %q, i64 1
+  %v1 = load i32, ptr %q1, align 4, !alias.scope !4, !noalias !2
   %add = add i32 %v0, %v1
   ret i32 %add
 }
@@ -81,7 +81,7 @@ define i32 @test_memmove(i32* nocapture %p, i32* nocapture readonly %q) {
 ; MIR:      %2:gpr64 = MOVi64imm -6148914691236517206
 ; MIR-NEXT: STRXui %2, %0, 1 :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
 ; MIR-NEXT: STRXui %2, %0, 0 :: (store (s64) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-define i32 @test_memset(i32* nocapture %p, i32* nocapture readonly %q) {
+define i32 @test_memset(ptr nocapture %p, ptr nocapture readonly %q) {
 ; CHECK-LABEL: test_memset:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp w10, w11, [x1]
@@ -90,11 +90,11 @@ define i32 @test_memset(i32* nocapture %p, i32* nocapture readonly %q) {
 ; CHECK-NEXT:    stp x9, x9, [x8]
 ; CHECK-NEXT:    add w0, w10, w11
 ; CHECK-NEXT:    ret
-  %p0 = bitcast i32* %p to i8*
-  tail call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8 170, i64 16, i1 false), !alias.scope !2, !noalias !4
-  %v0 = load i32, i32* %q, align 4, !alias.scope !4, !noalias !2
-  %q1 = getelementptr inbounds i32, i32* %q, i64 1
-  %v1 = load i32, i32* %q1, align 4, !alias.scope !4, !noalias !2
+  %p0 = bitcast ptr %p to ptr
+  tail call void @llvm.memset.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) %p0, i8 170, i64 16, i1 false), !alias.scope !2, !noalias !4
+  %v0 = load i32, ptr %q, align 4, !alias.scope !4, !noalias !2
+  %q1 = getelementptr inbounds i32, ptr %q, i64 1
+  %v1 = load i32, ptr %q1, align 4, !alias.scope !4, !noalias !2
   %add = add i32 %v0, %v1
   ret i32 %add
 }
@@ -102,7 +102,7 @@ define i32 @test_memset(i32* nocapture %p, i32* nocapture readonly %q) {
 ; MIR-LABEL: name: test_mempcpy
 ; MIR:      %2:fpr128 = LDRQui %0, 1 :: (load (s128) from %ir.p1, align 1, !alias.scope ![[SET0]], !noalias ![[SET1]])
 ; MIR-NEXT: STRQui killed %2, %0, 0 :: (store (s128) into %ir.p0, align 1, !alias.scope ![[SET0]], !noalias ![[SET1]])
-define i32 @test_mempcpy(i32* nocapture %p, i32* nocapture readonly %q) {
+define i32 @test_mempcpy(ptr nocapture %p, ptr nocapture readonly %q) {
 ; CHECK-LABEL: test_mempcpy:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp w9, w10, [x1]
@@ -111,23 +111,23 @@ define i32 @test_mempcpy(i32* nocapture %p, i32* nocapture readonly %q) {
 ; CHECK-NEXT:    add w0, w9, w10
 ; CHECK-NEXT:    str q0, [x8]
 ; CHECK-NEXT:    ret
-  %p0 = bitcast i32* %p to i8*
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 4
-  %p1 = bitcast i32* %add.ptr to i8*
-  %call = tail call i8* @mempcpy(i8* noundef nonnull align 4 dereferenceable(16) %p0, i8* noundef nonnull align 4 dereferenceable(16) %p1, i64 16), !alias.scope !2, !noalias !4
-  %v0 = load i32, i32* %q, align 4, !alias.scope !4, !noalias !2
-  %q1 = getelementptr inbounds i32, i32* %q, i64 1
-  %v1 = load i32, i32* %q1, align 4, !alias.scope !4, !noalias !2
+  %p0 = bitcast ptr %p to ptr
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 4
+  %p1 = bitcast ptr %add.ptr to ptr
+  %call = tail call ptr @mempcpy(ptr noundef nonnull align 4 dereferenceable(16) %p0, ptr noundef nonnull align 4 dereferenceable(16) %p1, i64 16), !alias.scope !2, !noalias !4
+  %v0 = load i32, ptr %q, align 4, !alias.scope !4, !noalias !2
+  %q1 = getelementptr inbounds i32, ptr %q, i64 1
+  %v1 = load i32, ptr %q1, align 4, !alias.scope !4, !noalias !2
   %add = add i32 %v0, %v1
   ret i32 %add
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
-declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1 immarg)
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
+declare void @llvm.memcpy.inline.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
 
-declare i8* @mempcpy(i8*, i8*, i64)
+declare ptr @mempcpy(ptr, ptr, i64)
 
 !0 = distinct !{!0, !"bax"}
 !1 = distinct !{!1, !0, !"bax: %p"}

diff  --git a/llvm/test/CodeGen/AArch64/merge-scoped-aa-store.ll b/llvm/test/CodeGen/AArch64/merge-scoped-aa-store.ll
index d3ca29a3ad6c4..871bc3b0a3237 100644
--- a/llvm/test/CodeGen/AArch64/merge-scoped-aa-store.ll
+++ b/llvm/test/CodeGen/AArch64/merge-scoped-aa-store.ll
@@ -12,23 +12,23 @@
 ; MIR-DAG: ![[SET2:[0-9]+]] = !{![[SCOPE2]]}
 ; MIR-DAG: ![[SET3:[0-9]+]] = !{![[SCOPE3]]}
 
-define void @blam0(<3 x float>* %g0, <3 x float>* %g1) {
+define void @blam0(ptr %g0, ptr %g1) {
 ; MIR-LABEL: name: blam0
 ; MIR: LDRDui %0, 0 :: (load (s64) from %ir.g0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
 ; MIR: STRDui killed %5, %1, 0 :: (store (s64) into %ir.tmp41, align 4, !alias.scope ![[SET1]], !noalias ![[SET0]])
-  %tmp4 = getelementptr inbounds <3 x float>, <3 x float>* %g1, i64 0, i64 0
-  %tmp5 = load <3 x float>, <3 x float>* %g0, align 4, !alias.scope !0, !noalias !1
+  %tmp4 = getelementptr inbounds <3 x float>, ptr %g1, i64 0, i64 0
+  %tmp5 = load <3 x float>, ptr %g0, align 4, !alias.scope !0, !noalias !1
   %tmp6 = extractelement <3 x float> %tmp5, i64 0
-  store float %tmp6, float* %tmp4, align 4, !alias.scope !1, !noalias !0
-  %tmp7 = getelementptr inbounds float, float* %tmp4, i64 1
-  %tmp8 = load <3 x float>, <3 x float>* %g0, align 4, !alias.scope !0, !noalias !1
+  store float %tmp6, ptr %tmp4, align 4, !alias.scope !1, !noalias !0
+  %tmp7 = getelementptr inbounds float, ptr %tmp4, i64 1
+  %tmp8 = load <3 x float>, ptr %g0, align 4, !alias.scope !0, !noalias !1
   %tmp9 = extractelement <3 x float> %tmp8, i64 1
-  store float %tmp9, float* %tmp7, align 4, !alias.scope !1, !noalias !0
+  store float %tmp9, ptr %tmp7, align 4, !alias.scope !1, !noalias !0
   ret void;
 }
 
 ; Ensure new scoped AA metadata are calculated after merging stores.
-define void @blam1(<3 x float>* %g0, <3 x float>* %g1) {
+define void @blam1(ptr %g0, ptr %g1) {
 ; MIR-LABEL: name: blam1
 ; MIR: machineMetadataNodes:
 ; MIR-DAG: ![[MMSET0:[0-9]+]] = !{![[SCOPE2]], ![[SCOPE1]]}
@@ -36,14 +36,14 @@ define void @blam1(<3 x float>* %g0, <3 x float>* %g1) {
 ; MIR: body:
 ; MIR: LDRDui %0, 0 :: (load (s64) from %ir.g0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
 ; MIR: STRDui killed %5, %1, 0 :: (store (s64) into %ir.tmp41, align 4, !alias.scope ![[MMSET0]], !noalias ![[MMSET1]])
-  %tmp4 = getelementptr inbounds <3 x float>, <3 x float>* %g1, i64 0, i64 0
-  %tmp5 = load <3 x float>, <3 x float>* %g0, align 4, !alias.scope !0, !noalias !1
+  %tmp4 = getelementptr inbounds <3 x float>, ptr %g1, i64 0, i64 0
+  %tmp5 = load <3 x float>, ptr %g0, align 4, !alias.scope !0, !noalias !1
   %tmp6 = extractelement <3 x float> %tmp5, i64 0
-  store float %tmp6, float* %tmp4, align 4, !alias.scope !1, !noalias !0
-  %tmp7 = getelementptr inbounds float, float* %tmp4, i64 1
-  %tmp8 = load <3 x float>, <3 x float>* %g0, align 4, !alias.scope !0, !noalias !1
+  store float %tmp6, ptr %tmp4, align 4, !alias.scope !1, !noalias !0
+  %tmp7 = getelementptr inbounds float, ptr %tmp4, i64 1
+  %tmp8 = load <3 x float>, ptr %g0, align 4, !alias.scope !0, !noalias !1
   %tmp9 = extractelement <3 x float> %tmp8, i64 1
-  store float %tmp9, float* %tmp7, align 4, !alias.scope !5, !noalias !6
+  store float %tmp9, ptr %tmp7, align 4, !alias.scope !5, !noalias !6
   ret void;
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/merge-store.ll b/llvm/test/CodeGen/AArch64/merge-store.ll
index 716f84543cc76..f7201030da2c4 100644
--- a/llvm/test/CodeGen/AArch64/merge-store.ll
+++ b/llvm/test/CodeGen/AArch64/merge-store.ll
@@ -23,14 +23,14 @@ define void @blam() {
 ; MISALIGNED-NEXT:    add x8, x8, :lo12:g1
 ; MISALIGNED-NEXT:    str d0, [x8]
 ; MISALIGNED-NEXT:    ret
-  %tmp4 = getelementptr inbounds <3 x float>, <3 x float>* @g1, i64 0, i64 0
-  %tmp5 = load <3 x float>, <3 x float>* @g0, align 16
+  %tmp4 = getelementptr inbounds <3 x float>, ptr @g1, i64 0, i64 0
+  %tmp5 = load <3 x float>, ptr @g0, align 16
   %tmp6 = extractelement <3 x float> %tmp5, i64 0
-  store float %tmp6, float* %tmp4
-  %tmp7 = getelementptr inbounds float, float* %tmp4, i64 1
-  %tmp8 = load <3 x float>, <3 x float>* @g0, align 16
+  store float %tmp6, ptr %tmp4
+  %tmp7 = getelementptr inbounds float, ptr %tmp4, i64 1
+  %tmp8 = load <3 x float>, ptr @g0, align 16
   %tmp9 = extractelement <3 x float> %tmp8, i64 1
-  store float %tmp9, float* %tmp7
+  store float %tmp9, ptr %tmp7
   ret void;
 }
 
@@ -41,7 +41,7 @@ define void @blam() {
 ; unaligned 16-byte stores are slow. This test would infinite loop when
 ; the fastness of unaligned accesses was not specified correctly.
 
-define void @merge_vec_extract_stores(<4 x float> %v1, <2 x float>* %ptr) {
+define void @merge_vec_extract_stores(<4 x float> %v1, ptr %ptr) {
 ; SPLITTING-LABEL: merge_vec_extract_stores:
 ; SPLITTING:       // %bb.0:
 ; SPLITTING-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
@@ -53,14 +53,14 @@ define void @merge_vec_extract_stores(<4 x float> %v1, <2 x float>* %ptr) {
 ; MISALIGNED:       // %bb.0:
 ; MISALIGNED-NEXT:    stur q0, [x0, #24]
 ; MISALIGNED-NEXT:    ret
-  %idx0 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 3
-  %idx1 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 4
+  %idx0 = getelementptr inbounds <2 x float>, ptr %ptr, i64 3
+  %idx1 = getelementptr inbounds <2 x float>, ptr %ptr, i64 4
 
   %shuffle0 = shufflevector <4 x float> %v1, <4 x float> undef, <2 x i32> <i32 0, i32 1>
   %shuffle1 = shufflevector <4 x float> %v1, <4 x float> undef, <2 x i32> <i32 2, i32 3>
 
-  store <2 x float> %shuffle0, <2 x float>* %idx0, align 8
-  store <2 x float> %shuffle1, <2 x float>* %idx1, align 8
+  store <2 x float> %shuffle0, ptr %idx0, align 8
+  store <2 x float> %shuffle1, ptr %idx1, align 8
   ret void
 }
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:

diff  --git a/llvm/test/CodeGen/AArch64/multi-vector-load-size.ll b/llvm/test/CodeGen/AArch64/multi-vector-load-size.ll
index ecb953366a88e..97dc8f569a41a 100644
--- a/llvm/test/CodeGen/AArch64/multi-vector-load-size.ll
+++ b/llvm/test/CodeGen/AArch64/multi-vector-load-size.ll
@@ -4,103 +4,103 @@
 %struct.__neon_float32x2x3_t = type { <2 x float>,  <2 x float>,  <2 x float> }
 %struct.__neon_float32x2x4_t = type { <2 x float>,  <2 x float>, <2 x float>,  <2 x float> }
 
-declare %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld2.v2f32.p0f32(float*)
-declare %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld3.v2f32.p0f32(float*)
-declare %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld4.v2f32.p0f32(float*)
+declare %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld2.v2f32.p0(ptr)
+declare %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld3.v2f32.p0(ptr)
+declare %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld4.v2f32.p0(ptr)
 
-declare %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld1x2.v2f32.p0f32(float*)
-declare %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld1x3.v2f32.p0f32(float*)
-declare %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld1x4.v2f32.p0f32(float*)
+declare %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld1x2.v2f32.p0(ptr)
+declare %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld1x3.v2f32.p0(ptr)
+declare %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld1x4.v2f32.p0(ptr)
 
-declare %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld2r.v2f32.p0f32(float*)
-declare %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld3r.v2f32.p0f32(float*)
-declare %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld4r.v2f32.p0f32(float*)
+declare %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld2r.v2f32.p0(ptr)
+declare %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld3r.v2f32.p0(ptr)
+declare %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld4r.v2f32.p0(ptr)
 
-declare %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld2lane.v2f32.p0f32(<2 x float>, <2 x float>, i64, float*)
-declare %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld3lane.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, i64, float*)
-declare %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld4lane.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, <2 x float>, i64, float*)
+declare %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld2lane.v2f32.p0(<2 x float>, <2 x float>, i64, ptr)
+declare %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld3lane.v2f32.p0(<2 x float>, <2 x float>, <2 x float>, i64, ptr)
+declare %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld4lane.v2f32.p0(<2 x float>, <2 x float>, <2 x float>, <2 x float>, i64, ptr)
 
 
-define %struct.__neon_float32x2x2_t @test_ld2(float* %addr) {
+define %struct.__neon_float32x2x2_t @test_ld2(ptr %addr) {
   ; CHECK-LABEL: name: test_ld2
   ; CHECK: LD2Twov2s {{.*}} :: (load (s128) {{.*}})
-  %val = call %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld2.v2f32.p0f32(float* %addr)
+  %val = call %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld2.v2f32.p0(ptr %addr)
   ret %struct.__neon_float32x2x2_t %val
 }
 
-define %struct.__neon_float32x2x3_t @test_ld3(float* %addr) {
+define %struct.__neon_float32x2x3_t @test_ld3(ptr %addr) {
   ; CHECK-LABEL: name: test_ld3
   ; CHECK: LD3Threev2s {{.*}} :: (load (s192) {{.*}})
-  %val = call %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld3.v2f32.p0f32(float* %addr)
+  %val = call %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld3.v2f32.p0(ptr %addr)
   ret %struct.__neon_float32x2x3_t %val
 }
 
-define %struct.__neon_float32x2x4_t @test_ld4(float* %addr) {
+define %struct.__neon_float32x2x4_t @test_ld4(ptr %addr) {
   ; CHECK-LABEL: name: test_ld4
   ; CHECK: LD4Fourv2s {{.*}} :: (load (s256) {{.*}})
-  %val = call %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld4.v2f32.p0f32(float* %addr)
+  %val = call %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld4.v2f32.p0(ptr %addr)
   ret %struct.__neon_float32x2x4_t %val
 }
 
-define %struct.__neon_float32x2x2_t @test_ld1x2(float* %addr) {
+define %struct.__neon_float32x2x2_t @test_ld1x2(ptr %addr) {
   ; CHECK-LABEL: name: test_ld1x2
   ; CHECK: LD1Twov2s {{.*}} :: (load (s128) {{.*}})
-  %val = call %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld1x2.v2f32.p0f32(float* %addr)
+  %val = call %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld1x2.v2f32.p0(ptr %addr)
   ret %struct.__neon_float32x2x2_t %val
 }
 
-define %struct.__neon_float32x2x3_t @test_ld1x3(float* %addr) {
+define %struct.__neon_float32x2x3_t @test_ld1x3(ptr %addr) {
   ; CHECK-LABEL: name: test_ld1x3
   ; CHECK: LD1Threev2s {{.*}} :: (load (s192) {{.*}})
-  %val = call %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld1x3.v2f32.p0f32(float* %addr)
+  %val = call %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld1x3.v2f32.p0(ptr %addr)
   ret %struct.__neon_float32x2x3_t %val
 }
 
-define %struct.__neon_float32x2x4_t @test_ld1x4(float* %addr) {
+define %struct.__neon_float32x2x4_t @test_ld1x4(ptr %addr) {
   ; CHECK-LABEL: name: test_ld1x4
   ; CHECK: LD1Fourv2s {{.*}} :: (load (s256) {{.*}})
-  %val = call %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld1x4.v2f32.p0f32(float* %addr)
+  %val = call %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld1x4.v2f32.p0(ptr %addr)
   ret %struct.__neon_float32x2x4_t %val
 }
 
-define %struct.__neon_float32x2x2_t @test_ld2r(float* %addr) {
+define %struct.__neon_float32x2x2_t @test_ld2r(ptr %addr) {
   ; CHECK-LABEL: name: test_ld2r
   ; CHECK: LD2Rv2s {{.*}} :: (load (s64) {{.*}})
-  %val = call %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld2r.v2f32.p0f32(float* %addr)
+  %val = call %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld2r.v2f32.p0(ptr %addr)
   ret %struct.__neon_float32x2x2_t %val
 }
 
-define %struct.__neon_float32x2x3_t @test_ld3r(float* %addr) {
+define %struct.__neon_float32x2x3_t @test_ld3r(ptr %addr) {
   ; CHECK-LABEL: name: test_ld3r
   ; CHECK: LD3Rv2s {{.*}} :: (load (s96) {{.*}})
-  %val = call %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld3r.v2f32.p0f32(float* %addr)
+  %val = call %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld3r.v2f32.p0(ptr %addr)
   ret %struct.__neon_float32x2x3_t %val
 }
 
-define %struct.__neon_float32x2x4_t @test_ld4r(float* %addr) {
+define %struct.__neon_float32x2x4_t @test_ld4r(ptr %addr) {
   ; CHECK-LABEL: name: test_ld4r
   ; CHECK: LD4Rv2s {{.*}} :: (load (s128) {{.*}})
-  %val = call %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld4r.v2f32.p0f32(float* %addr)
+  %val = call %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld4r.v2f32.p0(ptr %addr)
   ret %struct.__neon_float32x2x4_t %val
 }
 
-define %struct.__neon_float32x2x2_t @test_ld2lane(<2 x float> %a, <2 x float> %b, float* %addr) {
+define %struct.__neon_float32x2x2_t @test_ld2lane(<2 x float> %a, <2 x float> %b, ptr %addr) {
   ; CHECK-LABEL: name: test_ld2lane
   ; CHECK: {{.*}} LD2i32 {{.*}}
-  %val = call %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld2lane.v2f32.p0f32(<2 x float> %a, <2 x float> %b, i64 1, float* %addr)
+  %val = call %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld2lane.v2f32.p0(<2 x float> %a, <2 x float> %b, i64 1, ptr %addr)
   ret %struct.__neon_float32x2x2_t %val
 }
 
-define %struct.__neon_float32x2x3_t @test_ld3lane(<2 x float> %a, <2 x float> %b, <2 x float> %c, float* %addr) {
+define %struct.__neon_float32x2x3_t @test_ld3lane(<2 x float> %a, <2 x float> %b, <2 x float> %c, ptr %addr) {
   ; CHECK-LABEL: name: test_ld3lane
   ; CHECK: {{.*}} LD3i32 {{.*}}
-  %val = call %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld3lane.v2f32.p0f32(<2 x float> %a, <2 x float> %b, <2 x float> %c, i64 1, float* %addr)
+  %val = call %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld3lane.v2f32.p0(<2 x float> %a, <2 x float> %b, <2 x float> %c, i64 1, ptr %addr)
   ret %struct.__neon_float32x2x3_t %val
 }
 
-define %struct.__neon_float32x2x4_t @test_ld4lane(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x float> %d, float* %addr) {
+define %struct.__neon_float32x2x4_t @test_ld4lane(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x float> %d, ptr %addr) {
   ; CHECK-LABEL: name: test_ld4lane
   ; CHECK: {{.*}} LD4i32 {{.*}}
-  %val = call %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld4lane.v2f32.p0f32(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x float> %d, i64 1, float* %addr)
+  %val = call %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld4lane.v2f32.p0(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x float> %d, i64 1, ptr %addr)
   ret %struct.__neon_float32x2x4_t %val
 }
\ No newline at end of file

diff  --git a/llvm/test/CodeGen/AArch64/nontemporal-load.ll b/llvm/test/CodeGen/AArch64/nontemporal-load.ll
index 0c2e3916426f5..e93fb40897078 100644
--- a/llvm/test/CodeGen/AArch64/nontemporal-load.ll
+++ b/llvm/test/CodeGen/AArch64/nontemporal-load.ll
@@ -624,7 +624,7 @@ define <16 x double> @test_ldnp_v16f64(ptr %A) {
   ret <16 x double> %lv
 }
 
-define <vscale x 20 x float> @test_ldnp_v20f32_vscale(<vscale x 20 x float>* %A) {
+define <vscale x 20 x float> @test_ldnp_v20f32_vscale(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v20f32_vscale:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -644,7 +644,7 @@ define <vscale x 20 x float> @test_ldnp_v20f32_vscale(<vscale x 20 x float>* %A)
 ; CHECK-BE-NEXT:    ld1w { z3.s }, p0/z, [x0, #3, mul vl]
 ; CHECK-BE-NEXT:    ld1w { z4.s }, p0/z, [x0, #4, mul vl]
 ; CHECK-BE-NEXT:    ret
-  %lv = load<vscale x 20 x float>, <vscale x 20 x float>* %A, align 8, !nontemporal !0
+  %lv = load<vscale x 20 x float>, ptr %A, align 8, !nontemporal !0
   ret <vscale x 20 x float> %lv
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/pre-indexed-addrmode-with-constant-offset.ll b/llvm/test/CodeGen/AArch64/pre-indexed-addrmode-with-constant-offset.ll
index c93864cf73d2c..1adeeabd00fc1 100644
--- a/llvm/test/CodeGen/AArch64/pre-indexed-addrmode-with-constant-offset.ll
+++ b/llvm/test/CodeGen/AArch64/pre-indexed-addrmode-with-constant-offset.ll
@@ -3,7 +3,7 @@
 ; Reduced test from https://github.com/llvm/llvm-project/issues/60645.
 ; To check that we are generating -32 as offset for the first store.
 
-define i8* @pr60645(i8* %ptr, i64 %t0) {
+define ptr @pr60645(ptr %ptr, i64 %t0) {
 ; CHECK-LABEL: pr60645:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub x8, x0, x1, lsl #2
@@ -12,13 +12,13 @@ define i8* @pr60645(i8* %ptr, i64 %t0) {
 ; CHECK-NEXT:    ret
   %t1 = add nuw nsw i64 %t0, 8
   %t2 = mul i64 %t1, -4
-  %t3 = getelementptr i8, i8* %ptr, i64 %t2
-  %t4 = bitcast i8* %t3 to i32*
-  store i32 0, i32* %t4, align 4
+  %t3 = getelementptr i8, ptr %ptr, i64 %t2
+  %t4 = bitcast ptr %t3 to ptr
+  store i32 0, ptr %t4, align 4
   %t5 = shl i64 %t1, 2
   %t6 = sub nuw nsw i64 -8, %t5
-  %t7 = getelementptr i8, i8* %ptr, i64 %t6
-  %t8 = bitcast i8* %t7 to i32*
-  store i32 0, i32* %t8, align 4
-  ret i8* %ptr
+  %t7 = getelementptr i8, ptr %ptr, i64 %t6
+  %t8 = bitcast ptr %t7 to ptr
+  store i32 0, ptr %t8, align 4
+  ret ptr %ptr
 }

diff  --git a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
index 178336870373e..932b230726a3a 100644
--- a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
+++ b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
@@ -273,14 +273,14 @@ for.cond1.preheader:                              ; preds = %for.cond1.preheader
   %28 = phi <2 x i64> [ undef, %entry ], [ %41, %for.cond1.preheader ]
   %29 = phi <2 x i64> [ undef, %entry ], [ %39, %for.cond1.preheader ]
   %indvars.iv40 = phi i64 [ 0, %entry ], [ %indvars.iv.next41, %for.cond1.preheader ]
-  %30 = load <2 x i64>, <2 x i64>* null, align 8
-  %31 = load <2 x i64>, <2 x i64>* undef, align 8
-  %arrayidx14.4.phi.trans.insert = getelementptr inbounds [8 x [8 x i64]], [8 x [8 x i64]]* @B, i64 0, i64 %indvars.iv40, i64 4
-  %32 = load <2 x i64>, <2 x i64>* null, align 8
-  %arrayidx14.6.phi.trans.insert = getelementptr inbounds [8 x [8 x i64]], [8 x [8 x i64]]* @B, i64 0, i64 %indvars.iv40, i64 6
-  %33 = bitcast i64* %arrayidx14.6.phi.trans.insert to <2 x i64>*
-  %34 = load <2 x i64>, <2 x i64>* %33, align 8
-  %35 = load i64, i64* null, align 8
+  %30 = load <2 x i64>, ptr null, align 8
+  %31 = load <2 x i64>, ptr undef, align 8
+  %arrayidx14.4.phi.trans.insert = getelementptr inbounds [8 x [8 x i64]], ptr @B, i64 0, i64 %indvars.iv40, i64 4
+  %32 = load <2 x i64>, ptr null, align 8
+  %arrayidx14.6.phi.trans.insert = getelementptr inbounds [8 x [8 x i64]], ptr @B, i64 0, i64 %indvars.iv40, i64 6
+  %33 = bitcast ptr %arrayidx14.6.phi.trans.insert to ptr
+  %34 = load <2 x i64>, ptr %33, align 8
+  %35 = load i64, ptr null, align 8
   %36 = insertelement <2 x i64> undef, i64 %35, i32 0
   %37 = shufflevector <2 x i64> %36, <2 x i64> undef, <2 x i32> zeroinitializer
   %38 = mul nsw <2 x i64> %30, %37
@@ -291,7 +291,7 @@ for.cond1.preheader:                              ; preds = %for.cond1.preheader
   %43 = add nsw <2 x i64> %27, %42
   %44 = mul nsw <2 x i64> %34, %37
   %45 = add nsw <2 x i64> %26, %44
-  %46 = load i64, i64* undef, align 8
+  %46 = load i64, ptr undef, align 8
   %47 = insertelement <2 x i64> undef, i64 %46, i32 0
   %48 = shufflevector <2 x i64> %47, <2 x i64> undef, <2 x i32> zeroinitializer
   %49 = mul nsw <2 x i64> %30, %48
@@ -302,8 +302,8 @@ for.cond1.preheader:                              ; preds = %for.cond1.preheader
   %54 = add nsw <2 x i64> %23, %53
   %55 = mul nsw <2 x i64> %34, %48
   %56 = add nsw <2 x i64> %22, %55
-  %arrayidx10.2 = getelementptr inbounds [8 x [8 x i64]], [8 x [8 x i64]]* @A, i64 0, i64 2, i64 %indvars.iv40
-  %57 = load i64, i64* %arrayidx10.2, align 8
+  %arrayidx10.2 = getelementptr inbounds [8 x [8 x i64]], ptr @A, i64 0, i64 2, i64 %indvars.iv40
+  %57 = load i64, ptr %arrayidx10.2, align 8
   %58 = insertelement <2 x i64> undef, i64 %57, i32 0
   %59 = shufflevector <2 x i64> %58, <2 x i64> undef, <2 x i32> zeroinitializer
   %60 = mul nsw <2 x i64> %31, %59
@@ -312,7 +312,7 @@ for.cond1.preheader:                              ; preds = %for.cond1.preheader
   %63 = add nsw <2 x i64> %20, %62
   %64 = mul nsw <2 x i64> %34, %59
   %65 = add nsw <2 x i64> %19, %64
-  %66 = load i64, i64* undef, align 8
+  %66 = load i64, ptr undef, align 8
   %67 = insertelement <2 x i64> undef, i64 %66, i32 0
   %68 = shufflevector <2 x i64> %67, <2 x i64> undef, <2 x i32> zeroinitializer
   %69 = mul nsw <2 x i64> %30, %68
@@ -321,7 +321,7 @@ for.cond1.preheader:                              ; preds = %for.cond1.preheader
   %72 = add nsw <2 x i64> %17, %71
   %73 = mul nsw <2 x i64> %34, %68
   %74 = add nsw <2 x i64> %16, %73
-  %75 = load i64, i64* undef, align 8
+  %75 = load i64, ptr undef, align 8
   %76 = insertelement <2 x i64> undef, i64 %75, i32 0
   %77 = shufflevector <2 x i64> %76, <2 x i64> undef, <2 x i32> zeroinitializer
   %78 = mul nsw <2 x i64> %30, %77
@@ -332,7 +332,7 @@ for.cond1.preheader:                              ; preds = %for.cond1.preheader
   %83 = add nsw <2 x i64> %13, %82
   %84 = mul nsw <2 x i64> %34, %77
   %85 = add nsw <2 x i64> %12, %84
-  %86 = load i64, i64* undef, align 8
+  %86 = load i64, ptr undef, align 8
   %87 = insertelement <2 x i64> undef, i64 %86, i32 0
   %88 = shufflevector <2 x i64> %87, <2 x i64> undef, <2 x i32> zeroinitializer
   %89 = mul nsw <2 x i64> %30, %88
@@ -343,7 +343,7 @@ for.cond1.preheader:                              ; preds = %for.cond1.preheader
   %94 = add nsw <2 x i64> %9, %93
   %95 = mul nsw <2 x i64> %34, %88
   %96 = add nsw <2 x i64> %8, %95
-  %97 = load i64, i64* undef, align 8
+  %97 = load i64, ptr undef, align 8
   %98 = insertelement <2 x i64> undef, i64 %97, i32 0
   %99 = shufflevector <2 x i64> %98, <2 x i64> undef, <2 x i32> zeroinitializer
   %100 = mul nsw <2 x i64> %30, %99
@@ -354,7 +354,7 @@ for.cond1.preheader:                              ; preds = %for.cond1.preheader
   %105 = add nsw <2 x i64> %5, %104
   %106 = mul nsw <2 x i64> %34, %99
   %107 = add nsw <2 x i64> %4, %106
-  %108 = load i64, i64* undef, align 8
+  %108 = load i64, ptr undef, align 8
   %109 = insertelement <2 x i64> undef, i64 %108, i32 0
   %110 = shufflevector <2 x i64> %109, <2 x i64> undef, <2 x i32> zeroinitializer
   %111 = mul nsw <2 x i64> %30, %110
@@ -370,35 +370,35 @@ for.cond1.preheader:                              ; preds = %for.cond1.preheader
   br i1 %exitcond42, label %for.cond.cleanup, label %for.cond1.preheader
 
 for.cond.cleanup:                                 ; preds = %for.cond1.preheader
-  store <2 x i64> %39, <2 x i64>* bitcast ([8 x [8 x i64]]* @C to <2 x i64>*), align 8
-  store <2 x i64> %41, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 0, i64 2) to <2 x i64>*), align 8
-  store <2 x i64> %43, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 0, i64 4) to <2 x i64>*), align 8
-  store <2 x i64> %45, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 0, i64 6) to <2 x i64>*), align 8
-  store <2 x i64> %50, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 1, i64 0) to <2 x i64>*), align 8
-  store <2 x i64> %52, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 1, i64 2) to <2 x i64>*), align 8
-  store <2 x i64> %54, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 1, i64 4) to <2 x i64>*), align 8
-  store <2 x i64> %56, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 1, i64 6) to <2 x i64>*), align 8
-  store <2 x i64> %61, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 2, i64 2) to <2 x i64>*), align 8
-  store <2 x i64> %63, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 2, i64 4) to <2 x i64>*), align 8
-  store <2 x i64> %65, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 2, i64 6) to <2 x i64>*), align 8
-  store <2 x i64> %70, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 3, i64 0) to <2 x i64>*), align 8
-  store <2 x i64> %72, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 3, i64 2) to <2 x i64>*), align 8
-  store <2 x i64> %74, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 3, i64 6) to <2 x i64>*), align 8
-  store <2 x i64> %79, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 4, i64 0) to <2 x i64>*), align 8
-  store <2 x i64> %81, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 4, i64 2) to <2 x i64>*), align 8
-  store <2 x i64> %83, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 4, i64 4) to <2 x i64>*), align 8
-  store <2 x i64> %85, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 4, i64 6) to <2 x i64>*), align 8
-  store <2 x i64> %90, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 5, i64 0) to <2 x i64>*), align 8
-  store <2 x i64> %92, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 5, i64 2) to <2 x i64>*), align 8
-  store <2 x i64> %94, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 5, i64 4) to <2 x i64>*), align 8
-  store <2 x i64> %96, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 5, i64 6) to <2 x i64>*), align 8
-  store <2 x i64> %101, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 6, i64 0) to <2 x i64>*), align 8
-  store <2 x i64> %103, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 6, i64 2) to <2 x i64>*), align 8
-  store <2 x i64> %105, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 6, i64 4) to <2 x i64>*), align 8
-  store <2 x i64> %107, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 6, i64 6) to <2 x i64>*), align 8
-  store <2 x i64> %112, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 7, i64 0) to <2 x i64>*), align 8
-  store <2 x i64> %114, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 7, i64 2) to <2 x i64>*), align 8
-  store <2 x i64> %116, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 7, i64 4) to <2 x i64>*), align 8
-  store <2 x i64> %118, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x [8 x i64]], [8 x [8 x i64]]* @C, i64 0, i64 7, i64 6) to <2 x i64>*), align 8
+  store <2 x i64> %39, ptr @C, align 8
+  store <2 x i64> %41, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 0, i64 2), align 8
+  store <2 x i64> %43, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 0, i64 4), align 8
+  store <2 x i64> %45, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 0, i64 6), align 8
+  store <2 x i64> %50, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 1, i64 0), align 8
+  store <2 x i64> %52, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 1, i64 2), align 8
+  store <2 x i64> %54, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 1, i64 4), align 8
+  store <2 x i64> %56, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 1, i64 6), align 8
+  store <2 x i64> %61, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 2, i64 2), align 8
+  store <2 x i64> %63, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 2, i64 4), align 8
+  store <2 x i64> %65, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 2, i64 6), align 8
+  store <2 x i64> %70, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 3, i64 0), align 8
+  store <2 x i64> %72, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 3, i64 2), align 8
+  store <2 x i64> %74, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 3, i64 6), align 8
+  store <2 x i64> %79, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 4, i64 0), align 8
+  store <2 x i64> %81, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 4, i64 2), align 8
+  store <2 x i64> %83, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 4, i64 4), align 8
+  store <2 x i64> %85, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 4, i64 6), align 8
+  store <2 x i64> %90, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 5, i64 0), align 8
+  store <2 x i64> %92, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 5, i64 2), align 8
+  store <2 x i64> %94, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 5, i64 4), align 8
+  store <2 x i64> %96, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 5, i64 6), align 8
+  store <2 x i64> %101, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 6, i64 0), align 8
+  store <2 x i64> %103, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 6, i64 2), align 8
+  store <2 x i64> %105, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 6, i64 4), align 8
+  store <2 x i64> %107, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 6, i64 6), align 8
+  store <2 x i64> %112, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 7, i64 0), align 8
+  store <2 x i64> %114, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 7, i64 2), align 8
+  store <2 x i64> %116, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 7, i64 4), align 8
+  store <2 x i64> %118, ptr getelementptr inbounds ([8 x [8 x i64]], ptr @C, i64 0, i64 7, i64 6), align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/rvmarker-pseudo-expansion-and-outlining.mir b/llvm/test/CodeGen/AArch64/rvmarker-pseudo-expansion-and-outlining.mir
index 4c2f002d37d25..a3cab0c1afead 100644
--- a/llvm/test/CodeGen/AArch64/rvmarker-pseudo-expansion-and-outlining.mir
+++ b/llvm/test/CodeGen/AArch64/rvmarker-pseudo-expansion-and-outlining.mir
@@ -41,7 +41,7 @@
 
   declare void @cb2()
 
-  declare i8* @attachedcall()
+  declare ptr @attachedcall()
 ...
 ---
 name:            fn1

diff  --git a/llvm/test/CodeGen/AArch64/sched-movprfx.ll b/llvm/test/CodeGen/AArch64/sched-movprfx.ll
index f625aa6422107..9e88d1659d45f 100644
--- a/llvm/test/CodeGen/AArch64/sched-movprfx.ll
+++ b/llvm/test/CodeGen/AArch64/sched-movprfx.ll
@@ -7,7 +7,7 @@
 
 
; NOTE: The unused parameter ensures z0/z1 is free, avoiding the antidependence for scheduling.
-define <vscale x 2 x i64> @and_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64>* %base) {
+define <vscale x 2 x i64> @and_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, ptr %base) {
 ; CHECK-LABEL: and_i64_zero:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0]
@@ -17,7 +17,7 @@ define <vscale x 2 x i64> @and_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64
 ; CHECK-NEXT:    add z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %data0 = tail call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %c, i1 0)
-  %data1 = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>* %base,
+  %data1 = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr %base,
                                                             i32 1,
                                                             <vscale x 2 x i1> %pg,
                                                             <vscale x 2 x i64> undef)
@@ -26,4 +26,4 @@ define <vscale x 2 x i64> @and_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64
 }
 
 declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)
-declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>*, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)

diff  --git a/llvm/test/CodeGen/AArch64/settag-merge.mir b/llvm/test/CodeGen/AArch64/settag-merge.mir
index 03524e780df02..1eb23ef9cba75 100644
--- a/llvm/test/CodeGen/AArch64/settag-merge.mir
+++ b/llvm/test/CodeGen/AArch64/settag-merge.mir
@@ -1,17 +1,17 @@
 # RUN: llc -mtriple=aarch64 -mattr=+mte -run-pass=prologepilog %s -o - | FileCheck %s
 
 --- |
-  declare void @llvm.aarch64.settag(i8* nocapture writeonly, i64) argmemonly nounwind writeonly "target-features"="+mte"
+  declare void @llvm.aarch64.settag(ptr nocapture writeonly, i64) argmemonly nounwind writeonly "target-features"="+mte"
   define i32 @stg16_16_16_16_ret() "target-features"="+mte" {
   entry:
     %a = alloca i8, i32 16, align 16
     %b = alloca i8, i32 16, align 16
     %c = alloca i8, i32 16, align 16
     %d = alloca i8, i32 16, align 16
-    call void @llvm.aarch64.settag(i8* %a, i64 16)
-    call void @llvm.aarch64.settag(i8* %b, i64 16)
-    call void @llvm.aarch64.settag(i8* %c, i64 16)
-    call void @llvm.aarch64.settag(i8* %d, i64 16)
+    call void @llvm.aarch64.settag(ptr %a, i64 16)
+    call void @llvm.aarch64.settag(ptr %b, i64 16)
+    call void @llvm.aarch64.settag(ptr %c, i64 16)
+    call void @llvm.aarch64.settag(ptr %d, i64 16)
     ret i32 0
   }
 
@@ -19,9 +19,9 @@
   entry:
     %a = alloca i8, i32 16, align 16
     %b = alloca i8, i32 128, align 16
-    call void @llvm.aarch64.settag(i8* %a, i64 16)
-    store i8 42, i8* %a
-    call void @llvm.aarch64.settag(i8* %b, i64 128)
+    call void @llvm.aarch64.settag(ptr %a, i64 16)
+    store i8 42, ptr %a
+    call void @llvm.aarch64.settag(ptr %b, i64 128)
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/AArch64/sme-intrinsics-mova-extract.ll b/llvm/test/CodeGen/AArch64/sme-intrinsics-mova-extract.ll
index 6d534060293d6..25f3540766618 100644
--- a/llvm/test/CodeGen/AArch64/sme-intrinsics-mova-extract.ll
+++ b/llvm/test/CodeGen/AArch64/sme-intrinsics-mova-extract.ll
@@ -166,7 +166,7 @@ define <vscale x 8 x half> @extract_f16(<vscale x 8 x half> %zd, <vscale x 8 x i
   ret <vscale x 8 x half> %z0
 }
 
-define <vscale x 8 x bfloat> @extract_bf16(<vscale x 8 x bfloat> %zd, <vscale x 8 x i1> %pg, i32 %tileslice, <vscale x 8 x bfloat> *%ptr) {
+define <vscale x 8 x bfloat> @extract_bf16(<vscale x 8 x bfloat> %zd, <vscale x 8 x i1> %pg, i32 %tileslice, ptr %ptr) {
 ; CHECK-LABEL: extract_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z1.d, z0.d

diff  --git a/llvm/test/CodeGen/AArch64/speculation-hardening-sls.mir b/llvm/test/CodeGen/AArch64/speculation-hardening-sls.mir
index d7a5a7e3766b4..d2e06d65e3284 100644
--- a/llvm/test/CodeGen/AArch64/speculation-hardening-sls.mir
+++ b/llvm/test/CodeGen/AArch64/speculation-hardening-sls.mir
@@ -10,10 +10,10 @@
 # Check that the SLS hardening pass also protects BRA* indirect branches that
 # llvm currently does not generate.
 --- |
-  @ptr_aa = private unnamed_addr constant [2 x i8*] [i8* blockaddress(@br_aa, %return), i8* blockaddress(@br_aa, %l2)], align 8
-  @ptr_aaz = private unnamed_addr constant [2 x i8*] [i8* blockaddress(@br_aaz, %return), i8* blockaddress(@br_aaz, %l2)], align 8
-  @ptr_ab = private unnamed_addr constant [2 x i8*] [i8* blockaddress(@br_ab, %return), i8* blockaddress(@br_ab, %l2)], align 8
-  @ptr_abz = private unnamed_addr constant [2 x i8*] [i8* blockaddress(@br_abz, %return), i8* blockaddress(@br_abz, %l2)], align 8
+  @ptr_aa = private unnamed_addr constant [2 x ptr] [ptr blockaddress(@br_aa, %return), ptr blockaddress(@br_aa, %l2)], align 8
+  @ptr_aaz = private unnamed_addr constant [2 x ptr] [ptr blockaddress(@br_aaz, %return), ptr blockaddress(@br_aaz, %l2)], align 8
+  @ptr_ab = private unnamed_addr constant [2 x ptr] [ptr blockaddress(@br_ab, %return), ptr blockaddress(@br_ab, %l2)], align 8
+  @ptr_abz = private unnamed_addr constant [2 x ptr] [ptr blockaddress(@br_abz, %return), ptr blockaddress(@br_abz, %l2)], align 8
 
   define dso_local i32 @br_aa(i32 %a, i32 %b, i32 %i) {
   entry:

diff  --git a/llvm/test/CodeGen/AArch64/speculation-hardening.mir b/llvm/test/CodeGen/AArch64/speculation-hardening.mir
index 407ef2e0e905b..1e5fafb7242b8 100644
--- a/llvm/test/CodeGen/AArch64/speculation-hardening.mir
+++ b/llvm/test/CodeGen/AArch64/speculation-hardening.mir
@@ -31,7 +31,7 @@
   define void @indirect_call_x17(i32 %a, i32 %b) speculative_load_hardening {
    ret void
   }
-  @g = common dso_local local_unnamed_addr global i64 (...)* null, align 8
+  @g = common dso_local local_unnamed_addr global ptr null, align 8
   define void @indirect_tailcall_x17(i32 %a, i32 %b) speculative_load_hardening {
    ret void
   }

diff  --git a/llvm/test/CodeGen/AArch64/spillfill-sve.ll b/llvm/test/CodeGen/AArch64/spillfill-sve.ll
index 135d6f644fc3d..64e8dfc4fe907 100644
--- a/llvm/test/CodeGen/AArch64/spillfill-sve.ll
+++ b/llvm/test/CodeGen/AArch64/spillfill-sve.ll
@@ -11,8 +11,8 @@ define void @fill_nxv16i8() {
 ; CHECK-DAG: ld1b    { z{{[01]}}.b }, p0/z, [sp, #1, mul vl]
   %local0 = alloca <vscale x 16 x i8>
   %local1 = alloca <vscale x 16 x i8>
-  load volatile <vscale x 16 x i8>, <vscale x 16 x i8>* %local0
-  load volatile <vscale x 16 x i8>, <vscale x 16 x i8>* %local1
+  load volatile <vscale x 16 x i8>, ptr %local0
+  load volatile <vscale x 16 x i8>, ptr %local1
   ret void
 }
 
@@ -22,8 +22,8 @@ define void @fill_nxv8i8() {
 ; CHECK-DAG: ld1b    { z{{[01]}}.h }, p0/z, [sp, #1, mul vl]
   %local0 = alloca <vscale x 8 x i8>
   %local1 = alloca <vscale x 8 x i8>
-  load volatile <vscale x 8 x i8>, <vscale x 8 x i8>* %local0
-  load volatile <vscale x 8 x i8>, <vscale x 8 x i8>* %local1
+  load volatile <vscale x 8 x i8>, ptr %local0
+  load volatile <vscale x 8 x i8>, ptr %local1
   ret void
 }
 
@@ -33,9 +33,9 @@ define <vscale x 8 x i16> @fill_signed_nxv8i8() {
 ; CHECK-DAG: ld1sb    { z{{[01]}}.h }, p0/z, [sp, #1, mul vl]
   %local0 = alloca <vscale x 8 x i8>
   %local1 = alloca <vscale x 8 x i8>
-  %a = load volatile <vscale x 8 x i8>, <vscale x 8 x i8>* %local0
+  %a = load volatile <vscale x 8 x i8>, ptr %local0
   %a_ext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
-  %b = load volatile <vscale x 8 x i8>, <vscale x 8 x i8>* %local1
+  %b = load volatile <vscale x 8 x i8>, ptr %local1
   %b_ext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
   %sum = add <vscale x 8 x i16> %a_ext, %b_ext
   ret <vscale x 8 x i16> %sum
@@ -47,8 +47,8 @@ define void @fill_nxv4i8() {
 ; CHECK-DAG: ld1b    { z{{[01]}}.s }, p0/z, [sp, #2, mul vl]
   %local0 = alloca <vscale x 4 x i8>
   %local1 = alloca <vscale x 4 x i8>
-  load volatile <vscale x 4 x i8>, <vscale x 4 x i8>* %local0
-  load volatile <vscale x 4 x i8>, <vscale x 4 x i8>* %local1
+  load volatile <vscale x 4 x i8>, ptr %local0
+  load volatile <vscale x 4 x i8>, ptr %local1
   ret void
 }
 
@@ -58,9 +58,9 @@ define <vscale x 4 x i32> @fill_signed_nxv4i8() {
 ; CHECK-DAG: ld1sb    { z{{[01]}}.s }, p0/z, [sp, #2, mul vl]
   %local0 = alloca <vscale x 4 x i8>
   %local1 = alloca <vscale x 4 x i8>
-  %a = load volatile <vscale x 4 x i8>, <vscale x 4 x i8>* %local0
+  %a = load volatile <vscale x 4 x i8>, ptr %local0
   %a_ext = sext <vscale x 4 x i8> %a to <vscale x 4 x i32>
-  %b = load volatile <vscale x 4 x i8>, <vscale x 4 x i8>* %local1
+  %b = load volatile <vscale x 4 x i8>, ptr %local1
   %b_ext = sext <vscale x 4 x i8> %b to <vscale x 4 x i32>
   %sum = add <vscale x 4 x i32> %a_ext, %b_ext
   ret <vscale x 4 x i32> %sum
@@ -72,8 +72,8 @@ define void @fill_nxv2i8() {
 ; CHECK-DAG: ld1b    { z{{[01]}}.d }, p0/z, [sp, #6, mul vl]
   %local0 = alloca <vscale x 2 x i8>
   %local1 = alloca <vscale x 2 x i8>
-  load volatile <vscale x 2 x i8>, <vscale x 2 x i8>* %local0
-  load volatile <vscale x 2 x i8>, <vscale x 2 x i8>* %local1
+  load volatile <vscale x 2 x i8>, ptr %local0
+  load volatile <vscale x 2 x i8>, ptr %local1
   ret void
 }
 
@@ -83,9 +83,9 @@ define <vscale x 2 x i64> @fill_signed_nxv2i8() {
 ; CHECK-DAG: ld1sb    { z{{[01]}}.d }, p0/z, [sp, #6, mul vl]
   %local0 = alloca <vscale x 2 x i8>
   %local1 = alloca <vscale x 2 x i8>
-  %a = load volatile <vscale x 2 x i8>, <vscale x 2 x i8>* %local0
+  %a = load volatile <vscale x 2 x i8>, ptr %local0
   %a_ext = sext <vscale x 2 x i8> %a to <vscale x 2 x i64>
-  %b = load volatile <vscale x 2 x i8>, <vscale x 2 x i8>* %local1
+  %b = load volatile <vscale x 2 x i8>, ptr %local1
   %b_ext = sext <vscale x 2 x i8> %b to <vscale x 2 x i64>
   %sum = add <vscale x 2 x i64> %a_ext, %b_ext
   ret <vscale x 2 x i64> %sum
@@ -97,8 +97,8 @@ define void @fill_nxv8i16() {
 ; CHECK-DAG: ld1h    { z{{[01]}}.h }, p0/z, [sp, #1, mul vl]
   %local0 = alloca <vscale x 8 x i16>
   %local1 = alloca <vscale x 8 x i16>
-  load volatile <vscale x 8 x i16>, <vscale x 8 x i16>* %local0
-  load volatile <vscale x 8 x i16>, <vscale x 8 x i16>* %local1
+  load volatile <vscale x 8 x i16>, ptr %local0
+  load volatile <vscale x 8 x i16>, ptr %local1
   ret void
 }
 
@@ -108,8 +108,8 @@ define void @fill_nxv4i16() {
 ; CHECK-DAG: ld1h    { z{{[01]}}.s }, p0/z, [sp, #1, mul vl]
   %local0 = alloca <vscale x 4 x i16>
   %local1 = alloca <vscale x 4 x i16>
-  load volatile <vscale x 4 x i16>, <vscale x 4 x i16>* %local0
-  load volatile <vscale x 4 x i16>, <vscale x 4 x i16>* %local1
+  load volatile <vscale x 4 x i16>, ptr %local0
+  load volatile <vscale x 4 x i16>, ptr %local1
   ret void
 }
 
@@ -119,9 +119,9 @@ define <vscale x 4 x i32> @fill_signed_nxv4i16() {
 ; CHECK-DAG: ld1sh    { z{{[01]}}.s }, p0/z, [sp, #1, mul vl]
   %local0 = alloca <vscale x 4 x i16>
   %local1 = alloca <vscale x 4 x i16>
-  %a = load volatile <vscale x 4 x i16>, <vscale x 4 x i16>* %local0
+  %a = load volatile <vscale x 4 x i16>, ptr %local0
   %a_ext = sext <vscale x 4 x i16> %a to <vscale x 4 x i32>
-  %b = load volatile <vscale x 4 x i16>, <vscale x 4 x i16>* %local1
+  %b = load volatile <vscale x 4 x i16>, ptr %local1
   %b_ext = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
   %sum = add <vscale x 4 x i32> %a_ext, %b_ext
   ret <vscale x 4 x i32> %sum
@@ -133,8 +133,8 @@ define void @fill_nxv2i16() {
 ; CHECK-DAG: ld1h    { z{{[01]}}.d }, p0/z, [sp, #2, mul vl]
   %local0 = alloca <vscale x 2 x i16>
   %local1 = alloca <vscale x 2 x i16>
-  load volatile <vscale x 2 x i16>, <vscale x 2 x i16>* %local0
-  load volatile <vscale x 2 x i16>, <vscale x 2 x i16>* %local1
+  load volatile <vscale x 2 x i16>, ptr %local0
+  load volatile <vscale x 2 x i16>, ptr %local1
   ret void
 }
 
@@ -144,9 +144,9 @@ define <vscale x 2 x i64> @fill_signed_nxv2i16() {
 ; CHECK-DAG: ld1sh    { z{{[01]}}.d }, p0/z, [sp, #2, mul vl]
   %local0 = alloca <vscale x 2 x i16>
   %local1 = alloca <vscale x 2 x i16>
-  %a = load volatile <vscale x 2 x i16>, <vscale x 2 x i16>* %local0
+  %a = load volatile <vscale x 2 x i16>, ptr %local0
   %a_ext = sext <vscale x 2 x i16> %a to <vscale x 2 x i64>
-  %b = load volatile <vscale x 2 x i16>, <vscale x 2 x i16>* %local1
+  %b = load volatile <vscale x 2 x i16>, ptr %local1
   %b_ext = sext <vscale x 2 x i16> %b to <vscale x 2 x i64>
   %sum = add <vscale x 2 x i64> %a_ext, %b_ext
   ret <vscale x 2 x i64> %sum
@@ -158,8 +158,8 @@ define void @fill_nxv4i32() {
 ; CHECK-DAG: ld1w    { z{{[01]}}.s }, p0/z, [sp, #1, mul vl]
   %local0 = alloca <vscale x 4 x i32>
   %local1 = alloca <vscale x 4 x i32>
-  load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %local0
-  load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %local1
+  load volatile <vscale x 4 x i32>, ptr %local0
+  load volatile <vscale x 4 x i32>, ptr %local1
   ret void
 }
 
@@ -169,8 +169,8 @@ define void @fill_nxv2i32() {
 ; CHECK-DAG: ld1w    { z{{[01]}}.d }, p0/z, [sp, #1, mul vl]
   %local0 = alloca <vscale x 2 x i32>
   %local1 = alloca <vscale x 2 x i32>
-  load volatile <vscale x 2 x i32>, <vscale x 2 x i32>* %local0
-  load volatile <vscale x 2 x i32>, <vscale x 2 x i32>* %local1
+  load volatile <vscale x 2 x i32>, ptr %local0
+  load volatile <vscale x 2 x i32>, ptr %local1
   ret void
 }
 
@@ -180,9 +180,9 @@ define <vscale x 2 x i64> @fill_signed_nxv2i32() {
 ; CHECK-DAG: ld1sw    { z{{[01]}}.d }, p0/z, [sp, #1, mul vl]
   %local0 = alloca <vscale x 2 x i32>
   %local1 = alloca <vscale x 2 x i32>
-  %a = load volatile <vscale x 2 x i32>, <vscale x 2 x i32>* %local0
+  %a = load volatile <vscale x 2 x i32>, ptr %local0
   %a_ext = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
-  %b = load volatile <vscale x 2 x i32>, <vscale x 2 x i32>* %local1
+  %b = load volatile <vscale x 2 x i32>, ptr %local1
   %b_ext = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
   %sum = add <vscale x 2 x i64> %a_ext, %b_ext
   ret <vscale x 2 x i64> %sum
@@ -194,8 +194,8 @@ define void @fill_nxv2i64() {
 ; CHECK-DAG: ld1d    { z{{[01]}}.d }, p0/z, [sp, #1, mul vl]
   %local0 = alloca <vscale x 2 x i64>
   %local1 = alloca <vscale x 2 x i64>
-  load volatile <vscale x 2 x i64>, <vscale x 2 x i64>* %local0
-  load volatile <vscale x 2 x i64>, <vscale x 2 x i64>* %local1
+  load volatile <vscale x 2 x i64>, ptr %local0
+  load volatile <vscale x 2 x i64>, ptr %local1
   ret void
 }
 
@@ -205,8 +205,8 @@ define void @fill_nxv8bf16() {
 ; CHECK-DAG: ld1h    { z{{[01]}}.h }, p0/z, [sp, #1, mul vl]
   %local0 = alloca <vscale x 8 x bfloat>
   %local1 = alloca <vscale x 8 x bfloat>
-  load volatile <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %local0
-  load volatile <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %local1
+  load volatile <vscale x 8 x bfloat>, ptr %local0
+  load volatile <vscale x 8 x bfloat>, ptr %local1
   ret void
 }
 
@@ -216,8 +216,8 @@ define void @fill_nxv8f16() {
 ; CHECK-DAG: ld1h    { z{{[01]}}.h }, p0/z, [sp, #1, mul vl]
   %local0 = alloca <vscale x 8 x half>
   %local1 = alloca <vscale x 8 x half>
-  load volatile <vscale x 8 x half>, <vscale x 8 x half>* %local0
-  load volatile <vscale x 8 x half>, <vscale x 8 x half>* %local1
+  load volatile <vscale x 8 x half>, ptr %local0
+  load volatile <vscale x 8 x half>, ptr %local1
   ret void
 }
 
@@ -227,8 +227,8 @@ define void @fill_nxv4f32() {
 ; CHECK-DAG: ld1w    { z{{[01]}}.s }, p0/z, [sp, #1, mul vl]
   %local0 = alloca <vscale x 4 x float>
   %local1 = alloca <vscale x 4 x float>
-  load volatile <vscale x 4 x float>, <vscale x 4 x float>* %local0
-  load volatile <vscale x 4 x float>, <vscale x 4 x float>* %local1
+  load volatile <vscale x 4 x float>, ptr %local0
+  load volatile <vscale x 4 x float>, ptr %local1
   ret void
 }
 
@@ -238,8 +238,8 @@ define void @fill_nxv2f64() {
 ; CHECK-DAG: ld1d    { z{{[01]}}.d }, p0/z, [sp, #1, mul vl]
   %local0 = alloca <vscale x 2 x double>
   %local1 = alloca <vscale x 2 x double>
-  load volatile <vscale x 2 x double>, <vscale x 2 x double>* %local0
-  load volatile <vscale x 2 x double>, <vscale x 2 x double>* %local1
+  load volatile <vscale x 2 x double>, ptr %local0
+  load volatile <vscale x 2 x double>, ptr %local1
   ret void
 }
 
@@ -252,8 +252,8 @@ define void @spill_nxv16i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1) {
 ; CHECK-DAG: st1b    { z{{[01]}}.b }, p0, [sp, #1, mul vl]
   %local0 = alloca <vscale x 16 x i8>
   %local1 = alloca <vscale x 16 x i8>
-  store volatile <vscale x 16 x i8> %v0, <vscale x 16 x i8>* %local0
-  store volatile <vscale x 16 x i8> %v1, <vscale x 16 x i8>* %local1
+  store volatile <vscale x 16 x i8> %v0, ptr %local0
+  store volatile <vscale x 16 x i8> %v1, ptr %local1
   ret void
 }
 
@@ -263,8 +263,8 @@ define void @spill_nxv8i8(<vscale x 8 x i8> %v0, <vscale x 8 x i8> %v1) {
 ; CHECK-DAG: st1b    { z{{[01]}}.h }, p0, [sp, #1, mul vl]
   %local0 = alloca <vscale x 8 x i8>
   %local1 = alloca <vscale x 8 x i8>
-  store volatile <vscale x 8 x i8> %v0, <vscale x 8 x i8>* %local0
-  store volatile <vscale x 8 x i8> %v1, <vscale x 8 x i8>* %local1
+  store volatile <vscale x 8 x i8> %v0, ptr %local0
+  store volatile <vscale x 8 x i8> %v1, ptr %local1
   ret void
 }
 
@@ -274,8 +274,8 @@ define void @spill_nxv4i8(<vscale x 4 x i8> %v0, <vscale x 4 x i8> %v1) {
 ; CHECK-DAG: st1b    { z{{[01]}}.s }, p0, [sp, #2, mul vl]
   %local0 = alloca <vscale x 4 x i8>
   %local1 = alloca <vscale x 4 x i8>
-  store volatile <vscale x 4 x i8> %v0, <vscale x 4 x i8>* %local0
-  store volatile <vscale x 4 x i8> %v1, <vscale x 4 x i8>* %local1
+  store volatile <vscale x 4 x i8> %v0, ptr %local0
+  store volatile <vscale x 4 x i8> %v1, ptr %local1
   ret void
 }
 
@@ -285,8 +285,8 @@ define void @spill_nxv2i8(<vscale x 2 x i8> %v0, <vscale x 2 x i8> %v1) {
 ; CHECK-DAG: st1b    { z{{[01]}}.d }, p0, [sp, #6, mul vl]
   %local0 = alloca <vscale x 2 x i8>
   %local1 = alloca <vscale x 2 x i8>
-  store volatile <vscale x 2 x i8> %v0, <vscale x 2 x i8>* %local0
-  store volatile <vscale x 2 x i8> %v1, <vscale x 2 x i8>* %local1
+  store volatile <vscale x 2 x i8> %v0, ptr %local0
+  store volatile <vscale x 2 x i8> %v1, ptr %local1
   ret void
 }
 
@@ -296,8 +296,8 @@ define void @spill_nxv8i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1) {
 ; CHECK-DAG: st1h    { z{{[01]}}.h }, p0, [sp, #1, mul vl]
   %local0 = alloca <vscale x 8 x i16>
   %local1 = alloca <vscale x 8 x i16>
-  store volatile <vscale x 8 x i16> %v0, <vscale x 8 x i16>* %local0
-  store volatile <vscale x 8 x i16> %v1, <vscale x 8 x i16>* %local1
+  store volatile <vscale x 8 x i16> %v0, ptr %local0
+  store volatile <vscale x 8 x i16> %v1, ptr %local1
   ret void
 }
 
@@ -307,8 +307,8 @@ define void @spill_nxv4i16(<vscale x 4 x i16> %v0, <vscale x 4 x i16> %v1) {
 ; CHECK-DAG: st1h    { z{{[01]}}.s }, p0, [sp, #1, mul vl]
   %local0 = alloca <vscale x 4 x i16>
   %local1 = alloca <vscale x 4 x i16>
-  store volatile <vscale x 4 x i16> %v0, <vscale x 4 x i16>* %local0
-  store volatile <vscale x 4 x i16> %v1, <vscale x 4 x i16>* %local1
+  store volatile <vscale x 4 x i16> %v0, ptr %local0
+  store volatile <vscale x 4 x i16> %v1, ptr %local1
   ret void
 }
 
@@ -318,8 +318,8 @@ define void @spill_nxv2i16(<vscale x 2 x i16> %v0, <vscale x 2 x i16> %v1) {
 ; CHECK-DAG: st1h    { z{{[01]}}.d }, p0, [sp, #2, mul vl]
   %local0 = alloca <vscale x 2 x i16>
   %local1 = alloca <vscale x 2 x i16>
-  store volatile <vscale x 2 x i16> %v0, <vscale x 2 x i16>* %local0
-  store volatile <vscale x 2 x i16> %v1, <vscale x 2 x i16>* %local1
+  store volatile <vscale x 2 x i16> %v0, ptr %local0
+  store volatile <vscale x 2 x i16> %v1, ptr %local1
   ret void
 }
 
@@ -329,8 +329,8 @@ define void @spill_nxv4i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1) {
 ; CHECK-DAG: st1w    { z{{[01]}}.s }, p0, [sp, #1, mul vl]
   %local0 = alloca <vscale x 4 x i32>
   %local1 = alloca <vscale x 4 x i32>
-  store volatile <vscale x 4 x i32> %v0, <vscale x 4 x i32>* %local0
-  store volatile <vscale x 4 x i32> %v1, <vscale x 4 x i32>* %local1
+  store volatile <vscale x 4 x i32> %v0, ptr %local0
+  store volatile <vscale x 4 x i32> %v1, ptr %local1
   ret void
 }
 
@@ -340,8 +340,8 @@ define void @spill_nxv2i32(<vscale x 2 x i32> %v0, <vscale x 2 x i32> %v1) {
 ; CHECK-DAG: st1w    { z{{[01]}}.d }, p0, [sp, #1, mul vl]
   %local0 = alloca <vscale x 2 x i32>
   %local1 = alloca <vscale x 2 x i32>
-  store volatile <vscale x 2 x i32> %v0, <vscale x 2 x i32>* %local0
-  store volatile <vscale x 2 x i32> %v1, <vscale x 2 x i32>* %local1
+  store volatile <vscale x 2 x i32> %v0, ptr %local0
+  store volatile <vscale x 2 x i32> %v1, ptr %local1
   ret void
 }
 
@@ -351,8 +351,8 @@ define void @spill_nxv2i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1) {
 ; CHECK-DAG: st1d    { z{{[01]}}.d }, p0, [sp, #1, mul vl]
   %local0 = alloca <vscale x 2 x i64>
   %local1 = alloca <vscale x 2 x i64>
-  store volatile <vscale x 2 x i64> %v0, <vscale x 2 x i64>* %local0
-  store volatile <vscale x 2 x i64> %v1, <vscale x 2 x i64>* %local1
+  store volatile <vscale x 2 x i64> %v0, ptr %local0
+  store volatile <vscale x 2 x i64> %v1, ptr %local1
   ret void
 }
 
@@ -362,8 +362,8 @@ define void @spill_nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1) {
 ; CHECK-DAG: st1h    { z{{[01]}}.h }, p0, [sp, #1, mul vl]
   %local0 = alloca <vscale x 8 x half>
   %local1 = alloca <vscale x 8 x half>
-  store volatile <vscale x 8 x half> %v0, <vscale x 8 x half>* %local0
-  store volatile <vscale x 8 x half> %v1, <vscale x 8 x half>* %local1
+  store volatile <vscale x 8 x half> %v0, ptr %local0
+  store volatile <vscale x 8 x half> %v1, ptr %local1
   ret void
 }
 
@@ -373,8 +373,8 @@ define void @spill_nxv8bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1
 ; CHECK-DAG: st1h    { z{{[01]}}.h }, p0, [sp, #1, mul vl]
   %local0 = alloca <vscale x 8 x bfloat>
   %local1 = alloca <vscale x 8 x bfloat>
-  store volatile <vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat>* %local0
-  store volatile <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat>* %local1
+  store volatile <vscale x 8 x bfloat> %v0, ptr %local0
+  store volatile <vscale x 8 x bfloat> %v1, ptr %local1
   ret void
 }
 
@@ -384,8 +384,8 @@ define void @spill_nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1) {
 ; CHECK-DAG: st1w    { z{{[01]}}.s }, p0, [sp, #1, mul vl]
   %local0 = alloca <vscale x 4 x float>
   %local1 = alloca <vscale x 4 x float>
-  store volatile <vscale x 4 x float> %v0, <vscale x 4 x float>* %local0
-  store volatile <vscale x 4 x float> %v1, <vscale x 4 x float>* %local1
+  store volatile <vscale x 4 x float> %v0, ptr %local0
+  store volatile <vscale x 4 x float> %v1, ptr %local1
   ret void
 }
 
@@ -395,8 +395,8 @@ define void @spill_nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1)
 ; CHECK-DAG: st1d    { z{{[01]}}.d }, p0, [sp, #1, mul vl]
   %local0 = alloca <vscale x 2 x double>
   %local1 = alloca <vscale x 2 x double>
-  store volatile <vscale x 2 x double> %v0, <vscale x 2 x double>* %local0
-  store volatile <vscale x 2 x double> %v1, <vscale x 2 x double>* %local1
+  store volatile <vscale x 2 x double> %v0, ptr %local0
+  store volatile <vscale x 2 x double> %v1, ptr %local1
   ret void
 }
 
@@ -408,8 +408,8 @@ define void @fill_nxv16i1() {
 ; CHECK-DAG: ldr    p{{[01]}}, [sp, #6, mul vl]
   %local0 = alloca <vscale x 16 x i1>
   %local1 = alloca <vscale x 16 x i1>
-  load volatile <vscale x 16 x i1>, <vscale x 16 x i1>* %local0
-  load volatile <vscale x 16 x i1>, <vscale x 16 x i1>* %local1
+  load volatile <vscale x 16 x i1>, ptr %local0
+  load volatile <vscale x 16 x i1>, ptr %local1
   ret void
 }
 
@@ -421,7 +421,7 @@ define void @spill_nxv16i1(<vscale x 16 x i1> %v0, <vscale x 16 x i1> %v1) {
 ; CHECK-DAG: str    p{{[01]}}, [sp, #6, mul vl]
   %local0 = alloca <vscale x 16 x i1>
   %local1 = alloca <vscale x 16 x i1>
-  store volatile <vscale x 16 x i1> %v0, <vscale x 16 x i1>* %local0
-  store volatile <vscale x 16 x i1> %v1, <vscale x 16 x i1>* %local1
+  store volatile <vscale x 16 x i1> %v0, ptr %local0
+  store volatile <vscale x 16 x i1> %v1, ptr %local1
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/stack-guard-reassign-sve.mir b/llvm/test/CodeGen/AArch64/stack-guard-reassign-sve.mir
index 6af66df290301..6e448e68f4ab8 100644
--- a/llvm/test/CodeGen/AArch64/stack-guard-reassign-sve.mir
+++ b/llvm/test/CodeGen/AArch64/stack-guard-reassign-sve.mir
@@ -1,9 +1,9 @@
 # RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve -start-before=localstackalloc -stop-after=prologepilog -o - %s | FileCheck %s
 
 --- |
-  @__stack_chk_guard = external global i8*
-  define i32 @main(i32, i8**) {
-    %StackGuardSlot = alloca i8*
+  @__stack_chk_guard = external global ptr
+  define i32 @main(i32, ptr) {
+    %StackGuardSlot = alloca ptr
     unreachable
   }
 ...

diff  --git a/llvm/test/CodeGen/AArch64/stack-guard-reassign.mir b/llvm/test/CodeGen/AArch64/stack-guard-reassign.mir
index fabaed184b85a..6a7ef800b3d53 100644
--- a/llvm/test/CodeGen/AArch64/stack-guard-reassign.mir
+++ b/llvm/test/CodeGen/AArch64/stack-guard-reassign.mir
@@ -1,9 +1,9 @@
 # RUN: llc -mtriple=arm64-apple-ios -start-before=localstackalloc -stop-after=prologepilog -o - %s | FileCheck %s
 
 --- |
-  @__stack_chk_guard = external global i8*
-  define i32 @main(i32, i8**) {
-    %StackGuardSlot = alloca i8*
+  @__stack_chk_guard = external global ptr
+  define i32 @main(i32, ptr) {
+    %StackGuardSlot = alloca ptr
     unreachable
   }
 ...

diff  --git a/llvm/test/CodeGen/AArch64/stack-guard-sve.ll b/llvm/test/CodeGen/AArch64/stack-guard-sve.ll
index 5acbb22bf1ab5..e3308e95e46a0 100644
--- a/llvm/test/CodeGen/AArch64/stack-guard-sve.ll
+++ b/llvm/test/CodeGen/AArch64/stack-guard-sve.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
 
 declare dso_local void @val_fn(<vscale x 4 x float>)
-declare dso_local void @ptr_fn(<vscale x 4 x float>*)
+declare dso_local void @ptr_fn(ptr)
 
 ; An alloca of a scalable vector shouldn't trigger stack protection.
 
@@ -13,8 +13,8 @@ declare dso_local void @ptr_fn(<vscale x 4 x float>*)
 define void @call_value() #0 {
 entry:
   %x = alloca <vscale x 4 x float>, align 16
-  store <vscale x 4 x float> zeroinitializer, <vscale x 4 x float>* %x, align 16
-  %0 = load <vscale x 4 x float>, <vscale x 4 x float>* %x, align 16
+  store <vscale x 4 x float> zeroinitializer, ptr %x, align 16
+  %0 = load <vscale x 4 x float>, ptr %x, align 16
   call void @val_fn(<vscale x 4 x float> %0)
   ret void
 }
@@ -27,8 +27,8 @@ entry:
 define void @call_value_strong() #1 {
 entry:
   %x = alloca <vscale x 4 x float>, align 16
-  store <vscale x 4 x float> zeroinitializer, <vscale x 4 x float>* %x, align 16
-  %0 = load <vscale x 4 x float>, <vscale x 4 x float>* %x, align 16
+  store <vscale x 4 x float> zeroinitializer, ptr %x, align 16
+  %0 = load <vscale x 4 x float>, ptr %x, align 16
   call void @val_fn(<vscale x 4 x float> %0)
   ret void
 }
@@ -45,7 +45,7 @@ entry:
 define void @call_ptr() #0 {
 entry:
   %x = alloca <vscale x 4 x float>, align 16
-  call void @ptr_fn(<vscale x 4 x float>* %x)
+  call void @ptr_fn(ptr %x)
   ret void
 }
 
@@ -60,7 +60,7 @@ entry:
 define void @call_ptr_strong() #1 {
 entry:
   %x = alloca <vscale x 4 x float>, align 16
-  call void @ptr_fn(<vscale x 4 x float>* %x)
+  call void @ptr_fn(ptr %x)
   ret void
 }
 
@@ -78,10 +78,10 @@ define void @call_both() #0 {
 entry:
   %x = alloca <vscale x 4 x float>, align 16
   %y = alloca <vscale x 4 x float>, align 16
-  store <vscale x 4 x float> zeroinitializer, <vscale x 4 x float>* %x, align 16
-  %0 = load <vscale x 4 x float>, <vscale x 4 x float>* %x, align 16
+  store <vscale x 4 x float> zeroinitializer, ptr %x, align 16
+  %0 = load <vscale x 4 x float>, ptr %x, align 16
   call void @val_fn(<vscale x 4 x float> %0)
-  call void @ptr_fn(<vscale x 4 x float>* %y)
+  call void @ptr_fn(ptr %y)
   ret void
 }
 
@@ -99,10 +99,10 @@ define void @call_both_strong() #1 {
 entry:
   %x = alloca <vscale x 4 x float>, align 16
   %y = alloca <vscale x 4 x float>, align 16
-  store <vscale x 4 x float> zeroinitializer, <vscale x 4 x float>* %x, align 16
-  %0 = load <vscale x 4 x float>, <vscale x 4 x float>* %x, align 16
+  store <vscale x 4 x float> zeroinitializer, ptr %x, align 16
+  %0 = load <vscale x 4 x float>, ptr %x, align 16
   call void @val_fn(<vscale x 4 x float> %0)
-  call void @ptr_fn(<vscale x 4 x float>* %y)
+  call void @ptr_fn(ptr %y)
   ret void
 }
 
@@ -120,8 +120,8 @@ entry:
 define void @callee_save(<vscale x 4 x float> %x) #0 {
 entry:
   %x.addr = alloca <vscale x 4 x float>, align 16
-  store <vscale x 4 x float> %x, <vscale x 4 x float>* %x.addr, align 16
-  call void @ptr_fn(<vscale x 4 x float>* %x.addr)
+  store <vscale x 4 x float> %x, ptr %x.addr, align 16
+  call void @ptr_fn(ptr %x.addr)
   ret void
 }
 
@@ -138,8 +138,8 @@ entry:
 define void @callee_save_strong(<vscale x 4 x float> %x) #1 {
 entry:
   %x.addr = alloca <vscale x 4 x float>, align 16
-  store <vscale x 4 x float> %x, <vscale x 4 x float>* %x.addr, align 16
-  call void @ptr_fn(<vscale x 4 x float>* %x.addr)
+  store <vscale x 4 x float> %x, ptr %x.addr, align 16
+  call void @ptr_fn(ptr %x.addr)
   ret void
 }
 
@@ -177,22 +177,22 @@ entry:
 define void @local_stack_alloc(i64 %val) #0 {
 entry:
   %char_arr = alloca [8 x i8], align 4
-  %gep0 = getelementptr [8 x i8], [8 x i8]* %char_arr, i64 0, i64 0
-  store i8 0, i8* %gep0, align 8
+  %gep0 = getelementptr [8 x i8], ptr %char_arr, i64 0, i64 0
+  store i8 0, ptr %gep0, align 8
   %large1 = alloca [4096 x i64], align 8
   %large2 = alloca [4096 x i64], align 8
   %vec_1 = alloca <vscale x 4 x float>, align 16
   %vec_2 = alloca <vscale x 4 x float>, align 16
-  %gep1 = getelementptr [4096 x i64], [4096 x i64]* %large1, i64 0, i64 0
-  %gep2 = getelementptr [4096 x i64], [4096 x i64]* %large1, i64 0, i64 1
-  store i64 %val, i64* %gep1, align 8
-  store i64 %val, i64* %gep2, align 8
-  %gep3 = getelementptr [4096 x i64], [4096 x i64]* %large2, i64 0, i64 0
-  %gep4 = getelementptr [4096 x i64], [4096 x i64]* %large2, i64 0, i64 1
-  store i64 %val, i64* %gep3, align 8
-  store i64 %val, i64* %gep4, align 8
-  call void @ptr_fn(<vscale x 4 x float>* %vec_1)
-  call void @ptr_fn(<vscale x 4 x float>* %vec_2)
+  %gep1 = getelementptr [4096 x i64], ptr %large1, i64 0, i64 0
+  %gep2 = getelementptr [4096 x i64], ptr %large1, i64 0, i64 1
+  store i64 %val, ptr %gep1, align 8
+  store i64 %val, ptr %gep2, align 8
+  %gep3 = getelementptr [4096 x i64], ptr %large2, i64 0, i64 0
+  %gep4 = getelementptr [4096 x i64], ptr %large2, i64 0, i64 1
+  store i64 %val, ptr %gep3, align 8
+  store i64 %val, ptr %gep4, align 8
+  call void @ptr_fn(ptr %vec_1)
+  call void @ptr_fn(ptr %vec_2)
   ret void
 }
 
@@ -227,22 +227,22 @@ entry:
 define void @local_stack_alloc_strong(i64 %val) #1 {
 entry:
   %char_arr = alloca [8 x i8], align 4
-  %gep0 = getelementptr [8 x i8], [8 x i8]* %char_arr, i64 0, i64 0
-  store i8 0, i8* %gep0, align 8
+  %gep0 = getelementptr [8 x i8], ptr %char_arr, i64 0, i64 0
+  store i8 0, ptr %gep0, align 8
   %large1 = alloca [4096 x i64], align 8
   %large2 = alloca [4096 x i64], align 8
   %vec_1 = alloca <vscale x 4 x float>, align 16
   %vec_2 = alloca <vscale x 4 x float>, align 16
-  %gep1 = getelementptr [4096 x i64], [4096 x i64]* %large1, i64 0, i64 0
-  %gep2 = getelementptr [4096 x i64], [4096 x i64]* %large1, i64 0, i64 1
-  store i64 %val, i64* %gep1, align 8
-  store i64 %val, i64* %gep2, align 8
-  %gep3 = getelementptr [4096 x i64], [4096 x i64]* %large2, i64 0, i64 0
-  %gep4 = getelementptr [4096 x i64], [4096 x i64]* %large2, i64 0, i64 1
-  store i64 %val, i64* %gep3, align 8
-  store i64 %val, i64* %gep4, align 8
-  call void @ptr_fn(<vscale x 4 x float>* %vec_1)
-  call void @ptr_fn(<vscale x 4 x float>* %vec_2)
+  %gep1 = getelementptr [4096 x i64], ptr %large1, i64 0, i64 0
+  %gep2 = getelementptr [4096 x i64], ptr %large1, i64 0, i64 1
+  store i64 %val, ptr %gep1, align 8
+  store i64 %val, ptr %gep2, align 8
+  %gep3 = getelementptr [4096 x i64], ptr %large2, i64 0, i64 0
+  %gep4 = getelementptr [4096 x i64], ptr %large2, i64 0, i64 1
+  store i64 %val, ptr %gep3, align 8
+  store i64 %val, ptr %gep4, align 8
+  call void @ptr_fn(ptr %vec_1)
+  call void @ptr_fn(ptr %vec_2)
   ret void
 }
 
@@ -255,8 +255,8 @@ entry:
 define void @vector_gep_3() #0 {
 entry:
   %vec = alloca <vscale x 4 x float>, align 16
-  %gep = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %vec, i64 0, i64 3
-  store float 0.0, float* %gep, align 4
+  %gep = getelementptr <vscale x 4 x float>, ptr %vec, i64 0, i64 3
+  store float 0.0, ptr %gep, align 4
   ret void
 }
 
@@ -265,8 +265,8 @@ entry:
 define void @vector_gep_4() #0 {
 entry:
   %vec = alloca <vscale x 4 x float>, align 16
-  %gep = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %vec, i64 0, i64 4
-  store float 0.0, float* %gep, align 4
+  %gep = getelementptr <vscale x 4 x float>, ptr %vec, i64 0, i64 4
+  store float 0.0, ptr %gep, align 4
   ret void
 }
 
@@ -275,10 +275,10 @@ entry:
 define void @vector_gep_twice() #0 {
 entry:
   %vec = alloca <vscale x 4 x float>, align 16
-  %gep1 = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %vec, i64 0, i64 3
-  store float 0.0, float* %gep1, align 4
-  %gep2 = getelementptr float, float* %gep1, i64 1
-  store float 0.0, float* %gep2, align 4
+  %gep1 = getelementptr <vscale x 4 x float>, ptr %vec, i64 0, i64 3
+  store float 0.0, ptr %gep1, align 4
+  %gep2 = getelementptr float, ptr %gep1, i64 1
+  store float 0.0, ptr %gep2, align 4
   ret void
 }
 
@@ -287,8 +287,8 @@ entry:
 define void @vector_gep_n(i64 %n) #0 {
 entry:
   %vec = alloca <vscale x 4 x float>, align 16
-  %gep = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %vec, i64 0, i64 %n
-  store float 0.0, float* %gep, align 4
+  %gep = getelementptr <vscale x 4 x float>, ptr %vec, i64 0, i64 %n
+  store float 0.0, ptr %gep, align 4
   ret void
 }
 
@@ -297,8 +297,8 @@ entry:
 define void @vector_gep_3_strong() #1 {
 entry:
   %vec = alloca <vscale x 4 x float>, align 16
-  %gep = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %vec, i64 0, i64 3
-  store float 0.0, float* %gep, align 4
+  %gep = getelementptr <vscale x 4 x float>, ptr %vec, i64 0, i64 3
+  store float 0.0, ptr %gep, align 4
   ret void
 }
 
@@ -307,8 +307,8 @@ entry:
 define void @vector_gep_4_strong(i64 %val) #1 {
 entry:
   %vec = alloca <vscale x 4 x float>, align 16
-  %gep = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %vec, i64 0, i64 4
-  store float 0.0, float* %gep, align 4
+  %gep = getelementptr <vscale x 4 x float>, ptr %vec, i64 0, i64 4
+  store float 0.0, ptr %gep, align 4
   ret void
 }
 
@@ -318,10 +318,10 @@ entry:
 define void @vector_gep_twice_strong() #1 {
 entry:
   %vec = alloca <vscale x 4 x float>, align 16
-  %gep1 = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %vec, i64 0, i64 3
-  store float 0.0, float* %gep1, align 4
-  %gep2 = getelementptr float, float* %gep1, i64 1
-  store float 0.0, float* %gep2, align 4
+  %gep1 = getelementptr <vscale x 4 x float>, ptr %vec, i64 0, i64 3
+  store float 0.0, ptr %gep1, align 4
+  %gep2 = getelementptr float, ptr %gep1, i64 1
+  store float 0.0, ptr %gep2, align 4
   ret void
 }
 
@@ -330,8 +330,8 @@ entry:
 define void @vector_gep_n_strong(i64 %n) #1 {
 entry:
   %vec = alloca <vscale x 4 x float>, align 16
-  %gep = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %vec, i64 0, i64 %n
-  store float 0.0, float* %gep, align 4
+  %gep = getelementptr <vscale x 4 x float>, ptr %vec, i64 0, i64 %n
+  store float 0.0, ptr %gep, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/stack-probing-64k.ll b/llvm/test/CodeGen/AArch64/stack-probing-64k.ll
index 2f15e317a7f58..0d64e7378b55a 100644
--- a/llvm/test/CodeGen/AArch64/stack-probing-64k.ll
+++ b/llvm/test/CodeGen/AArch64/stack-probing-64k.ll
@@ -24,7 +24,7 @@ define void @static_65536(ptr %out) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %v = alloca i8, i64 65536, align 1
-  store i8* %v, ptr %out, align 8
+  store ptr %v, ptr %out, align 8
   ret void
 }
 
@@ -51,7 +51,7 @@ define void @static_65552(ptr %out) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %v = alloca i8, i64 65552, align 1
-  store i8* %v, ptr %out, align 8
+  store ptr %v, ptr %out, align 8
   ret void
 }
 
@@ -79,7 +79,7 @@ define void @static_66560(ptr %out) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %v = alloca i8, i64 66560, align 1
-  store i8* %v, ptr %out, align 8
+  store ptr %v, ptr %out, align 8
   ret void
 }
 
@@ -108,7 +108,7 @@ define void @static_66576(ptr %out) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %v = alloca i8, i64 66576, align 1
-  store i8* %v, ptr %out, align 8
+  store ptr %v, ptr %out, align 8
   ret void
 }
 
@@ -139,7 +139,7 @@ define void @static_132096(ptr %out) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %v = alloca i8, i64 132096, align 1
-  store i8* %v, ptr %out, align 8
+  store ptr %v, ptr %out, align 8
   ret void
 }
 
@@ -179,7 +179,7 @@ define void @static_327664(ptr %out) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %v = alloca i8, i64 327664, align 1
-  store i8* %v, ptr %out, align 8
+  store ptr %v, ptr %out, align 8
   ret void
 }
 
@@ -210,7 +210,7 @@ define void @static_327680(ptr %out) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %v = alloca i8, i64 327680, align 1
-  store i8* %v, ptr %out, align 8
+  store ptr %v, ptr %out, align 8
   ret void
 }
 
@@ -246,7 +246,7 @@ define void @static_328704(ptr %out) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %v = alloca i8, i64 328704, align 1
-  store i8* %v, ptr %out, align 8
+  store ptr %v, ptr %out, align 8
   ret void
 }
 
@@ -283,7 +283,7 @@ define void @static_328720(ptr %out) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %v = alloca i8, i64 328720, align 1
-  store i8* %v, ptr %out, align 8
+  store ptr %v, ptr %out, align 8
   ret void
 }
 
@@ -325,7 +325,7 @@ define void @static_16_align_131072(ptr %out) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %v = alloca i8, i64 16, align 131072
-  store i8* %v, ptr %out, align 8
+  store ptr %v, ptr %out, align 8
   ret void
 }
 
@@ -355,7 +355,7 @@ define void @static_16_align_8192(ptr %out) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %v = alloca i8, i64 16, align 8192
-  store i8* %v, ptr %out, align 8
+  store ptr %v, ptr %out, align 8
   ret void
 }
 
@@ -385,7 +385,7 @@ define void @static_32752_align_32k(ptr %out) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %v = alloca i8, i64 32752, align 32768
-  store i8* %v, ptr %out, align 8
+  store ptr %v, ptr %out, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/stack-tagging-cfi.ll b/llvm/test/CodeGen/AArch64/stack-tagging-cfi.ll
index 7e1ab1ce826d5..c6e4a1f56b707 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-cfi.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-cfi.ll
@@ -8,7 +8,7 @@ entry:
 ; CHECK: .cfi_mte_tagged_frame
 ; CHECK: stg
   %x = alloca i32, align 4
-  call void @use32(i32* %x)
+  call void @use32(ptr %x)
   ret void
 }
 
@@ -18,7 +18,7 @@ entry:
 ; CHECK-NOT: .cfi_mte_tagged_frame
 ; CHECK: stg
   %x = alloca i32, align 4
-  call void @use32(i32* %x)
+  call void @use32(ptr %x)
   ret void
 }
 
@@ -28,7 +28,7 @@ entry:
 ; CHECK: .cfi_mte_tagged_frame
 ; CHECK: stg
   %x = alloca i32, align 4
-  call void @use32(i32* %x)
+  call void @use32(ptr %x)
   ret void
 }
 
@@ -38,6 +38,6 @@ entry:
 ; CHECK-NOT: .cfi_mte_tagged_frame
 ; CHECK-NOT: stg
   %x = alloca i32, align 4
-  call void @use32(i32* %x)
+  call void @use32(ptr %x)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/stp-opt-with-renaming-ld3.mir b/llvm/test/CodeGen/AArch64/stp-opt-with-renaming-ld3.mir
index ec4437f1a443c..b8b3642052ec5 100644
--- a/llvm/test/CodeGen/AArch64/stp-opt-with-renaming-ld3.mir
+++ b/llvm/test/CodeGen/AArch64/stp-opt-with-renaming-ld3.mir
@@ -1,6 +1,6 @@
 # RUN: llc -run-pass=aarch64-ldst-opt -mtriple=arm64-apple-iphoneos -aarch64-load-store-renaming=true -o - -verify-machineinstrs %s | FileCheck %s
 --- |
-  define void @test_ld3(<8 x i8>* %a1) {
+  define void @test_ld3(ptr %a1) {
   entry:
     %s1 = alloca i64
     ret void

diff  --git a/llvm/test/CodeGen/AArch64/stp-opt-with-renaming-undef-assert.mir b/llvm/test/CodeGen/AArch64/stp-opt-with-renaming-undef-assert.mir
index 3030a8456bb60..66d2067b531a3 100644
--- a/llvm/test/CodeGen/AArch64/stp-opt-with-renaming-undef-assert.mir
+++ b/llvm/test/CodeGen/AArch64/stp-opt-with-renaming-undef-assert.mir
@@ -40,8 +40,8 @@ machineFunctionInfo: {}
 body:             |
   bb.0:
     liveins: $x0, $x17, $x18
-    renamable $q13_q14_q15 = LD3Threev16b undef renamable $x17 :: (load (s384) from `<16 x i8>* undef`, align 64)
-    renamable $q23_q24_q25 = LD3Threev16b undef renamable $x18 :: (load (s384) from `<16 x i8>* undef`, align 64)
+    renamable $q13_q14_q15 = LD3Threev16b undef renamable $x17 :: (load (s384) from `ptr undef`, align 64)
+    renamable $q23_q24_q25 = LD3Threev16b undef renamable $x18 :: (load (s384) from `ptr undef`, align 64)
     renamable $q20 = EXTv16i8 renamable $q23, renamable $q23, 8
     STRQui killed renamable $q20, $sp, 7 :: (store (s128))
     renamable $q20 = EXTv16i8 renamable $q14, renamable $q14, 8

diff  --git a/llvm/test/CodeGen/AArch64/sub-of-bias.ll b/llvm/test/CodeGen/AArch64/sub-of-bias.ll
index 17b548cbeab8c..d5b5a5ebeeb22 100644
--- a/llvm/test/CodeGen/AArch64/sub-of-bias.ll
+++ b/llvm/test/CodeGen/AArch64/sub-of-bias.ll
@@ -45,7 +45,7 @@ define i32 @t2_commutative(i32 %ptr, i32 %mask) nounwind {
 
 ; Extra use tests
 
-define i32 @n3_extrause1(i32 %ptr, i32 %mask, i32* %bias_storage) nounwind {
+define i32 @n3_extrause1(i32 %ptr, i32 %mask, ptr %bias_storage) nounwind {
 ; CHECK-LABEL: n3_extrause1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, w1
@@ -53,7 +53,7 @@ define i32 @n3_extrause1(i32 %ptr, i32 %mask, i32* %bias_storage) nounwind {
 ; CHECK-NEXT:    str w8, [x2]
 ; CHECK-NEXT:    ret
   %bias = and i32 %ptr, %mask ; has extra uses, can't fold
-  store i32 %bias, i32* %bias_storage
+  store i32 %bias, ptr %bias_storage
   %r = sub i32 %ptr, %bias
   ret i32 %r
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve-alloca-stackid.ll b/llvm/test/CodeGen/AArch64/sve-alloca-stackid.ll
index bc19681169c7d..64424f5d4f360 100644
--- a/llvm/test/CodeGen/AArch64/sve-alloca-stackid.ll
+++ b/llvm/test/CodeGen/AArch64/sve-alloca-stackid.ll
@@ -10,11 +10,11 @@
 ; CHECKISEL-NEXT:  stack-id: scalable-vector
 define i32 @foo(<vscale x 16 x i8> %val) {
   %ptr = alloca <vscale x 16 x i8>
-  %res = call i32 @bar(<vscale x 16 x i8>* %ptr)
+  %res = call i32 @bar(ptr %ptr)
   ret i32 %res
 }
 
-declare i32 @bar(<vscale x 16 x i8>* %ptr);
+declare i32 @bar(ptr %ptr);
 
 ; CHECKCG-LABEL: foo2:
 ; CHECKCG: addvl   sp, sp, #-2
@@ -26,7 +26,7 @@ declare i32 @bar(<vscale x 16 x i8>* %ptr);
 
 define i32 @foo2(<vscale x 32 x i8> %val) {
   %ptr = alloca <vscale x 32 x i8>, align 16
-  %res = call i32 @bar2(<vscale x 32 x i8>* %ptr)
+  %res = call i32 @bar2(ptr %ptr)
   ret i32 %res
 }
-declare i32 @bar2(<vscale x 32 x i8>* %ptr);
+declare i32 @bar2(ptr %ptr);

diff --git a/llvm/test/CodeGen/AArch64/sve-alloca.ll b/llvm/test/CodeGen/AArch64/sve-alloca.ll
index 209c6198fe575..47e49b84aaaff 100644
--- a/llvm/test/CodeGen/AArch64/sve-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/sve-alloca.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64 -mattr=+sve < %s | FileCheck %s
 
-declare void @bar(<vscale x 4 x i64>*)
+declare void @bar(ptr)
 
 define void @foo(<vscale x 4 x i64> %dst, i1 %cond) {
 ; CHECK-LABEL: foo:
@@ -104,8 +104,8 @@ entry:
 
 if.then:
   %ptr = alloca <vscale x 4 x i64>
-  store <vscale x 4 x i64> %dst, <vscale x 4 x i64>* %ptr
-  call void @bar(<vscale x 4 x i64>* %ptr)
+  store <vscale x 4 x i64> %dst, ptr %ptr
+  call void @bar(ptr %ptr)
   br label %if.end
 
 if.end:

diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
index 783878fe73806..9851583b950eb 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
@@ -173,9 +173,9 @@ define double @foo4(double %x0, ptr %ptr1, ptr %ptr2, ptr %ptr3, <vscale x 8 x d
 ; CHECK-NEXT:    st1d { z5.d }, p0, [x2]
 ; CHECK-NEXT:    ret
 entry:
-  store volatile <vscale x 8 x double> %x1, <vscale x 8 x double>* %ptr1
-  store volatile <vscale x 8 x double> %x2, <vscale x 8 x double>* %ptr2
-  store volatile <vscale x 2 x double> %x3, <vscale x 2 x double>* %ptr3
+  store volatile <vscale x 8 x double> %x1, ptr %ptr1
+  store volatile <vscale x 8 x double> %x2, ptr %ptr2
+  store volatile <vscale x 2 x double> %x3, ptr %ptr3
   ret double %x0
 }
 
@@ -198,8 +198,8 @@ define double @foo5(i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, ptr %p
 ; CHECK-NEXT:    st1d { z5.d }, p0, [x7, #1, mul vl]
 ; CHECK-NEXT:    ret
 entry:
-  store volatile <vscale x 8 x double> %x1, <vscale x 8 x double>* %ptr1
-  store volatile <vscale x 8 x double> %x2, <vscale x 8 x double>* %ptr2
+  store volatile <vscale x 8 x double> %x1, ptr %ptr1
+  store volatile <vscale x 8 x double> %x2, ptr %ptr2
   ret double %x0
 }
 
@@ -219,8 +219,8 @@ define double @foo6(double %x0, double %x1, ptr %ptr1, ptr %ptr2, <vscale x 8 x
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x1]
 ; CHECK-NEXT:    ret
 entry:
-  store volatile <vscale x 8 x double> %x2, <vscale x 8 x double>* %ptr1
-  store volatile <vscale x 6 x double> %x3, <vscale x 6 x double>* %ptr2
+  store volatile <vscale x 8 x double> %x2, ptr %ptr1
+  store volatile <vscale x 6 x double> %x3, ptr %ptr2
   ret double %x0
 }
 
@@ -244,15 +244,15 @@ define void @aavpcs1(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32 %
 ; CHECK-NEXT:    st1w { z3.s }, p0, [x9]
 ; CHECK-NEXT:    ret
 entry:
-  store volatile <vscale x 4 x i32> %s7, <vscale x 4 x i32>* %ptr
-  store volatile <vscale x 4 x i32> %s8, <vscale x 4 x i32>* %ptr
-  store volatile <vscale x 4 x i32> %s9, <vscale x 4 x i32>* %ptr
-  store volatile <vscale x 4 x i32> %s11, <vscale x 4 x i32>* %ptr
-  store volatile <vscale x 4 x i32> %s12, <vscale x 4 x i32>* %ptr
-  store volatile <vscale x 4 x i32> %s13, <vscale x 4 x i32>* %ptr
-  store volatile <vscale x 4 x i32> %s14, <vscale x 4 x i32>* %ptr
-  store volatile <vscale x 4 x i32> %s15, <vscale x 4 x i32>* %ptr
-  store volatile <vscale x 4 x i32> %s16, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s7, ptr %ptr
+  store volatile <vscale x 4 x i32> %s8, ptr %ptr
+  store volatile <vscale x 4 x i32> %s9, ptr %ptr
+  store volatile <vscale x 4 x i32> %s11, ptr %ptr
+  store volatile <vscale x 4 x i32> %s12, ptr %ptr
+  store volatile <vscale x 4 x i32> %s13, ptr %ptr
+  store volatile <vscale x 4 x i32> %s14, ptr %ptr
+  store volatile <vscale x 4 x i32> %s15, ptr %ptr
+  store volatile <vscale x 4 x i32> %s16, ptr %ptr
   ret void
 }
 
@@ -282,15 +282,15 @@ define void @aavpcs2(float %s0, float %s1, float %s2, float %s3, float %s4, floa
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x9]
 ; CHECK-NEXT:    ret
 entry:
-  store volatile <vscale x 4 x float> %s7, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s8, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s9, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s11, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s12, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s13, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s14, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s15, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s16, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s7, ptr %ptr
+  store volatile <vscale x 4 x float> %s8, ptr %ptr
+  store volatile <vscale x 4 x float> %s9, ptr %ptr
+  store volatile <vscale x 4 x float> %s11, ptr %ptr
+  store volatile <vscale x 4 x float> %s12, ptr %ptr
+  store volatile <vscale x 4 x float> %s13, ptr %ptr
+  store volatile <vscale x 4 x float> %s14, ptr %ptr
+  store volatile <vscale x 4 x float> %s15, ptr %ptr
+  store volatile <vscale x 4 x float> %s16, ptr %ptr
   ret void
 }
 
@@ -322,15 +322,15 @@ define void @aavpcs3(float %s0, float %s1, float %s2, float %s3, float %s4, floa
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x9]
 ; CHECK-NEXT:    ret
 entry:
-  store volatile <vscale x 4 x float> %s8, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s9, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s10, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s11, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s12, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s13, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s14, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s15, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s16, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s8, ptr %ptr
+  store volatile <vscale x 4 x float> %s9, ptr %ptr
+  store volatile <vscale x 4 x float> %s10, ptr %ptr
+  store volatile <vscale x 4 x float> %s11, ptr %ptr
+  store volatile <vscale x 4 x float> %s12, ptr %ptr
+  store volatile <vscale x 4 x float> %s13, ptr %ptr
+  store volatile <vscale x 4 x float> %s14, ptr %ptr
+  store volatile <vscale x 4 x float> %s15, ptr %ptr
+  store volatile <vscale x 4 x float> %s16, ptr %ptr
   ret void
 }
 
@@ -354,15 +354,15 @@ define void @aavpcs4(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32 %
 ; CHECK-NEXT:    st1w { z24.s }, p0, [x9]
 ; CHECK-NEXT:    ret
 entry:
-  store volatile <vscale x 4 x i32> %s8, <vscale x 4 x i32>* %ptr
-  store volatile <vscale x 4 x i32> %s9, <vscale x 4 x i32>* %ptr
-  store volatile <vscale x 4 x i32> %s10, <vscale x 4 x i32>* %ptr
-  store volatile <vscale x 4 x i32> %s11, <vscale x 4 x i32>* %ptr
-  store volatile <vscale x 4 x i32> %s12, <vscale x 4 x i32>* %ptr
-  store volatile <vscale x 4 x i32> %s13, <vscale x 4 x i32>* %ptr
-  store volatile <vscale x 4 x i32> %s14, <vscale x 4 x i32>* %ptr
-  store volatile <vscale x 4 x i32> %s15, <vscale x 4 x i32>* %ptr
-  store volatile <vscale x 4 x i32> %s16, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s8, ptr %ptr
+  store volatile <vscale x 4 x i32> %s9, ptr %ptr
+  store volatile <vscale x 4 x i32> %s10, ptr %ptr
+  store volatile <vscale x 4 x i32> %s11, ptr %ptr
+  store volatile <vscale x 4 x i32> %s12, ptr %ptr
+  store volatile <vscale x 4 x i32> %s13, ptr %ptr
+  store volatile <vscale x 4 x i32> %s14, ptr %ptr
+  store volatile <vscale x 4 x i32> %s15, ptr %ptr
+  store volatile <vscale x 4 x i32> %s16, ptr %ptr
   ret void
 }
 
@@ -394,15 +394,15 @@ define <vscale x 4 x float> @aavpcs5(float %s0, float %s1, float %s2, float %s3,
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x9]
 ; CHECK-NEXT:    ret
 entry:
-  store volatile <vscale x 4 x float> %s8, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s9, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s10, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s11, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s12, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s13, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s14, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s15, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s16, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s8, ptr %ptr
+  store volatile <vscale x 4 x float> %s9, ptr %ptr
+  store volatile <vscale x 4 x float> %s10, ptr %ptr
+  store volatile <vscale x 4 x float> %s11, ptr %ptr
+  store volatile <vscale x 4 x float> %s12, ptr %ptr
+  store volatile <vscale x 4 x float> %s13, ptr %ptr
+  store volatile <vscale x 4 x float> %s14, ptr %ptr
+  store volatile <vscale x 4 x float> %s15, ptr %ptr
+  store volatile <vscale x 4 x float> %s16, ptr %ptr
   ret <vscale x 4 x float> %s8
 }
 
@@ -432,15 +432,15 @@ define void @aapcs1(float %s0, float %s1, float %s2, float %s3, float %s4, float
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x9]
 ; CHECK-NEXT:    ret
 entry:
-  store volatile <vscale x 4 x float> %s8, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s9, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s10, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s11, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s12, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s13, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s14, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s15, <vscale x 4 x float>* %ptr
-  store volatile <vscale x 4 x float> %s16, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s8, ptr %ptr
+  store volatile <vscale x 4 x float> %s9, ptr %ptr
+  store volatile <vscale x 4 x float> %s10, ptr %ptr
+  store volatile <vscale x 4 x float> %s11, ptr %ptr
+  store volatile <vscale x 4 x float> %s12, ptr %ptr
+  store volatile <vscale x 4 x float> %s13, ptr %ptr
+  store volatile <vscale x 4 x float> %s14, ptr %ptr
+  store volatile <vscale x 4 x float> %s15, ptr %ptr
+  store volatile <vscale x 4 x float> %s16, ptr %ptr
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-dead-masked-store.ll b/llvm/test/CodeGen/AArch64/sve-dead-masked-store.ll
index b1b4d9d73880e..c5db3dfdf5e54 100644
--- a/llvm/test/CodeGen/AArch64/sve-dead-masked-store.ll
+++ b/llvm/test/CodeGen/AArch64/sve-dead-masked-store.ll
@@ -73,5 +73,5 @@ define void @dead_masked_store_same_mask_bigger_type(<vscale x 4 x i16> %val, <v
   ret void
 }
 
-declare void @llvm.masked.store.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>*, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.store.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4i16(<vscale x 4 x i16>, ptr, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4i32(<vscale x 4 x i32>, ptr, i32, <vscale x 4 x i1>)

diff --git a/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
index d2cbbe0628f0f..e2f8dad03ef6f 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
@@ -464,7 +464,7 @@ define void @extract_fixed_v4i64_nxv2i64(<vscale x 2 x i64> %vec, ptr %p) nounwi
 }
 
 ; Check that extract from load via bitcast-gep-of-scalar-ptr does not crash.
-define <4 x i32> @typesize_regression_test_v4i32(i32* %addr, i64 %idx) {
+define <4 x i32> @typesize_regression_test_v4i32(ptr %addr, i64 %idx) {
 ; CHECK-LABEL: typesize_regression_test_v4i32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
@@ -472,9 +472,9 @@ define <4 x i32> @typesize_regression_test_v4i32(i32* %addr, i64 %idx) {
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
 entry:
-  %ptr = getelementptr inbounds i32, i32* %addr, i64 %idx
-  %bc = bitcast i32* %ptr to <vscale x 4 x i32>*
-  %ld = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %bc, align 16
+  %ptr = getelementptr inbounds i32, ptr %addr, i64 %idx
+  %bc = bitcast ptr %ptr to ptr
+  %ld = load volatile <vscale x 4 x i32>, ptr %bc, align 16
   %out = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %ld, i64 0)
   ret <4 x i32> %out
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-fold-vscale.ll b/llvm/test/CodeGen/AArch64/sve-fold-vscale.ll
index 0c65a29e8b281..7524f8d985f71 100644
--- a/llvm/test/CodeGen/AArch64/sve-fold-vscale.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fold-vscale.ll
@@ -25,7 +25,7 @@ entry:
 vector.body:
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %2 = getelementptr inbounds [32000 x i32], ptr %addr, i64 0, i64 %index
-  %load = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %2, align 16
+  %load = load volatile <vscale x 4 x i32>, ptr %2, align 16
   %index.next = add i64 %index, %1
   %3 = icmp eq i64 %index.next, 0
   br i1 %3, label %for.cond.cleanup, label %vector.body
@@ -55,7 +55,7 @@ entry:
 vector.body:
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %2 = getelementptr inbounds [32000 x i32], ptr %addr, i64 0, i64 %index
-  store volatile <vscale x 4 x i32> %val, <vscale x 4 x i32>* %2, align 16
+  store volatile <vscale x 4 x i32> %val, ptr %2, align 16
   %index.next = add i64 %index, %1
   %3 = icmp eq i64 %index.next, 0
   br i1 %3, label %for.cond.cleanup, label %vector.body

diff --git a/llvm/test/CodeGen/AArch64/sve-forward-st-to-ld.ll b/llvm/test/CodeGen/AArch64/sve-forward-st-to-ld.ll
index 193956f63c30d..02e613f88a0aa 100644
--- a/llvm/test/CodeGen/AArch64/sve-forward-st-to-ld.ll
+++ b/llvm/test/CodeGen/AArch64/sve-forward-st-to-ld.ll
@@ -1,35 +1,35 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
 
-define <vscale x 2 x i64> @sti64ldi64(<vscale x 2 x i64>* nocapture %P, <vscale x 2 x i64> %v) {
+define <vscale x 2 x i64> @sti64ldi64(ptr nocapture %P, <vscale x 2 x i64> %v) {
 ; CHECK-LABEL: sti64ldi64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx0 = getelementptr inbounds <vscale x 2 x i64>, <vscale x 2 x i64>* %P, i64 1
-  store <vscale x 2 x i64> %v, <vscale x 2 x i64>* %arrayidx0
-  %arrayidx1 = getelementptr inbounds <vscale x 2 x i64>, <vscale x 2 x i64>* %P, i64 1
-  %0 = load <vscale x 2 x i64>, <vscale x 2 x i64>* %arrayidx1
+  %arrayidx0 = getelementptr inbounds <vscale x 2 x i64>, ptr %P, i64 1
+  store <vscale x 2 x i64> %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds <vscale x 2 x i64>, ptr %P, i64 1
+  %0 = load <vscale x 2 x i64>, ptr %arrayidx1
   ret <vscale x 2 x i64> %0
 }
 
-define <vscale x 2 x double> @stf64ldf64(<vscale x 2 x double>* nocapture %P, <vscale x 2 x double> %v) {
+define <vscale x 2 x double> @stf64ldf64(ptr nocapture %P, <vscale x 2 x double> %v) {
 ; CHECK-LABEL: stf64ldf64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx0 = getelementptr inbounds <vscale x 2 x double>, <vscale x 2 x double>* %P, i64 1
-  store <vscale x 2 x double> %v, <vscale x 2 x double>* %arrayidx0
-  %arrayidx1 = getelementptr inbounds <vscale x 2 x double>, <vscale x 2 x double>* %P, i64 1
-  %0 = load <vscale x 2 x double>, <vscale x 2 x double>* %arrayidx1
+  %arrayidx0 = getelementptr inbounds <vscale x 2 x double>, ptr %P, i64 1
+  store <vscale x 2 x double> %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds <vscale x 2 x double>, ptr %P, i64 1
+  %0 = load <vscale x 2 x double>, ptr %arrayidx1
   ret <vscale x 2 x double> %0
 }
 
-define <vscale x 2 x i64> @sti32ldi32ext(<vscale x 2 x i32>* nocapture %P, <vscale x 2 x i64> %v) {
+define <vscale x 2 x i64> @sti32ldi32ext(ptr nocapture %P, <vscale x 2 x i64> %v) {
 ; CHECK-LABEL: sti32ldi32ext:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
@@ -39,13 +39,13 @@ define <vscale x 2 x i64> @sti32ldi32ext(<vscale x 2 x i32>* nocapture %P, <vsca
 ; CHECK-NEXT:    ret
 entry:
   %0 = trunc <vscale x 2 x i64> %v to <vscale x 2 x i32>
-  store <vscale x 2 x i32> %0, <vscale x 2 x i32>* %P
-  %1 = load <vscale x 2 x i32>, <vscale x 2 x i32>* %P
+  store <vscale x 2 x i32> %0, ptr %P
+  %1 = load <vscale x 2 x i32>, ptr %P
   %2 = sext <vscale x 2 x i32> %1 to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %2
 }
 
-define <2 x i64> @sti64ldfixedi64(<vscale x 2 x i64>* nocapture %P, <vscale x 2 x i64> %v) {
+define <2 x i64> @sti64ldfixedi64(ptr nocapture %P, <vscale x 2 x i64> %v) {
 ; CHECK-LABEL: sti64ldfixedi64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
@@ -54,14 +54,14 @@ define <2 x i64> @sti64ldfixedi64(<vscale x 2 x i64>* nocapture %P, <vscale x 2
 ; CHECK-NEXT:    ldr q0, [x0, x8]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx0 = getelementptr inbounds <vscale x 2 x i64>, <vscale x 2 x i64>* %P, i64 1
-  store <vscale x 2 x i64> %v, <vscale x 2 x i64>* %arrayidx0
-  %arrayidx1 = bitcast <vscale x 2 x i64>* %arrayidx0 to <2 x i64>*
-  %0 = load <2 x i64>, <2 x i64>* %arrayidx1
+  %arrayidx0 = getelementptr inbounds <vscale x 2 x i64>, ptr %P, i64 1
+  store <vscale x 2 x i64> %v, ptr %arrayidx0
+  %arrayidx1 = bitcast ptr %arrayidx0 to ptr
+  %0 = load <2 x i64>, ptr %arrayidx1
   ret <2 x i64> %0
 }
 
-define <vscale x 4 x i32> @sti64ldi32(<vscale x 2 x i64>* nocapture %P, <vscale x 2 x i64> %v) {
+define <vscale x 4 x i32> @sti64ldi32(ptr nocapture %P, <vscale x 2 x i64> %v) {
 ; CHECK-LABEL: sti64ldi32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
@@ -70,15 +70,15 @@ define <vscale x 4 x i32> @sti64ldi32(<vscale x 2 x i64>* nocapture %P, <vscale
 ; CHECK-NEXT:    ld1w { z0.s }, p1/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast <vscale x 2 x i64>* %P to <vscale x 4 x i32>*
-  %arrayidx0 = getelementptr inbounds <vscale x 2 x i64>, <vscale x 2 x i64>* %P, i64 1
-  store <vscale x 2 x i64> %v, <vscale x 2 x i64>* %arrayidx0
-  %arrayidx1 = getelementptr inbounds <vscale x 4 x i32>, <vscale x 4 x i32>* %0, i64 1
-  %1 = load <vscale x 4 x i32>, <vscale x 4 x i32>* %arrayidx1
+  %0 = bitcast ptr %P to ptr
+  %arrayidx0 = getelementptr inbounds <vscale x 2 x i64>, ptr %P, i64 1
+  store <vscale x 2 x i64> %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds <vscale x 4 x i32>, ptr %0, i64 1
+  %1 = load <vscale x 4 x i32>, ptr %arrayidx1
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 2 x i64> @stf64ldi64(<vscale x 2 x double>* nocapture %P, <vscale x 2 x double> %v) {
+define <vscale x 2 x i64> @stf64ldi64(ptr nocapture %P, <vscale x 2 x double> %v) {
 ; CHECK-LABEL: stf64ldi64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
@@ -86,10 +86,10 @@ define <vscale x 2 x i64> @stf64ldi64(<vscale x 2 x double>* nocapture %P, <vsca
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast <vscale x 2 x double>* %P to <vscale x 2 x i64>*
-  %arrayidx0 = getelementptr inbounds <vscale x 2 x double>, <vscale x 2 x double>* %P, i64 1
-  store <vscale x 2 x double> %v, <vscale x 2 x double>* %arrayidx0
-  %arrayidx1 = getelementptr inbounds <vscale x 2 x i64>, <vscale x 2 x i64>* %0, i64 1
-  %1 = load <vscale x 2 x i64>, <vscale x 2 x i64>* %arrayidx1
+  %0 = bitcast ptr %P to ptr
+  %arrayidx0 = getelementptr inbounds <vscale x 2 x double>, ptr %P, i64 1
+  store <vscale x 2 x double> %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds <vscale x 2 x i64>, ptr %0, i64 1
+  %1 = load <vscale x 2 x i64>, ptr %arrayidx1
   ret <vscale x 2 x i64> %1
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-fp.ll b/llvm/test/CodeGen/AArch64/sve-fp.ll
index 7d54f971f61ec..a3fc6ded5f9fa 100644
--- a/llvm/test/CodeGen/AArch64/sve-fp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fp.ll
@@ -510,15 +510,15 @@ define void @scalar_to_vector(ptr %outval, <vscale x 2 x i1> %pred, <vscale x 2
   ret void
 }
 
-define void @float_copy(<vscale x 4 x float>* %P1, <vscale x 4 x float>* %P2) {
+define void @float_copy(ptr %P1, ptr %P2) {
 ; CHECK-LABEL: float_copy:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
 ; CHECK-NEXT:    ret
-  %A = load <vscale x 4 x float>, <vscale x 4 x float>* %P1, align 16
-  store <vscale x 4 x float> %A, <vscale x 4 x float>* %P2, align 16
+  %A = load <vscale x 4 x float>, ptr %P1, align 16
+  store <vscale x 4 x float> %A, ptr %P2, align 16
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-fpext-load.ll b/llvm/test/CodeGen/AArch64/sve-fpext-load.ll
index 5a600915bf79d..78decf5ed1e3c 100644
--- a/llvm/test/CodeGen/AArch64/sve-fpext-load.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fpext-load.ll
@@ -2,20 +2,20 @@
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
 
 ; fpext <vscale x 2 x half> -> <vscale x 2 x double>
-define <vscale x 2 x double> @ext2_f16_f64(<vscale x 2 x half> *%ptr, i64 %index) {
+define <vscale x 2 x double> @ext2_f16_f64(ptr %ptr, i64 %index) {
 ; CHECK-LABEL: ext2_f16_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    fcvt z0.d, p0/m, z0.h
 ; CHECK-NEXT:    ret
-  %load = load <vscale x 2 x half>, <vscale x 2 x half>* %ptr, align 4
+  %load = load <vscale x 2 x half>, ptr %ptr, align 4
   %load.ext = fpext <vscale x 2 x half> %load to <vscale x 2 x double>
   ret <vscale x 2 x double> %load.ext
 }
 
 ; fpext <vscale x 4 x half> -> <vscale x 4 x double>
-define <vscale x 4 x double> @ext4_f16_f64(<vscale x 4 x half> *%ptr, i64 %index) {
+define <vscale x 4 x double> @ext4_f16_f64(ptr %ptr, i64 %index) {
 ; CHECK-LABEL: ext4_f16_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -28,13 +28,13 @@ define <vscale x 4 x double> @ext4_f16_f64(<vscale x 4 x half> *%ptr, i64 %index
 ; CHECK-NEXT:    movprfx z1, z2
 ; CHECK-NEXT:    fcvt z1.d, p0/m, z2.h
 ; CHECK-NEXT:    ret
-  %load = load <vscale x 4 x half>, <vscale x 4 x half>* %ptr, align 4
+  %load = load <vscale x 4 x half>, ptr %ptr, align 4
   %load.ext = fpext <vscale x 4 x half> %load to <vscale x 4 x double>
   ret <vscale x 4 x double> %load.ext
 }
 
 ; fpext <vscale x 8 x half> -> <vscale x 8 x double>
-define <vscale x 8 x double> @ext8_f16_f64(<vscale x 8 x half> *%ptr, i64 %index) {
+define <vscale x 8 x double> @ext8_f16_f64(ptr %ptr, i64 %index) {
 ; CHECK-LABEL: ext8_f16_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
@@ -54,26 +54,26 @@ define <vscale x 8 x double> @ext8_f16_f64(<vscale x 8 x half> *%ptr, i64 %index
 ; CHECK-NEXT:    fcvt z3.d, p0/m, z4.h
 ; CHECK-NEXT:    fcvt z1.d, p0/m, z1.h
 ; CHECK-NEXT:    ret
-  %load = load <vscale x 8 x half>, <vscale x 8 x half>* %ptr, align 4
+  %load = load <vscale x 8 x half>, ptr %ptr, align 4
   %load.ext = fpext <vscale x 8 x half> %load to <vscale x 8 x double>
   ret <vscale x 8 x double> %load.ext
 }
 
 ; fpext <vscale x 2 x float> -> <vscale x 2 x double>
-define <vscale x 2 x double> @ext2_f32_f64(<vscale x 2 x float> *%ptr, i64 %index) {
+define <vscale x 2 x double> @ext2_f32_f64(ptr %ptr, i64 %index) {
 ; CHECK-LABEL: ext2_f32_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    fcvt z0.d, p0/m, z0.s
 ; CHECK-NEXT:    ret
-  %load = load <vscale x 2 x float>, <vscale x 2 x float>* %ptr, align 4
+  %load = load <vscale x 2 x float>, ptr %ptr, align 4
   %load.ext = fpext <vscale x 2 x float> %load to <vscale x 2 x double>
   ret <vscale x 2 x double> %load.ext
 }
 
 ; fpext <vscale x 4 x float> -> <vscale x 4 x double>
-define <vscale x 4 x double> @ext4_f32_f64(<vscale x 4 x float> *%ptr, i64 %index) {
+define <vscale x 4 x double> @ext4_f32_f64(ptr %ptr, i64 %index) {
 ; CHECK-LABEL: ext4_f32_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -86,7 +86,7 @@ define <vscale x 4 x double> @ext4_f32_f64(<vscale x 4 x float> *%ptr, i64 %inde
 ; CHECK-NEXT:    movprfx z1, z2
 ; CHECK-NEXT:    fcvt z1.d, p0/m, z2.s
 ; CHECK-NEXT:    ret
-  %load = load <vscale x 4 x float>, <vscale x 4 x float>* %ptr, align 4
+  %load = load <vscale x 4 x float>, ptr %ptr, align 4
   %load.ext = fpext <vscale x 4 x float> %load to <vscale x 4 x double>
   ret <vscale x 4 x double> %load.ext
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll b/llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll
index 106be131bf10b..d813294a0c415 100644
--- a/llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
 
-define void @fptrunc2_f64_f32(<vscale x 2 x float> *%dst, <vscale x 2 x double> *%src) {
+define void @fptrunc2_f64_f32(ptr %dst, ptr %src) {
 ; CHECK-LABEL: fptrunc2_f64_f32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
@@ -10,13 +10,13 @@ define void @fptrunc2_f64_f32(<vscale x 2 x float> *%dst, <vscale x 2 x double>
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load <vscale x 2 x double>, <vscale x 2 x double>* %src, align 8
+  %0 = load <vscale x 2 x double>, ptr %src, align 8
   %1 = fptrunc <vscale x 2 x double> %0 to <vscale x 2 x float>
-  store <vscale x 2 x float> %1, <vscale x 2 x float>* %dst, align 4
+  store <vscale x 2 x float> %1, ptr %dst, align 4
   ret void
 }
 
-define void @fptrunc2_f64_f16(<vscale x 2 x half> *%dst, <vscale x 2 x double> *%src) {
+define void @fptrunc2_f64_f16(ptr %dst, ptr %src) {
 ; CHECK-LABEL: fptrunc2_f64_f16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
@@ -25,13 +25,13 @@ define void @fptrunc2_f64_f16(<vscale x 2 x half> *%dst, <vscale x 2 x double> *
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load <vscale x 2 x double>, <vscale x 2 x double>* %src, align 8
+  %0 = load <vscale x 2 x double>, ptr %src, align 8
   %1 = fptrunc <vscale x 2 x double> %0 to <vscale x 2 x half>
-  store <vscale x 2 x half> %1, <vscale x 2 x half>* %dst, align 2
+  store <vscale x 2 x half> %1, ptr %dst, align 2
   ret void
 }
 
-define void @fptrunc4_f32_f16(<vscale x 4 x half> *%dst, <vscale x 4 x float> *%src) {
+define void @fptrunc4_f32_f16(ptr %dst, ptr %src) {
 ; CHECK-LABEL: fptrunc4_f32_f16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
@@ -40,13 +40,13 @@ define void @fptrunc4_f32_f16(<vscale x 4 x half> *%dst, <vscale x 4 x float> *%
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load <vscale x 4 x float>, <vscale x 4 x float>* %src, align 8
+  %0 = load <vscale x 4 x float>, ptr %src, align 8
   %1 = fptrunc <vscale x 4 x float> %0 to <vscale x 4 x half>
-  store <vscale x 4 x half> %1, <vscale x 4 x half>* %dst, align 2
+  store <vscale x 4 x half> %1, ptr %dst, align 2
   ret void
 }
 
-define void @fptrunc2_f32_f16(<vscale x 2 x half> *%dst, <vscale x 2 x float> *%src) {
+define void @fptrunc2_f32_f16(ptr %dst, ptr %src) {
 ; CHECK-LABEL: fptrunc2_f32_f16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
@@ -55,13 +55,13 @@ define void @fptrunc2_f32_f16(<vscale x 2 x half> *%dst, <vscale x 2 x float> *%
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load <vscale x 2 x float>, <vscale x 2 x float>* %src, align 8
+  %0 = load <vscale x 2 x float>, ptr %src, align 8
   %1 = fptrunc <vscale x 2 x float> %0 to <vscale x 2 x half>
-  store <vscale x 2 x half> %1, <vscale x 2 x half>* %dst, align 2
+  store <vscale x 2 x half> %1, ptr %dst, align 2
   ret void
 }
 
-define void @fptrunc8_f64_f16(<vscale x 8 x half> *%dst, <vscale x 8 x double> *%src) {
+define void @fptrunc8_f64_f16(ptr %dst, ptr %src) {
 ; CHECK-LABEL: fptrunc8_f64_f16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
@@ -80,8 +80,8 @@ define void @fptrunc8_f64_f16(<vscale x 8 x half> *%dst, <vscale x 8 x double> *
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load <vscale x 8 x double>, <vscale x 8 x double>* %src, align 8
+  %0 = load <vscale x 8 x double>, ptr %src, align 8
   %1 = fptrunc <vscale x 8 x double> %0 to <vscale x 8 x half>
-  store <vscale x 8 x half> %1, <vscale x 8 x half>* %dst, align 2
+  store <vscale x 8 x half> %1, ptr %dst, align 2
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll b/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
index 34918237433ca..f7a963186c139 100644
--- a/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll
@@ -12,7 +12,7 @@ define <vscale x 2 x i64> @no_dag_combine_zext_sext(<vscale x 2 x i1> %pg,
 ; CHECK-NEXT:    and z0.d, z0.d, #0xff
 ; CHECK-NEXT:    ret
                                                     <vscale x 2 x i64> %base,
-                                                    <vscale x 2 x i8>* %res_out,
+                                                    ptr %res_out,
                                                     <vscale x 2 x i1> %pred) {
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                            <vscale x 2 x i64> %base,
@@ -20,7 +20,7 @@ define <vscale x 2 x i64> @no_dag_combine_zext_sext(<vscale x 2 x i1> %pg,
   %res1 = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   %res2 = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %load,
-                                      <vscale x 2 x i8> *%res_out,
+                                      ptr %res_out,
                                       i32 8,
                                       <vscale x 2 x i1> %pred)
 
@@ -37,14 +37,14 @@ define <vscale x 2 x i64> @no_dag_combine_sext(<vscale x 2 x i1> %pg,
 ; CHECK-NEXT:    st1b { z1.d }, p1, [x0]
 ; CHECK-NEXT:    ret
                                                <vscale x 2 x i64> %base,
-                                               <vscale x 2 x i8>* %res_out,
+                                               ptr %res_out,
                                                <vscale x 2 x i1> %pred) {
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                            <vscale x 2 x i64> %base,
                                                                                            i64 16)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %load,
-                                      <vscale x 2 x i8> *%res_out,
+                                      ptr %res_out,
                                       i32 8,
                                       <vscale x 2 x i1> %pred)
 
@@ -59,21 +59,21 @@ define <vscale x 2 x i64> @no_dag_combine_zext(<vscale x 2 x i1> %pg,
 ; CHECK-NEXT:    and z0.d, z0.d, #0xff
 ; CHECK-NEXT:    ret
                                                <vscale x 2 x i64> %base,
-                                               <vscale x 2 x i8>* %res_out,
+                                               ptr %res_out,
                                                <vscale x 2 x i1> %pred) {
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                            <vscale x 2 x i64> %base,
                                                                                            i64 16)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %load,
-                                      <vscale x 2 x i8> *%res_out,
+                                      ptr %res_out,
                                       i32 8,
                                       <vscale x 2 x i1> %pred)
 
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 16 x i8> @narrow_i64_gather_index_i8_zext(i8* %out, i8* %in, <vscale x 16 x i8> %d, i64 %ptr){
+define <vscale x 16 x i8> @narrow_i64_gather_index_i8_zext(ptr %out, ptr %in, <vscale x 16 x i8> %d, i64 %ptr){
 ; CHECK-LABEL: narrow_i64_gather_index_i8_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -90,16 +90,16 @@ define <vscale x 16 x i8> @narrow_i64_gather_index_i8_zext(i8* %out, i8* %in, <v
 ; CHECK-NEXT:    uzp1 z1.h, z1.h, z2.h
 ; CHECK-NEXT:    uzp1 z0.b, z1.b, z0.b
 ; CHECK-NEXT:    ret
-  %1 = getelementptr inbounds i8, i8* %in, i64 %ptr
-  %2 = bitcast i8* %1 to <vscale x 16 x i8>*
-  %wide.load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %2, align 1
+  %1 = getelementptr inbounds i8, ptr %in, i64 %ptr
+  %2 = bitcast ptr %1 to ptr
+  %wide.load = load <vscale x 16 x i8>, ptr %2, align 1
   %3 = zext <vscale x 16 x i8> %wide.load to <vscale x 16 x i64>
-  %4 = getelementptr inbounds i8, i8* %in, <vscale x 16 x i64> %3
-  %wide.masked.gather = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x i8*> %4, i32 1, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i32 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i8> undef)
+  %4 = getelementptr inbounds i8, ptr %in, <vscale x 16 x i64> %3
+  %wide.masked.gather = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> %4, i32 1, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i32 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i8> undef)
   ret <vscale x 16 x i8> %wide.masked.gather
 }
 
-define <vscale x 16 x i8> @narrow_i64_gather_index_i8_sext(i8* %out, i8* %in, <vscale x 16 x i8> %d, i64 %ptr){
+define <vscale x 16 x i8> @narrow_i64_gather_index_i8_sext(ptr %out, ptr %in, <vscale x 16 x i8> %d, i64 %ptr){
 ; CHECK-LABEL: narrow_i64_gather_index_i8_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -116,16 +116,16 @@ define <vscale x 16 x i8> @narrow_i64_gather_index_i8_sext(i8* %out, i8* %in, <v
 ; CHECK-NEXT:    uzp1 z1.h, z1.h, z2.h
 ; CHECK-NEXT:    uzp1 z0.b, z1.b, z0.b
 ; CHECK-NEXT:    ret
-  %1 = getelementptr inbounds i8, i8* %in, i64 %ptr
-  %2 = bitcast i8* %1 to <vscale x 16 x i8>*
-  %wide.load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %2, align 1
+  %1 = getelementptr inbounds i8, ptr %in, i64 %ptr
+  %2 = bitcast ptr %1 to ptr
+  %wide.load = load <vscale x 16 x i8>, ptr %2, align 1
   %3 = sext <vscale x 16 x i8> %wide.load to <vscale x 16 x i64>
-  %4 = getelementptr inbounds i8, i8* %in, <vscale x 16 x i64> %3
-  %wide.masked.gather = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x i8*> %4, i32 1, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i32 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i8> undef)
+  %4 = getelementptr inbounds i8, ptr %in, <vscale x 16 x i64> %3
+  %wide.masked.gather = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> %4, i32 1, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i32 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i8> undef)
   ret <vscale x 16 x i8> %wide.masked.gather
 }
 
-define <vscale x 8 x i16> @narrow_i64_gather_index_i16_zext(i16* %out, i16* %in, <vscale x 8 x i16> %d, i64 %ptr){
+define <vscale x 8 x i16> @narrow_i64_gather_index_i16_zext(ptr %out, ptr %in, <vscale x 8 x i16> %d, i64 %ptr){
 ; CHECK-LABEL: narrow_i64_gather_index_i16_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -136,16 +136,16 @@ define <vscale x 8 x i16> @narrow_i64_gather_index_i16_zext(i16* %out, i16* %in,
 ; CHECK-NEXT:    ld1h { z1.s }, p0/z, [x1, z1.s, uxtw #1]
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %1 = getelementptr inbounds i16, i16* %in, i64 %ptr
-  %2 = bitcast i16* %1 to <vscale x 8 x i16>*
-  %wide.load = load <vscale x 8 x i16>, <vscale x 8 x i16>* %2, align 1
+  %1 = getelementptr inbounds i16, ptr %in, i64 %ptr
+  %2 = bitcast ptr %1 to ptr
+  %wide.load = load <vscale x 8 x i16>, ptr %2, align 1
   %3 = zext <vscale x 8 x i16> %wide.load to <vscale x 8 x i64>
-  %4 = getelementptr inbounds i16, i16* %in, <vscale x 8 x i64> %3
-  %wide.masked.gather = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x i16*> %4, i32 1, <vscale x 8 x i1> shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i32 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i16> undef)
+  %4 = getelementptr inbounds i16, ptr %in, <vscale x 8 x i64> %3
+  %wide.masked.gather = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %4, i32 1, <vscale x 8 x i1> shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i32 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i16> undef)
   ret <vscale x 8 x i16> %wide.masked.gather
 }
 
-define <vscale x 8 x i16> @narrow_i64_gather_index_i16_sext(i16* %out, i16* %in, <vscale x 8 x i16> %d, i64 %ptr){
+define <vscale x 8 x i16> @narrow_i64_gather_index_i16_sext(ptr %out, ptr %in, <vscale x 8 x i16> %d, i64 %ptr){
 ; CHECK-LABEL: narrow_i64_gather_index_i16_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -156,49 +156,49 @@ define <vscale x 8 x i16> @narrow_i64_gather_index_i16_sext(i16* %out, i16* %in,
 ; CHECK-NEXT:    ld1h { z1.s }, p0/z, [x1, z1.s, sxtw #1]
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %1 = getelementptr inbounds i16, i16* %in, i64 %ptr
-  %2 = bitcast i16* %1 to <vscale x 8 x i16>*
-  %wide.load = load <vscale x 8 x i16>, <vscale x 8 x i16>* %2, align 1
+  %1 = getelementptr inbounds i16, ptr %in, i64 %ptr
+  %2 = bitcast ptr %1 to ptr
+  %wide.load = load <vscale x 8 x i16>, ptr %2, align 1
   %3 = sext <vscale x 8 x i16> %wide.load to <vscale x 8 x i64>
-  %4 = getelementptr inbounds i16, i16* %in, <vscale x 8 x i64> %3
-  %wide.masked.gather = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x i16*> %4, i32 1, <vscale x 8 x i1> shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i32 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i16> undef)
+  %4 = getelementptr inbounds i16, ptr %in, <vscale x 8 x i64> %3
+  %wide.masked.gather = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %4, i32 1, <vscale x 8 x i1> shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i32 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i16> undef)
   ret <vscale x 8 x i16> %wide.masked.gather
 }
 
-define <vscale x 4 x i32> @no_narrow_i64_gather_index_i32(i32* %out, i32* %in, <vscale x 4 x i32> %d, i64 %ptr){
+define <vscale x 4 x i32> @no_narrow_i64_gather_index_i32(ptr %out, ptr %in, <vscale x 4 x i32> %d, i64 %ptr){
 ; CHECK-LABEL: no_narrow_i64_gather_index_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x1, x2, lsl #2]
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x1, z0.s, uxtw #2]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr inbounds i32, i32* %in, i64 %ptr
-  %2 = bitcast i32* %1 to <vscale x 4 x i32>*
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %2, align 1
+  %1 = getelementptr inbounds i32, ptr %in, i64 %ptr
+  %2 = bitcast ptr %1 to ptr
+  %wide.load = load <vscale x 4 x i32>, ptr %2, align 1
   %3 = zext <vscale x 4 x i32> %wide.load to <vscale x 4 x i64>
-  %4 = getelementptr inbounds i32, i32* %in, <vscale x 4 x i64> %3
-  %wide.masked.gather = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x i32*> %4, i32 1, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i32 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> undef)
+  %4 = getelementptr inbounds i32, ptr %in, <vscale x 4 x i64> %3
+  %wide.masked.gather = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %4, i32 1, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i32 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> undef)
   ret <vscale x 4 x i32> %wide.masked.gather
 }
 
-define <vscale x 2 x i64> @no_narrow_i64_gather_index_i64(i64* %out, i64* %in, <vscale x 2 x i64> %d, i64 %ptr){
+define <vscale x 2 x i64> @no_narrow_i64_gather_index_i64(ptr %out, ptr %in, <vscale x 2 x i64> %d, i64 %ptr){
 ; CHECK-LABEL: no_narrow_i64_gather_index_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x1, x2, lsl #3]
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x1, z0.d, lsl #3]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr inbounds i64, i64* %in, i64 %ptr
-  %2 = bitcast i64* %1 to <vscale x 2 x i64>*
-  %wide.load = load <vscale x 2 x i64>, <vscale x 2 x i64>* %2, align 1
-  %3 = getelementptr inbounds i64, i64* %in, <vscale x 2 x i64> %wide.load
-  %wide.masked.gather = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x i64*> %3, i32 1, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i32 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i64> undef)
+  %1 = getelementptr inbounds i64, ptr %in, i64 %ptr
+  %2 = bitcast ptr %1 to ptr
+  %wide.load = load <vscale x 2 x i64>, ptr %2, align 1
+  %3 = getelementptr inbounds i64, ptr %in, <vscale x 2 x i64> %wide.load
+  %wide.masked.gather = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> %3, i32 1, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i32 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i64> undef)
   ret <vscale x 2 x i64> %wide.masked.gather
 }
 
 declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)
-declare void @llvm.masked.store.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>*, i32, <vscale x 2 x i1>)
-declare <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x i8*>, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
-declare <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x i16*>, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
-declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare void @llvm.masked.store.nxv2i8(<vscale x 2 x i8>, ptr, i32, <vscale x 2 x i1>)
+declare <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr>, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)

diff --git a/llvm/test/CodeGen/AArch64/sve-gep.ll b/llvm/test/CodeGen/AArch64/sve-gep.ll
index 8ebd0198e9099..61ef740a722e9 100644
--- a/llvm/test/CodeGen/AArch64/sve-gep.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gep.ll
@@ -1,37 +1,37 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
 
-define <vscale x 2 x i64>* @scalar_of_scalable_1(<vscale x 2 x i64>* %base) {
+define ptr @scalar_of_scalable_1(ptr %base) {
 ; CHECK-LABEL: scalar_of_scalable_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #4
 ; CHECK-NEXT:    add x0, x0, x8
 ; CHECK-NEXT:    ret
-  %d = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 4
-  ret <vscale x 2 x i64>* %d
+  %d = getelementptr <vscale x 2 x i64>, ptr %base, i64 4
+  ret ptr %d
 }
 
-define <vscale x 2 x i64>* @scalar_of_scalable_2(<vscale x 2 x i64>* %base, i64 %offset) {
+define ptr @scalar_of_scalable_2(ptr %base, i64 %offset) {
 ; CHECK-LABEL: scalar_of_scalable_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #1
 ; CHECK-NEXT:    madd x0, x1, x8, x0
 ; CHECK-NEXT:    ret
-  %d = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 %offset
-  ret <vscale x 2 x i64>* %d
+  %d = getelementptr <vscale x 2 x i64>, ptr %base, i64 %offset
+  ret ptr %d
 }
 
-define <vscale x 2 x i32>* @scalar_of_scalable_3(<vscale x 2 x i32>* %base, i64 %offset) {
+define ptr @scalar_of_scalable_3(ptr %base, i64 %offset) {
 ; CHECK-LABEL: scalar_of_scalable_3:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnth x8
 ; CHECK-NEXT:    madd x0, x1, x8, x0
 ; CHECK-NEXT:    ret
-  %d = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base, i64 %offset
-  ret <vscale x 2 x i32>* %d
+  %d = getelementptr <vscale x 2 x i32>, ptr %base, i64 %offset
+  ret ptr %d
 }
 
-define <2 x <vscale x 2 x i64>*> @fixed_of_scalable_1(<vscale x 2 x i64>* %base) {
+define <2 x ptr> @fixed_of_scalable_1(ptr %base) {
 ; CHECK-LABEL: fixed_of_scalable_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #1
@@ -39,168 +39,168 @@ define <2 x <vscale x 2 x i64>*> @fixed_of_scalable_1(<vscale x 2 x i64>* %base)
 ; CHECK-NEXT:    dup v0.2d, x8
 ; CHECK-NEXT:    add v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
-  %d = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, <2 x i64> <i64 1, i64 1>
-  ret <2 x <vscale x 2 x i64>*> %d
+  %d = getelementptr <vscale x 2 x i64>, ptr %base, <2 x i64> <i64 1, i64 1>
+  ret <2 x ptr> %d
 }
 
-define <2 x <vscale x 2 x i64>*> @fixed_of_scalable_2(<2 x <vscale x 2 x i64>*> %base) {
+define <2 x ptr> @fixed_of_scalable_2(<2 x ptr> %base) {
 ; CHECK-LABEL: fixed_of_scalable_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #1
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    add v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
-  %d = getelementptr <vscale x 2 x i64>, <2 x <vscale x 2 x i64>*> %base, <2 x i64> <i64 1, i64 1>
-  ret <2 x <vscale x 2 x i64>*> %d
+  %d = getelementptr <vscale x 2 x i64>, <2 x ptr> %base, <2 x i64> <i64 1, i64 1>
+  ret <2 x ptr> %d
 }
 
-define <vscale x 2 x i8*> @scalable_of_fixed_1(i8* %base) {
+define <vscale x 2 x ptr> @scalable_of_fixed_1(ptr %base) {
 ; CHECK-LABEL: scalable_of_fixed_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, #1
 ; CHECK-NEXT:    mov z0.d, x8
 ; CHECK-NEXT:    ret
   %idx = shufflevector <vscale x 2 x i64> insertelement (<vscale x 2 x i64> undef, i64 1, i32 0), <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i32> zeroinitializer
-  %d = getelementptr i8, i8* %base, <vscale x 2 x i64> %idx
-  ret <vscale x 2 x i8*> %d
+  %d = getelementptr i8, ptr %base, <vscale x 2 x i64> %idx
+  ret <vscale x 2 x ptr> %d
 }
 
-define <vscale x 2 x i8*> @scalable_of_fixed_2(<vscale x 2 x i8*> %base) {
+define <vscale x 2 x ptr> @scalable_of_fixed_2(<vscale x 2 x ptr> %base) {
 ; CHECK-LABEL: scalable_of_fixed_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add z0.d, z0.d, #1 // =0x1
 ; CHECK-NEXT:    ret
   %idx = shufflevector <vscale x 2 x i64> insertelement (<vscale x 2 x i64> undef, i64 1, i32 0), <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i32> zeroinitializer
-  %d = getelementptr i8, <vscale x 2 x i8*> %base, <vscale x 2 x i64> %idx
-  ret <vscale x 2 x i8*> %d
+  %d = getelementptr i8, <vscale x 2 x ptr> %base, <vscale x 2 x i64> %idx
+  ret <vscale x 2 x ptr> %d
 }
 
-define <vscale x 2 x i8*> @scalable_of_fixed_3_i8(i8* %base, <vscale x 2 x i64> %idx) {
+define <vscale x 2 x ptr> @scalable_of_fixed_3_i8(ptr %base, <vscale x 2 x i64> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_3_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z1.d, x0
 ; CHECK-NEXT:    add z0.d, z1.d, z0.d
 ; CHECK-NEXT:    ret
-  %d = getelementptr i8, i8* %base, <vscale x 2 x i64> %idx
-  ret <vscale x 2 x i8*> %d
+  %d = getelementptr i8, ptr %base, <vscale x 2 x i64> %idx
+  ret <vscale x 2 x ptr> %d
 }
 
-define <vscale x 2 x i16*> @scalable_of_fixed_3_i16(i16* %base, <vscale x 2 x i64> %idx) {
+define <vscale x 2 x ptr> @scalable_of_fixed_3_i16(ptr %base, <vscale x 2 x i64> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_3_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z1.d, x0
 ; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, lsl #1]
 ; CHECK-NEXT:    ret
-  %d = getelementptr i16, i16* %base, <vscale x 2 x i64> %idx
-  ret <vscale x 2 x i16*> %d
+  %d = getelementptr i16, ptr %base, <vscale x 2 x i64> %idx
+  ret <vscale x 2 x ptr> %d
 }
 
-define <vscale x 2 x i32*> @scalable_of_fixed_3_i32(i32* %base, <vscale x 2 x i64> %idx) {
+define <vscale x 2 x ptr> @scalable_of_fixed_3_i32(ptr %base, <vscale x 2 x i64> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_3_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z1.d, x0
 ; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, lsl #2]
 ; CHECK-NEXT:    ret
-  %d = getelementptr i32, i32* %base, <vscale x 2 x i64> %idx
-  ret <vscale x 2 x i32*> %d
+  %d = getelementptr i32, ptr %base, <vscale x 2 x i64> %idx
+  ret <vscale x 2 x ptr> %d
 }
 
-define <vscale x 2 x i64*> @scalable_of_fixed_3_i64(i64* %base, <vscale x 2 x i64> %idx) {
+define <vscale x 2 x ptr> @scalable_of_fixed_3_i64(ptr %base, <vscale x 2 x i64> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_3_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z1.d, x0
 ; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, lsl #3]
 ; CHECK-NEXT:    ret
-  %d = getelementptr i64, i64* %base, <vscale x 2 x i64> %idx
-  ret <vscale x 2 x i64*> %d
+  %d = getelementptr i64, ptr %base, <vscale x 2 x i64> %idx
+  ret <vscale x 2 x ptr> %d
 }
 
-define <vscale x 2 x i8*> @scalable_of_fixed_4_i8(i8* %base, <vscale x 2 x i32> %idx) {
+define <vscale x 2 x ptr> @scalable_of_fixed_4_i8(ptr %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_4_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z1.d, x0
 ; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, sxtw]
 ; CHECK-NEXT:    ret
-  %d = getelementptr i8, i8* %base, <vscale x 2 x i32> %idx
-  ret <vscale x 2 x i8*> %d
+  %d = getelementptr i8, ptr %base, <vscale x 2 x i32> %idx
+  ret <vscale x 2 x ptr> %d
 }
 
-define <vscale x 2 x i16*> @scalable_of_fixed_4_i16(i16* %base, <vscale x 2 x i32> %idx) {
+define <vscale x 2 x ptr> @scalable_of_fixed_4_i16(ptr %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_4_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z1.d, x0
 ; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, sxtw #1]
 ; CHECK-NEXT:    ret
-  %d = getelementptr i16, i16* %base, <vscale x 2 x i32> %idx
-  ret <vscale x 2 x i16*> %d
+  %d = getelementptr i16, ptr %base, <vscale x 2 x i32> %idx
+  ret <vscale x 2 x ptr> %d
 }
 
-define <vscale x 2 x i32*> @scalable_of_fixed_4_i32(i32* %base, <vscale x 2 x i32> %idx) {
+define <vscale x 2 x ptr> @scalable_of_fixed_4_i32(ptr %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_4_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z1.d, x0
 ; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, sxtw #2]
 ; CHECK-NEXT:    ret
-  %d = getelementptr i32, i32* %base, <vscale x 2 x i32> %idx
-  ret <vscale x 2 x i32*> %d
+  %d = getelementptr i32, ptr %base, <vscale x 2 x i32> %idx
+  ret <vscale x 2 x ptr> %d
 }
 
-define <vscale x 2 x i64*> @scalable_of_fixed_4_i64(i64* %base, <vscale x 2 x i32> %idx) {
+define <vscale x 2 x ptr> @scalable_of_fixed_4_i64(ptr %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_4_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z1.d, x0
 ; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, sxtw #3]
 ; CHECK-NEXT:    ret
-  %d = getelementptr i64, i64* %base, <vscale x 2 x i32> %idx
-  ret <vscale x 2 x i64*> %d
+  %d = getelementptr i64, ptr %base, <vscale x 2 x i32> %idx
+  ret <vscale x 2 x ptr> %d
 }
 
-define <vscale x 2 x i8*> @scalable_of_fixed_5(i8* %base, <vscale x 2 x i32> %idx) {
+define <vscale x 2 x ptr> @scalable_of_fixed_5(ptr %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_5:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z1.d, x0
 ; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
-  %d = getelementptr i8, i8* %base, <vscale x 2 x i64> %idxZext
-  ret <vscale x 2 x i8*> %d
+  %d = getelementptr i8, ptr %base, <vscale x 2 x i64> %idxZext
+  ret <vscale x 2 x ptr> %d
 }
 
-define <vscale x 2 x i16*> @scalable_of_fixed_5_i16(i16* %base, <vscale x 2 x i32> %idx) {
+define <vscale x 2 x ptr> @scalable_of_fixed_5_i16(ptr %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_5_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z1.d, x0
 ; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, uxtw #1]
 ; CHECK-NEXT:    ret
   %idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
-  %d = getelementptr i16, i16* %base, <vscale x 2 x i64> %idxZext
-  ret <vscale x 2 x i16*> %d
+  %d = getelementptr i16, ptr %base, <vscale x 2 x i64> %idxZext
+  ret <vscale x 2 x ptr> %d
 }
 
-define <vscale x 2 x i32*> @scalable_of_fixed_5_i32(i32* %base, <vscale x 2 x i32> %idx) {
+define <vscale x 2 x ptr> @scalable_of_fixed_5_i32(ptr %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_5_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z1.d, x0
 ; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, uxtw #2]
 ; CHECK-NEXT:    ret
   %idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
-  %d = getelementptr i32, i32* %base, <vscale x 2 x i64> %idxZext
-  ret <vscale x 2 x i32*> %d
+  %d = getelementptr i32, ptr %base, <vscale x 2 x i64> %idxZext
+  ret <vscale x 2 x ptr> %d
 }
 
 
-define <vscale x 2 x i64*> @scalable_of_fixed_5_i64(i64* %base, <vscale x 2 x i32> %idx) {
+define <vscale x 2 x ptr> @scalable_of_fixed_5_i64(ptr %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_fixed_5_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z1.d, x0
 ; CHECK-NEXT:    adr z0.d, [z1.d, z0.d, uxtw #3]
 ; CHECK-NEXT:    ret
   %idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
-  %d = getelementptr i64, i64* %base, <vscale x 2 x i64> %idxZext
-  ret <vscale x 2 x i64*> %d
+  %d = getelementptr i64, ptr %base, <vscale x 2 x i64> %idxZext
+  ret <vscale x 2 x ptr> %d
 }
 
-define <vscale x 2 x <vscale x 2 x i64>*> @scalable_of_scalable_1(<vscale x 2 x i64>* %base) {
+define <vscale x 2 x ptr> @scalable_of_scalable_1(ptr %base) {
 ; CHECK-LABEL: scalable_of_scalable_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #1
@@ -208,21 +208,21 @@ define <vscale x 2 x <vscale x 2 x i64>*> @scalable_of_scalable_1(<vscale x 2 x
 ; CHECK-NEXT:    mov z0.d, x8
 ; CHECK-NEXT:    ret
   %idx = shufflevector <vscale x 2 x i64> insertelement (<vscale x 2 x i64> undef, i64 1, i32 0), <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i32> zeroinitializer
-  %d = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, <vscale x 2 x i64> %idx
-  ret <vscale x 2 x <vscale x 2 x i64>*> %d
+  %d = getelementptr <vscale x 2 x i64>, ptr %base, <vscale x 2 x i64> %idx
+  ret <vscale x 2 x ptr> %d
 }
 
-define <vscale x 2 x <vscale x 2 x i64>*> @scalable_of_scalable_2(<vscale x 2 x <vscale x 2 x i64>*> %base) {
+define <vscale x 2 x ptr> @scalable_of_scalable_2(<vscale x 2 x ptr> %base) {
 ; CHECK-LABEL: scalable_of_scalable_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    incd z0.d, all, mul #8
 ; CHECK-NEXT:    ret
   %idx = shufflevector <vscale x 2 x i64> insertelement (<vscale x 2 x i64> undef, i64 1, i32 0), <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i32> zeroinitializer
-  %d = getelementptr <vscale x 2 x i64>, <vscale x 2 x <vscale x 2 x i64>*> %base, <vscale x 2 x i64> %idx
-  ret <vscale x 2 x <vscale x 2 x i64>*> %d
+  %d = getelementptr <vscale x 2 x i64>, <vscale x 2 x ptr> %base, <vscale x 2 x i64> %idx
+  ret <vscale x 2 x ptr> %d
 }
 
-define <vscale x 2 x <vscale x 2 x i64>*> @scalable_of_scalable_3(<vscale x 2 x <vscale x 2 x i64>*> %base, <vscale x 2 x i32> %idx) {
+define <vscale x 2 x ptr> @scalable_of_scalable_3(<vscale x 2 x ptr> %base, <vscale x 2 x i32> %idx) {
 ; CHECK-LABEL: scalable_of_scalable_3:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -231,6 +231,6 @@ define <vscale x 2 x <vscale x 2 x i64>*> @scalable_of_scalable_3(<vscale x 2 x
 ; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
 ; CHECK-NEXT:    mla z0.d, p0/m, z1.d, z2.d
 ; CHECK-NEXT:    ret
-  %d = getelementptr <vscale x 2 x i64>, <vscale x 2 x <vscale x 2 x i64>*> %base, <vscale x 2 x i32> %idx
-  ret <vscale x 2 x <vscale x 2 x i64>*> %d
+  %d = getelementptr <vscale x 2 x i64>, <vscale x 2 x ptr> %base, <vscale x 2 x i32> %idx
+  ret <vscale x 2 x ptr> %d
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
index 5ffd7f1dfe165..4a5e272582d8e 100644
--- a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
@@ -139,7 +139,7 @@ define <vscale x 16 x i8> @insert_v16i8_nxv16i8_idx16(<vscale x 16 x i8> %vec, <
 
 ; Insert subvectors into illegal vectors
 
-define void @insert_nxv8i64_nxv16i64(<vscale x 8 x i64> %sv0, <vscale x 8 x i64> %sv1, <vscale x 16 x i64>* %out) {
+define void @insert_nxv8i64_nxv16i64(<vscale x 8 x i64> %sv0, <vscale x 8 x i64> %sv1, ptr %out) {
 ; CHECK-LABEL: insert_nxv8i64_nxv16i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -154,11 +154,11 @@ define void @insert_nxv8i64_nxv16i64(<vscale x 8 x i64> %sv0, <vscale x 8 x i64>
 ; CHECK-NEXT:    ret
   %v0 = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
   %v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> %v0, <vscale x 8 x i64> %sv1, i64 8)
-  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
+  store <vscale x 16 x i64> %v, ptr %out
   ret void
 }
 
-define void @insert_nxv8i64_nxv16i64_lo(<vscale x 8 x i64> %sv0, <vscale x 16 x i64>* %out) {
+define void @insert_nxv8i64_nxv16i64_lo(<vscale x 8 x i64> %sv0, ptr %out) {
 ; CHECK-LABEL: insert_nxv8i64_nxv16i64_lo:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -168,11 +168,11 @@ define void @insert_nxv8i64_nxv16i64_lo(<vscale x 8 x i64> %sv0, <vscale x 16 x
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
-  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
+  store <vscale x 16 x i64> %v, ptr %out
   ret void
 }
 
-define void @insert_nxv8i64_nxv16i64_hi(<vscale x 8 x i64> %sv0, <vscale x 16 x i64>* %out) {
+define void @insert_nxv8i64_nxv16i64_hi(<vscale x 8 x i64> %sv0, ptr %out) {
 ; CHECK-LABEL: insert_nxv8i64_nxv16i64_hi:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -182,11 +182,11 @@ define void @insert_nxv8i64_nxv16i64_hi(<vscale x 8 x i64> %sv0, <vscale x 16 x
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, #4, mul vl]
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 8)
-  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
+  store <vscale x 16 x i64> %v, ptr %out
   ret void
 }
 
-define void @insert_v2i64_nxv16i64(<2 x i64> %sv0, <2 x i64> %sv1, <vscale x 16 x i64>* %out) uwtable {
+define void @insert_v2i64_nxv16i64(<2 x i64> %sv0, <2 x i64> %sv1, ptr %out) uwtable {
 ; CHECK-LABEL: insert_v2i64_nxv16i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -214,24 +214,24 @@ define void @insert_v2i64_nxv16i64(<2 x i64> %sv0, <2 x i64> %sv1, <vscale x 16
 ; CHECK-NEXT:    ret
   %v0 = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv0, i64 0)
   %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> %v0, <2 x i64> %sv1, i64 4)
-  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
+  store <vscale x 16 x i64> %v, ptr %out
   ret void
 }
 
-define void @insert_v2i64_nxv16i64_lo0(<2 x i64>* %psv, <vscale x 16 x i64>* %out) {
+define void @insert_v2i64_nxv16i64_lo0(ptr %psv, ptr %out) {
 ; CHECK-LABEL: insert_v2i64_nxv16i64_lo0:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
 ; CHECK-NEXT:    ret
-  %sv = load <2 x i64>, <2 x i64>* %psv
+  %sv = load <2 x i64>, ptr %psv
   %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 0)
-  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
+  store <vscale x 16 x i64> %v, ptr %out
   ret void
 }
 
-define void @insert_v2i64_nxv16i64_lo2(<2 x i64>* %psv, <vscale x 16 x i64>* %out) uwtable {
+define void @insert_v2i64_nxv16i64_lo2(ptr %psv, ptr %out) uwtable {
 ; CHECK-LABEL: insert_v2i64_nxv16i64_lo2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -252,9 +252,9 @@ define void @insert_v2i64_nxv16i64_lo2(<2 x i64>* %psv, <vscale x 16 x i64>* %ou
 ; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    .cfi_restore w29
 ; CHECK-NEXT:    ret
-  %sv = load <2 x i64>, <2 x i64>* %psv
+  %sv = load <2 x i64>, ptr %psv
   %v = call <vscale x 16 x i64> @llvm.vector.insert.v2i64.nxv16i64(<vscale x 16 x i64> undef, <2 x i64> %sv, i64 2)
-  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
+  store <vscale x 16 x i64> %v, ptr %out
   ret void
 }
 
@@ -385,7 +385,7 @@ define <vscale x 2 x i64> @insert_fixed_v2i64_nxv2i64(<vscale x 2 x i64> %vec, <
   ret <vscale x 2 x i64> %retval
 }
 
-define <vscale x 2 x i64> @insert_fixed_v4i64_nxv2i64(<vscale x 2 x i64> %vec, <4 x i64>* %ptr) nounwind #0 {
+define <vscale x 2 x i64> @insert_fixed_v4i64_nxv2i64(<vscale x 2 x i64> %vec, ptr %ptr) nounwind #0 {
 ; CHECK-LABEL: insert_fixed_v4i64_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
@@ -398,7 +398,7 @@ define <vscale x 2 x i64> @insert_fixed_v4i64_nxv2i64(<vscale x 2 x i64> %vec, <
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %subvec = load <4 x i64>, <4 x i64>* %ptr
+  %subvec = load <4 x i64>, ptr %ptr
   %retval = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> %vec, <4 x i64> %subvec, i64 4)
   ret <vscale x 2 x i64> %retval
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-int-arith.ll
index 486f59d7900e9..cb2b2f34ca5ef 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-arith.ll
@@ -425,7 +425,7 @@ define <vscale x 2 x i64> @mla_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b,
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 16 x i8> @mla_i8_multiuse(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8>* %p) {
+define <vscale x 16 x i8> @mla_i8_multiuse(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, ptr %p) {
 ; CHECK-LABEL: mla_i8_multiuse:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
@@ -434,7 +434,7 @@ define <vscale x 16 x i8> @mla_i8_multiuse(<vscale x 16 x i8> %a, <vscale x 16 x
 ; CHECK-NEXT:    st1b { z1.b }, p0, [x0]
 ; CHECK-NEXT:    ret
   %prod = mul <vscale x 16 x i8> %a, %b
-  store <vscale x 16 x i8> %prod, <vscale x 16 x i8>* %p
+  store <vscale x 16 x i8> %prod, ptr %p
   %res = add <vscale x 16 x i8> %c, %prod
   ret <vscale x 16 x i8> %res
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-contiguous-prefetches.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-contiguous-prefetches.ll
index 8b5f23e5b0987..3b2c7fed1d807 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-contiguous-prefetches.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-contiguous-prefetches.ll
@@ -119,7 +119,7 @@ entry:
 ; Testing imm limits of SI form
 ;
 
-define void @test_svprf_vnum_under(<vscale x 16 x i1> %pg, <vscale x 16 x i8>* %base) {
+define void @test_svprf_vnum_under(<vscale x 16 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprf_vnum_under:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    rdvl x8, #1
@@ -129,23 +129,23 @@ define void @test_svprf_vnum_under(<vscale x 16 x i1> %pg, <vscale x 16 x i8>* %
 ; CHECK-NEXT:    prfb pstl3strm, p0, [x0, x8]
 ; CHECK-NEXT:    ret
 entry:
-  %gep = getelementptr inbounds <vscale x 16 x i8>, <vscale x 16 x i8>* %base, i64 -33, i64 0
+  %gep = getelementptr inbounds <vscale x 16 x i8>, ptr %base, i64 -33, i64 0
   tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %gep, i32 13)
   ret void
 }
 
-define void @test_svprf_vnum_min(<vscale x 16 x i1> %pg, <vscale x 16 x i8>* %base) {
+define void @test_svprf_vnum_min(<vscale x 16 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprf_vnum_min:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfb pstl3strm, p0, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
 entry:
-  %gep = getelementptr inbounds <vscale x 16 x i8>, <vscale x 16 x i8>* %base, i64 -32, i64 0
+  %gep = getelementptr inbounds <vscale x 16 x i8>, ptr %base, i64 -32, i64 0
   tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %gep, i32 13)
   ret void
 }
 
-define void @test_svprf_vnum_over(<vscale x 16 x i1> %pg, <vscale x 16 x i8>* %base) {
+define void @test_svprf_vnum_over(<vscale x 16 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprf_vnum_over:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    rdvl x8, #1
@@ -155,18 +155,18 @@ define void @test_svprf_vnum_over(<vscale x 16 x i1> %pg, <vscale x 16 x i8>* %b
 ; CHECK-NEXT:    prfb pstl3strm, p0, [x0, x8]
 ; CHECK-NEXT:    ret
 entry:
-  %gep = getelementptr inbounds <vscale x 16 x i8>, <vscale x 16 x i8>* %base, i64 32, i64 0
+  %gep = getelementptr inbounds <vscale x 16 x i8>, ptr %base, i64 32, i64 0
   tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %gep, i32 13)
   ret void
 }
 
-define void @test_svprf_vnum_max(<vscale x 16 x i1> %pg, <vscale x 16 x i8>* %base) {
+define void @test_svprf_vnum_max(<vscale x 16 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprf_vnum_max:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfb pstl3strm, p0, [x0, #31, mul vl]
 ; CHECK-NEXT:    ret
 entry:
-  %gep = getelementptr inbounds <vscale x 16 x i8>, <vscale x 16 x i8>* %base, i64 31, i64 0
+  %gep = getelementptr inbounds <vscale x 16 x i8>, ptr %base, i64 31, i64 0
   tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %gep, i32 13)
   ret void
 }
@@ -220,38 +220,38 @@ entry:
 ;
 ; imm form of prfb is tested above
 
-define void @test_svprfh_vnum(<vscale x 8 x i1> %pg, <vscale x 8 x i16>* %base) {
+define void @test_svprfh_vnum(<vscale x 8 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprfh_vnum:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfh pstl3strm, p0, [x0, #31, mul vl]
 ; CHECK-NEXT:    ret
 entry:
-  %gep = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %base, i64 31
-  %addr = bitcast <vscale x 8 x i16>* %gep to ptr
+  %gep = getelementptr <vscale x 8 x i16>, ptr %base, i64 31
+  %addr = bitcast ptr %gep to ptr
   tail call void @llvm.aarch64.sve.prf.nxv8i1(<vscale x 8 x i1> %pg, ptr %addr, i32 13)
   ret void
 }
 
-define void @test_svprfw_vnum(<vscale x 4 x i1> %pg, <vscale x 4 x i32>* %base) {
+define void @test_svprfw_vnum(<vscale x 4 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprfw_vnum:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfw pstl3strm, p0, [x0, #31, mul vl]
 ; CHECK-NEXT:    ret
 entry:
-  %gep = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %base, i64 31
-  %addr = bitcast <vscale x 4 x i32>* %gep to ptr
+  %gep = getelementptr <vscale x 4 x i32>, ptr %base, i64 31
+  %addr = bitcast ptr %gep to ptr
   tail call void @llvm.aarch64.sve.prf.nxv4i1(<vscale x 4 x i1> %pg, ptr %addr, i32 13)
   ret void
 }
 
-define void @test_svprfd_vnum(<vscale x 2 x i1> %pg, <vscale x 2 x i64>* %base) {
+define void @test_svprfd_vnum(<vscale x 2 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprfd_vnum:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfd pstl3strm, p0, [x0, #31, mul vl]
 ; CHECK-NEXT:    ret
 entry:
-  %gep = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 31
-  %addr = bitcast <vscale x 2 x i64>* %gep to ptr
+  %gep = getelementptr <vscale x 2 x i64>, ptr %base, i64 31
+  %addr = bitcast ptr %gep to ptr
   tail call void @llvm.aarch64.sve.prf.nxv2i1(<vscale x 2 x i1> %pg, ptr %addr, i32 13)
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-imm.ll
index af17c4eac95fd..d9d0ee2a2a1d0 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-imm.ll
@@ -10,8 +10,8 @@ define <vscale x 16 x i8> @ld1b_upper_bound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 7
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
@@ -21,8 +21,8 @@ define <vscale x 16 x i8> @ld1b_inbound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 1
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 1
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
@@ -32,8 +32,8 @@ define <vscale x 4 x i32> @ld1b_s_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %a, i64 7
-  %base_scalar = bitcast <vscale x 4 x i8>* %base to ptr
+  %base = getelementptr <vscale x 4 x i8>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
@@ -44,8 +44,8 @@ define <vscale x 4 x i32> @ld1sb_s_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %a, i64 7
-  %base_scalar = bitcast <vscale x 4 x i8>* %base to ptr
+  %base = getelementptr <vscale x 4 x i8>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
@@ -56,8 +56,8 @@ define <vscale x 16 x i8> @ld1b_lower_bound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, #-8, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -8
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 -8
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
@@ -68,8 +68,8 @@ define <vscale x 16 x i8> @ld1b_out_of_upper_bound(<vscale x 16 x i1> %pg, ptr %
 ; CHECK-NEXT:    rdvl x8, #8
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 8
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 8
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
@@ -80,8 +80,8 @@ define <vscale x 16 x i8> @ld1b_out_of_lower_bound(<vscale x 16 x i1> %pg, ptr %
 ; CHECK-NEXT:    rdvl x8, #-9
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -9
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 -9
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
@@ -95,8 +95,8 @@ define <vscale x 8 x i16> @ld1b_h_inbound(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %a, i64 7
-  %base_scalar = bitcast <vscale x 8 x i8>* %base to ptr
+  %base = getelementptr <vscale x 8 x i8>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
@@ -107,8 +107,8 @@ define <vscale x 8 x i16> @ld1sb_h_inbound(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.h }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %a, i64 7
-  %base_scalar = bitcast <vscale x 8 x i8>* %base to ptr
+  %base = getelementptr <vscale x 8 x i8>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
@@ -119,8 +119,8 @@ define <vscale x 8 x i16> @ld1h_inbound(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %a, i64 1
-  %base_scalar = bitcast <vscale x 8 x i16>* %base to ptr
+  %base = getelementptr <vscale x 8 x i16>, ptr %a, i64 1
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %pg, ptr %base_scalar)
   ret <vscale x 8 x i16> %load
 }
@@ -130,8 +130,8 @@ define <vscale x 4 x i32> @ld1h_s_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %a, i64 7
-  %base_scalar = bitcast <vscale x 4 x i16>* %base to ptr
+  %base = getelementptr <vscale x 4 x i16>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
@@ -142,8 +142,8 @@ define <vscale x 4 x i32> @ld1sh_s_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %a, i64 7
-  %base_scalar = bitcast <vscale x 4 x i16>* %base to ptr
+  %base = getelementptr <vscale x 4 x i16>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
@@ -154,8 +154,8 @@ define <vscale x 2 x i64> @ld1b_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %a, i64 7
-  %base_scalar = bitcast <vscale x 2 x i8>* %base to ptr
+  %base = getelementptr <vscale x 2 x i8>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -166,8 +166,8 @@ define <vscale x 2 x i64> @ld1sb_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %a, i64 7
-  %base_scalar = bitcast <vscale x 2 x i8>* %base to ptr
+  %base = getelementptr <vscale x 2 x i8>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -178,8 +178,8 @@ define <vscale x 2 x i64> @ld1h_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %a, i64 7
-  %base_scalar = bitcast <vscale x 2 x i16>* %base to ptr
+  %base = getelementptr <vscale x 2 x i16>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -190,8 +190,8 @@ define <vscale x 2 x i64> @ld1sh_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %a, i64 7
-  %base_scalar = bitcast <vscale x 2 x i16>* %base to ptr
+  %base = getelementptr <vscale x 2 x i16>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -202,8 +202,8 @@ define <vscale x 8 x half> @ld1h_f16_inbound(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %a, i64 1
-  %base_scalar = bitcast <vscale x 8 x half>* %base to ptr
+  %base = getelementptr <vscale x 8 x half>, ptr %a, i64 1
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1> %pg, ptr %base_scalar)
   ret <vscale x 8 x half> %load
 }
@@ -213,8 +213,8 @@ define <vscale x 8 x bfloat> @ld1h_bf16_inbound(<vscale x 8 x i1> %pg, ptr %a) #
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %a, i64 1
-  %base_scalar = bitcast <vscale x 8 x bfloat>* %base to ptr
+  %base = getelementptr <vscale x 8 x bfloat>, ptr %a, i64 1
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1.nxv8bf16(<vscale x 8 x i1> %pg, ptr %base_scalar)
   ret <vscale x 8 x bfloat> %load
 }
@@ -228,8 +228,8 @@ define <vscale x 4 x i32> @ld1w_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %a, i64 7
-  %base_scalar = bitcast <vscale x 4 x i32>* %base to ptr
+  %base = getelementptr <vscale x 4 x i32>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %pg, ptr %base_scalar)
   ret <vscale x 4 x i32> %load
 }
@@ -239,8 +239,8 @@ define <vscale x 4 x float> @ld1w_f32_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %a, i64 7
-  %base_scalar = bitcast <vscale x 4 x float>* %base to ptr
+  %base = getelementptr <vscale x 4 x float>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1> %pg, ptr %base_scalar)
   ret <vscale x 4 x float> %load
 }
@@ -254,8 +254,8 @@ define <vscale x 2 x i64> @ld1d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %a, i64 1
-  %base_scalar = bitcast <vscale x 2 x i64>* %base to ptr
+  %base = getelementptr <vscale x 2 x i64>, ptr %a, i64 1
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1> %pg, ptr %base_scalar)
   ret <vscale x 2 x i64> %load
 }
@@ -265,8 +265,8 @@ define <vscale x 2 x i64> @ld1w_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %a, i64 7
-  %base_scalar = bitcast <vscale x 2 x i32>* %base to ptr
+  %base = getelementptr <vscale x 2 x i32>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -277,8 +277,8 @@ define <vscale x 2 x i64> @ld1sw_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %a, i64 7
-  %base_scalar = bitcast <vscale x 2 x i32>* %base to ptr
+  %base = getelementptr <vscale x 2 x i32>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -289,8 +289,8 @@ define <vscale x 2 x double> @ld1d_f64_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %a, i64 1
-  %base_scalar = bitcast <vscale x 2 x double>* %base to ptr
+  %base = getelementptr <vscale x 2 x double>, ptr %a, i64 1
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %pg, ptr %base_scalar)
   ret <vscale x 2 x double> %load
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll
index 721ba3acde260..e88eaaae3f63e 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll
@@ -9,388 +9,388 @@
 ; elements of the structure store, which is <N> = 2, 3, 4.
 
 ; ld2b
-define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld2.nxv32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 2
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 2
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_lower_bound(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld2.nxv32i8_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, #-16, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -16
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -16
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_upper_bound(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld2.nxv32i8_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, #14, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 14
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 14
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_not_multiple_of_2(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_not_multiple_of_2(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld2.nxv32i8_not_multiple_of_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #3
 ; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 3
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 3
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_outside_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_outside_lower_bound(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld2.nxv32i8_outside_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #-18
 ; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -18
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -18
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_outside_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_outside_upper_bound(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld2.nxv32i8_outside_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #16
 ; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 16
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 16
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
 ; ld2h
-define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ld2.nxv16i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16>* %addr) {
+define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ld2.nxv16i16(<vscale x 8 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld2.nxv16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, #14, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 14
-  %base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
-  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2.sret.nxv8i16(<vscale x 8 x i1> %Pg, i16 *%base_ptr)
+  %base = getelementptr <vscale x 8 x i16>, ptr %addr, i64 14
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2.sret.nxv8i16(<vscale x 8 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
 }
 
-define { <vscale x 8 x half>, <vscale x 8 x half> } @ld2.nxv16f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half>* %addr) {
+define { <vscale x 8 x half>, <vscale x 8 x half> } @ld2.nxv16f16(<vscale x 8 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld2.nxv16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, #-16, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 -16
-  %base_ptr = bitcast <vscale x 8 x half>* %base to half *
-  %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2.sret.nxv8f16(<vscale x 8 x i1> %Pg, half *%base_ptr)
+  %base = getelementptr <vscale x 8 x half>, ptr %addr, i64 -16
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2.sret.nxv8f16(<vscale x 8 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
 
-define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld2.nxv16bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat>* %addr) #0 {
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld2.nxv16bf16(<vscale x 8 x i1> %Pg, ptr %addr) #0 {
 ; CHECK-LABEL: ld2.nxv16bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, #12, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 12
-  %base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
-  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2.sret.nxv8bf16(<vscale x 8 x i1> %Pg, bfloat *%base_ptr)
+  %base = getelementptr <vscale x 8 x bfloat>, ptr %addr, i64 12
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2.sret.nxv8bf16(<vscale x 8 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
 }
 
 ; ld2w
-define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ld2.nxv8i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32>* %addr) {
+define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ld2.nxv8i32(<vscale x 4 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld2.nxv8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x0, #14, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 14
-  %base_ptr = bitcast <vscale x 4 x i32>* %base to i32 *
-  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld2.sret.nxv4i32(<vscale x 4 x i1> %Pg, i32 *%base_ptr)
+  %base = getelementptr <vscale x 4 x i32>, ptr %addr, i64 14
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld2.sret.nxv4i32(<vscale x 4 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
 }
 
-define { <vscale x 4 x float>, <vscale x 4 x float> } @ld2.nxv8f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float>* %addr) {
+define { <vscale x 4 x float>, <vscale x 4 x float> } @ld2.nxv8f32(<vscale x 4 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld2.nxv8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x0, #-16, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -16
-  %base_ptr = bitcast <vscale x 4 x float>* %base to float *
-  %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld2.sret.nxv4f32(<vscale x 4 x i1> %Pg, float *%base_ptr)
+  %base = getelementptr <vscale x 4 x float>, ptr %addr, i64 -16
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld2.sret.nxv4f32(<vscale x 4 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
 
 ; ld2d
-define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld2.nxv4i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64>* %addr) {
+define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld2.nxv4i64(<vscale x 2 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld2.nxv4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x0, #14, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 14
-  %base_ptr = bitcast <vscale x 2 x i64>* %base to i64 *
-  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv2i64(<vscale x 2 x i1> %Pg, i64 *%base_ptr)
+  %base = getelementptr <vscale x 2 x i64>, ptr %addr, i64 14
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv2i64(<vscale x 2 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
 }
 
-define { <vscale x 2 x double>, <vscale x 2 x double> } @ld2.nxv4f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double>* %addr) {
+define { <vscale x 2 x double>, <vscale x 2 x double> } @ld2.nxv4f64(<vscale x 2 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld2.nxv4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x0, #-16, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -16
-  %base_ptr = bitcast <vscale x 2 x double>* %base to double *
-  %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv2f64(<vscale x 2 x i1> %Pg, double *%base_ptr)
+  %base = getelementptr <vscale x 2 x double>, ptr %addr, i64 -16
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv2f64(<vscale x 2 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
 }
 
 ; ld3b
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld3.nxv48i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3b { z0.b - z2.b }, p0/z, [x0, #3, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 3
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 3
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_lower_bound(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld3.nxv48i8_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3b { z0.b - z2.b }, p0/z, [x0, #-24, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -24
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -24
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_upper_bound(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld3.nxv48i8_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3b { z0.b - z2.b }, p0/z, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 21
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 21
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_not_multiple_of_3_01(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_not_multiple_of_3_01(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld3.nxv48i8_not_multiple_of_3_01:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #4
 ; CHECK-NEXT:    ld3b { z0.b - z2.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 4
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 4
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_not_multiple_of_3_02(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_not_multiple_of_3_02(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld3.nxv48i8_not_multiple_of_3_02:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #5
 ; CHECK-NEXT:    ld3b { z0.b - z2.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 5
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 5
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_outside_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_outside_lower_bound(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld3.nxv48i8_outside_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #-27
 ; CHECK-NEXT:    ld3b { z0.b - z2.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -27
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -27
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_outside_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_outside_upper_bound(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld3.nxv48i8_outside_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #24
 ; CHECK-NEXT:    ld3b { z0.b - z2.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 24
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 24
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
 ; ld3h
-define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld3.nxv24i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16> *%addr) {
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld3.nxv24i16(<vscale x 8 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld3.nxv24i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3h { z0.h - z2.h }, p0/z, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 21
-  %base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
-  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld3.sret.nxv8i16(<vscale x 8 x i1> %Pg, i16 *%base_ptr)
+  %base = getelementptr <vscale x 8 x i16>, ptr %addr, i64 21
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld3.sret.nxv8i16(<vscale x 8 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
 }
 
-define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld3.nxv24f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half> *%addr) {
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld3.nxv24f16(<vscale x 8 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld3.nxv24f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3h { z0.h - z2.h }, p0/z, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 21
-  %base_ptr = bitcast <vscale x 8 x half>* %base to half *
-  %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld3.sret.nxv8f16(<vscale x 8 x i1> %Pg, half *%base_ptr)
+  %base = getelementptr <vscale x 8 x half>, ptr %addr, i64 21
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld3.sret.nxv8f16(<vscale x 8 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
 
-define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld3.nxv24bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat> *%addr) #0 {
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld3.nxv24bf16(<vscale x 8 x i1> %Pg, ptr %addr) #0 {
 ; CHECK-LABEL: ld3.nxv24bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3h { z0.h - z2.h }, p0/z, [x0, #-24, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 -24
-  %base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
-  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld3.sret.nxv8bf16(<vscale x 8 x i1> %Pg, bfloat *%base_ptr)
+  %base = getelementptr <vscale x 8 x bfloat>, ptr %addr, i64 -24
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld3.sret.nxv8bf16(<vscale x 8 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
 }
 
 ; ld3w
-define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld3.nxv12i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> *%addr) {
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld3.nxv12i32(<vscale x 4 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld3.nxv12i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3w { z0.s - z2.s }, p0/z, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 21
-  %base_ptr = bitcast <vscale x 4 x i32>* %base to i32 *
-  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3.sret.nxv4i32(<vscale x 4 x i1> %Pg, i32 *%base_ptr)
+  %base = getelementptr <vscale x 4 x i32>, ptr %addr, i64 21
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3.sret.nxv4i32(<vscale x 4 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
 }
 
-define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld3.nxv12f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float> *%addr) {
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld3.nxv12f32(<vscale x 4 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld3.nxv12f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3w { z0.s - z2.s }, p0/z, [x0, #-24, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -24
-  %base_ptr = bitcast <vscale x 4 x float>* %base to float *
-  %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1> %Pg, float *%base_ptr)
+  %base = getelementptr <vscale x 4 x float>, ptr %addr, i64 -24
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
 
 ; ld3d
-define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld3.nxv6i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> *%addr) {
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld3.nxv6i64(<vscale x 2 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld3.nxv6i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3d { z0.d - z2.d }, p0/z, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 21
-  %base_ptr = bitcast <vscale x 2 x i64>* %base to i64 *
-  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld3.sret.nxv2i64(<vscale x 2 x i1> %Pg, i64 *%base_ptr)
+  %base = getelementptr <vscale x 2 x i64>, ptr %addr, i64 21
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld3.sret.nxv2i64(<vscale x 2 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
 }
 
-define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld3.nxv6f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double> *%addr) {
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld3.nxv6f64(<vscale x 2 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld3.nxv6f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3d { z0.d - z2.d }, p0/z, [x0, #-24, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -24
-  %base_ptr = bitcast <vscale x 2 x double>* %base to double *
-  %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld3.sret.nxv2f64(<vscale x 2 x i1> %Pg, double *%base_ptr)
+  %base = getelementptr <vscale x 2 x double>, ptr %addr, i64 -24
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld3.sret.nxv2f64(<vscale x 2 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
 }
 
 ; ld4b
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld4.nxv64i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4b { z0.b - z3.b }, p0/z, [x0, #4, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 4
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 4
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_lower_bound(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld4.nxv64i8_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4b { z0.b - z3.b }, p0/z, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -32
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -32
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_upper_bound(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld4.nxv64i8_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4b { z0.b - z3.b }, p0/z, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 28
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 28
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_not_multiple_of_4_01(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_not_multiple_of_4_01(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_01:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #5
 ; CHECK-NEXT:    ld4b { z0.b - z3.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 5
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 5
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_not_multiple_of_4_02(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_not_multiple_of_4_02(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_02:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #6
 ; CHECK-NEXT:    ld4b { z0.b - z3.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 6
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 6
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_not_multiple_of_4_03(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_not_multiple_of_4_03(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_03:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #7
 ; CHECK-NEXT:    ld4b { z0.b - z3.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 7
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 7
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_outside_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_outside_lower_bound(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld4.nxv64i8_outside_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #1
@@ -403,13 +403,13 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
 ; xM = -9 * 2^6
 ; xP = RDVL * 2^-4
 ; xOFFSET = RDVL * 2^-4 * -9 * 2^6 = RDVL * -36
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -36
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -36
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_outside_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_outside_upper_bound(<vscale x 16 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld4.nxv64i8_outside_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #1
@@ -422,118 +422,118 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
 ; xM = 2^9
 ; xP = RDVL * 2^-4
 ; xOFFSET = RDVL * 2^-4 * 2^9 = RDVL * 32
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 32
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 32
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
 ; ld4h
-define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld4.nxv32i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16> *%addr) {
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld4.nxv32i16(<vscale x 8 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld4.nxv32i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4h { z0.h - z3.h }, p0/z, [x0, #8, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 8
-  %base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
-  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld4.sret.nxv8i16(<vscale x 8 x i1> %Pg, i16 *%base_ptr)
+  %base = getelementptr <vscale x 8 x i16>, ptr %addr, i64 8
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld4.sret.nxv8i16(<vscale x 8 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
 }
 
-define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld4.nxv32f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half> *%addr) {
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld4.nxv32f16(<vscale x 8 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld4.nxv32f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4h { z0.h - z3.h }, p0/z, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 28
-  %base_ptr = bitcast <vscale x 8 x half>* %base to half *
-  %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld4.sret.nxv8f16(<vscale x 8 x i1> %Pg, half *%base_ptr)
+  %base = getelementptr <vscale x 8 x half>, ptr %addr, i64 28
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld4.sret.nxv8f16(<vscale x 8 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
 
-define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld4.nxv32bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat> *%addr) #0 {
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld4.nxv32bf16(<vscale x 8 x i1> %Pg, ptr %addr) #0 {
 ; CHECK-LABEL: ld4.nxv32bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4h { z0.h - z3.h }, p0/z, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 -32
-  %base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
-  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld4.sret.nxv8bf16(<vscale x 8 x i1> %Pg, bfloat *%base_ptr)
+  %base = getelementptr <vscale x 8 x bfloat>, ptr %addr, i64 -32
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld4.sret.nxv8bf16(<vscale x 8 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
 }
 
 ; ld4w
-define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld4.nxv16i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> *%addr) {
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld4.nxv16i32(<vscale x 4 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld4.nxv16i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4w { z0.s - z3.s }, p0/z, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 28
-  %base_ptr = bitcast <vscale x 4 x i32>* %base to i32 *
-  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld4.sret.nxv4i32(<vscale x 4 x i1> %Pg, i32 *%base_ptr)
+  %base = getelementptr <vscale x 4 x i32>, ptr %addr, i64 28
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld4.sret.nxv4i32(<vscale x 4 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
 }
 
-define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld4.nxv16f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float>* %addr) {
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld4.nxv16f32(<vscale x 4 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld4.nxv16f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4w { z0.s - z3.s }, p0/z, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -32
-  %base_ptr = bitcast <vscale x 4 x float>* %base to float *
-  %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld4.sret.nxv4f32(<vscale x 4 x i1> %Pg, float *%base_ptr)
+  %base = getelementptr <vscale x 4 x float>, ptr %addr, i64 -32
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld4.sret.nxv4f32(<vscale x 4 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
 
 ; ld4d
-define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld4.nxv8i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> *%addr) {
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld4.nxv8i64(<vscale x 2 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld4.nxv8i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4d { z0.d - z3.d }, p0/z, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 28
-  %base_ptr = bitcast <vscale x 2 x i64>* %base to i64 *
-  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv2i64(<vscale x 2 x i1> %Pg, i64 *%base_ptr)
+  %base = getelementptr <vscale x 2 x i64>, ptr %addr, i64 28
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv2i64(<vscale x 2 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
 }
 
-define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld4.nxv8f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double> *%addr) {
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld4.nxv8f64(<vscale x 2 x i1> %Pg, ptr %addr) {
 ; CHECK-LABEL: ld4.nxv8f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4d { z0.d - z3.d }, p0/z, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -32
-  %base_ptr = bitcast <vscale x 2 x double>* %base to double *
-  %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %Pg, double * %base_ptr)
+  %base = getelementptr <vscale x 2 x double>, ptr %addr, i64 -32
+  %base_ptr = bitcast ptr %base to ptr
+  %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %Pg, ptr %base_ptr)
   ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
 }
 
-declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1>, i8*)
-declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2.sret.nxv8i16(<vscale x 8 x i1>, i16*)
-declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld2.sret.nxv4i32(<vscale x 4 x i1>, i32*)
-declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv2i64(<vscale x 2 x i1>, i64*)
-declare { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2.sret.nxv8f16(<vscale x 8 x i1>, half*)
-declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2.sret.nxv8bf16(<vscale x 8 x i1>, bfloat*)
-declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld2.sret.nxv4f32(<vscale x 4 x i1>, float*)
-declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv2f64(<vscale x 2 x i1>, double*)
-
-declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1>, i8*)
-declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld3.sret.nxv8i16(<vscale x 8 x i1>, i16*)
-declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3.sret.nxv4i32(<vscale x 4 x i1>, i32*)
-declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld3.sret.nxv2i64(<vscale x 2 x i1>, i64*)
-declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld3.sret.nxv8f16(<vscale x 8 x i1>, half*)
-declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld3.sret.nxv8bf16(<vscale x 8 x i1>, bfloat*)
-declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1>, float*)
-declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld3.sret.nxv2f64(<vscale x 2 x i1>, double*)
-
-declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1>, i8*)
-declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld4.sret.nxv8i16(<vscale x 8 x i1>, i16*)
-declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld4.sret.nxv4i32(<vscale x 4 x i1>, i32*)
-declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv2i64(<vscale x 2 x i1>, i64*)
-declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld4.sret.nxv8f16(<vscale x 8 x i1>, half*)
-declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld4.sret.nxv8bf16(<vscale x 8 x i1>, bfloat*)
-declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld4.sret.nxv4f32(<vscale x 4 x i1>, float*)
-declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1>, double*)
+declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1>, ptr)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2.sret.nxv8i16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld2.sret.nxv4i32(<vscale x 4 x i1>, ptr)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv2i64(<vscale x 2 x i1>, ptr)
+declare { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2.sret.nxv8f16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2.sret.nxv8bf16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld2.sret.nxv4f32(<vscale x 4 x i1>, ptr)
+declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv2f64(<vscale x 2 x i1>, ptr)
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1>, ptr)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld3.sret.nxv8i16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3.sret.nxv4i32(<vscale x 4 x i1>, ptr)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld3.sret.nxv2i64(<vscale x 2 x i1>, ptr)
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld3.sret.nxv8f16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld3.sret.nxv8bf16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1>, ptr)
+declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld3.sret.nxv2f64(<vscale x 2 x i1>, ptr)
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1>, ptr)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld4.sret.nxv8i16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld4.sret.nxv4i32(<vscale x 4 x i1>, ptr)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv2i64(<vscale x 2 x i1>, ptr)
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld4.sret.nxv8f16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld4.sret.nxv8bf16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld4.sret.nxv4f32(<vscale x 4 x i1>, ptr)
+declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1>, ptr)
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll
index 6764a910edb13..4153f0be611a1 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst-ext.ll
@@ -5,7 +5,7 @@
 ; LD1SB/LD1B
 ;
 
-define <vscale x 16 x i32> @ld1b_i8_sext_i32(<vscale x 16 x i8> *%base) {
+define <vscale x 16 x i32> @ld1b_i8_sext_i32(ptr %base) {
 ; CHECK-LABEL: ld1b_i8_sext_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -14,12 +14,12 @@ define <vscale x 16 x i32> @ld1b_i8_sext_i32(<vscale x 16 x i8> *%base) {
 ; CHECK-NEXT:    ld1sb { z2.s }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ld1sb { z3.s }, p0/z, [x0, #3, mul vl]
 ; CHECK-NEXT:    ret
-  %wide.load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
+  %wide.load = load <vscale x 16 x i8>, ptr %base
   %res = sext <vscale x 16 x i8> %wide.load to <vscale x 16 x i32>
   ret <vscale x 16 x i32> %res
 }
 
-define <vscale x 16 x i32> @ld1b_i8_zext_i32(<vscale x 16 x i8> *%base) {
+define <vscale x 16 x i32> @ld1b_i8_zext_i32(ptr %base) {
 ; CHECK-LABEL: ld1b_i8_zext_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -28,12 +28,12 @@ define <vscale x 16 x i32> @ld1b_i8_zext_i32(<vscale x 16 x i8> *%base) {
 ; CHECK-NEXT:    ld1b { z2.s }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ld1b { z3.s }, p0/z, [x0, #3, mul vl]
 ; CHECK-NEXT:    ret
-  %wide.load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
+  %wide.load = load <vscale x 16 x i8>, ptr %base
   %res = zext <vscale x 16 x i8> %wide.load to <vscale x 16 x i32>
   ret <vscale x 16 x i32> %res
 }
 
-define <vscale x 16 x i64> @ld1b_i8_sext(<vscale x 16 x i8> *%base) {
+define <vscale x 16 x i64> @ld1b_i8_sext(ptr %base) {
 ; CHECK-LABEL: ld1b_i8_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -46,12 +46,12 @@ define <vscale x 16 x i64> @ld1b_i8_sext(<vscale x 16 x i8> *%base) {
 ; CHECK-NEXT:    ld1sb { z6.d }, p0/z, [x0, #6, mul vl]
 ; CHECK-NEXT:    ld1sb { z7.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %wide.load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
+  %wide.load = load <vscale x 16 x i8>, ptr %base
   %res = sext <vscale x 16 x i8> %wide.load to <vscale x 16 x i64>
   ret <vscale x 16 x i64> %res
 }
 
-define <vscale x 16 x i64> @ld1b_i8_zext(<vscale x 16 x i8> *%base) {
+define <vscale x 16 x i64> @ld1b_i8_zext(ptr %base) {
 ; CHECK-LABEL: ld1b_i8_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -64,7 +64,7 @@ define <vscale x 16 x i64> @ld1b_i8_zext(<vscale x 16 x i8> *%base) {
 ; CHECK-NEXT:    ld1b { z6.d }, p0/z, [x0, #6, mul vl]
 ; CHECK-NEXT:    ld1b { z7.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %wide.load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
+  %wide.load = load <vscale x 16 x i8>, ptr %base
   %res = zext <vscale x 16 x i8> %wide.load to <vscale x 16 x i64>
   ret <vscale x 16 x i64> %res
 }
@@ -73,7 +73,7 @@ define <vscale x 16 x i64> @ld1b_i8_zext(<vscale x 16 x i8> *%base) {
 ; LD1H
 ;
 
-define <vscale x 8 x i64> @ld1h_i16_sext(<vscale x 8 x i16> *%base) {
+define <vscale x 8 x i64> @ld1h_i16_sext(ptr %base) {
 ; CHECK-LABEL: ld1h_i16_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -82,12 +82,12 @@ define <vscale x 8 x i64> @ld1h_i16_sext(<vscale x 8 x i16> *%base) {
 ; CHECK-NEXT:    ld1sh { z2.d }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ld1sh { z3.d }, p0/z, [x0, #3, mul vl]
 ; CHECK-NEXT:    ret
-  %wide.load = load <vscale x 8 x i16>, <vscale x 8 x i16>* %base
+  %wide.load = load <vscale x 8 x i16>, ptr %base
   %res = sext <vscale x 8 x i16> %wide.load to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %res
 }
 
-define <vscale x 8 x i64> @ld1h_i16_zext(<vscale x 8 x i16> *%base) {
+define <vscale x 8 x i64> @ld1h_i16_zext(ptr %base) {
 ; CHECK-LABEL: ld1h_i16_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -96,7 +96,7 @@ define <vscale x 8 x i64> @ld1h_i16_zext(<vscale x 8 x i16> *%base) {
 ; CHECK-NEXT:    ld1h { z2.d }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ld1h { z3.d }, p0/z, [x0, #3, mul vl]
 ; CHECK-NEXT:    ret
-  %wide.load = load <vscale x 8 x i16>, <vscale x 8 x i16>* %base
+  %wide.load = load <vscale x 8 x i16>, ptr %base
   %res = zext <vscale x 8 x i16> %wide.load to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %res
 }
@@ -105,26 +105,26 @@ define <vscale x 8 x i64> @ld1h_i16_zext(<vscale x 8 x i16> *%base) {
 ; LD1W
 ;
 
-define <vscale x 4 x i64> @ld1w_i32_sext(<vscale x 4 x i32> *%base) {
+define <vscale x 4 x i64> @ld1w_i32_sext(ptr %base) {
 ; CHECK-LABEL: ld1w_i32_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1sw { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %base
+  %wide.load = load <vscale x 4 x i32>, ptr %base
   %res = sext <vscale x 4 x i32> %wide.load to <vscale x 4 x i64>
   ret <vscale x 4 x i64> %res
 }
 
-define <vscale x 4 x i64> @ld1w_i32_zext(<vscale x 4 x i32> *%base) {
+define <vscale x 4 x i64> @ld1w_i32_zext(ptr %base) {
 ; CHECK-LABEL: ld1w_i32_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1w { z1.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %base
+  %wide.load = load <vscale x 4 x i32>, ptr %base
   %res = zext <vscale x 4 x i32> %wide.load to <vscale x 4 x i64>
   ret <vscale x 4 x i64> %res
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
index f86d999340184..ef24f575fba6a 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
@@ -21,8 +21,8 @@ define <vscale x 16 x i8> @ldnf1b_out_of_lower_bound(<vscale x 16 x i1> %pg, ptr
 ; CHECK-NEXT:    add x8, x0, x8
 ; CHECK-NEXT:    ldnf1b { z0.b }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -9
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 -9
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
@@ -32,8 +32,8 @@ define <vscale x 16 x i8> @ldnf1b_lower_bound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1b { z0.b }, p0/z, [x0, #-8, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -8
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 -8
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
@@ -43,8 +43,8 @@ define <vscale x 16 x i8> @ldnf1b_inbound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1b { z0.b }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 1
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 1
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
@@ -54,8 +54,8 @@ define <vscale x 16 x i8> @ldnf1b_upper_bound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1b { z0.b }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 7
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
@@ -67,8 +67,8 @@ define <vscale x 16 x i8> @ldnf1b_out_of_upper_bound(<vscale x 16 x i1> %pg, ptr
 ; CHECK-NEXT:    add x8, x0, x8
 ; CHECK-NEXT:    ldnf1b { z0.b }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 8
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 8
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
@@ -88,8 +88,8 @@ define <vscale x 8 x i16> @ldnf1b_h_inbound(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1b { z0.h }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %a, i64 7
-  %base_scalar = bitcast <vscale x 8 x i8>* %base to ptr
+  %base = getelementptr <vscale x 8 x i8>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
@@ -110,8 +110,8 @@ define <vscale x 8 x i16> @ldnf1sb_h_inbound(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sb { z0.h }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %a, i64 7
-  %base_scalar = bitcast <vscale x 8 x i8>* %base to ptr
+  %base = getelementptr <vscale x 8 x i8>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
@@ -131,8 +131,8 @@ define <vscale x 8 x i16> @ldnf1h_inbound(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1h { z0.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %a, i64 1
-  %base_scalar = bitcast <vscale x 8 x i16>* %base to ptr
+  %base = getelementptr <vscale x 8 x i16>, ptr %a, i64 1
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnf1.nxv8i16(<vscale x 8 x i1> %pg, ptr %base_scalar)
   ret <vscale x 8 x i16> %load
 }
@@ -160,8 +160,8 @@ define <vscale x 8 x half> @ldnf1h_f16_inbound(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1h { z0.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %a, i64 1
-  %base_scalar = bitcast <vscale x 8 x half>* %base to ptr
+  %base = getelementptr <vscale x 8 x half>, ptr %a, i64 1
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 8 x half> @llvm.aarch64.sve.ldnf1.nxv8f16(<vscale x 8 x i1> %pg, ptr %base_scalar)
   ret <vscale x 8 x half> %load
 }
@@ -171,8 +171,8 @@ define <vscale x 8 x bfloat> @ldnf1h_bf16_inbound(<vscale x 8 x i1> %pg, ptr %a)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1h { z0.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %a, i64 1
-  %base_scalar = bitcast <vscale x 8 x bfloat>* %base to ptr
+  %base = getelementptr <vscale x 8 x bfloat>, ptr %a, i64 1
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnf1.nxv8bf16(<vscale x 8 x i1> %pg, ptr %base_scalar)
   ret <vscale x 8 x bfloat> %load
 }
@@ -192,8 +192,8 @@ define <vscale x 4 x i32> @ldnf1b_s_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1b { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %a, i64 7
-  %base_scalar = bitcast <vscale x 4 x i8>* %base to ptr
+  %base = getelementptr <vscale x 4 x i8>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
@@ -214,8 +214,8 @@ define <vscale x 4 x i32> @ldnf1sb_s_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sb { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %a, i64 7
-  %base_scalar = bitcast <vscale x 4 x i8>* %base to ptr
+  %base = getelementptr <vscale x 4 x i8>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
@@ -236,8 +236,8 @@ define <vscale x 4 x i32> @ldnf1h_s_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1h { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %a, i64 7
-  %base_scalar = bitcast <vscale x 4 x i16>* %base to ptr
+  %base = getelementptr <vscale x 4 x i16>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
@@ -258,8 +258,8 @@ define <vscale x 4 x i32> @ldnf1sh_s_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sh { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %a, i64 7
-  %base_scalar = bitcast <vscale x 4 x i16>* %base to ptr
+  %base = getelementptr <vscale x 4 x i16>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
@@ -279,8 +279,8 @@ define <vscale x 4 x i32> @ldnf1w_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1w { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %a, i64 7
-  %base_scalar = bitcast <vscale x 4 x i32>* %base to ptr
+  %base = getelementptr <vscale x 4 x i32>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnf1.nxv4i32(<vscale x 4 x i1> %pg, ptr %base_scalar)
   ret <vscale x 4 x i32> %load
 }
@@ -299,8 +299,8 @@ define <vscale x 4 x float> @ldnf1w_f32_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1w { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %a, i64 7
-  %base_scalar = bitcast <vscale x 4 x float>* %base to ptr
+  %base = getelementptr <vscale x 4 x float>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldnf1.nxv4f32(<vscale x 4 x i1> %pg, ptr %base_scalar)
   ret <vscale x 4 x float> %load
 }
@@ -320,8 +320,8 @@ define <vscale x 2 x i64> @ldnf1b_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1b { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %a, i64 7
-  %base_scalar = bitcast <vscale x 2 x i8>* %base to ptr
+  %base = getelementptr <vscale x 2 x i8>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -342,8 +342,8 @@ define <vscale x 2 x i64> @ldnf1sb_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sb { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %a, i64 7
-  %base_scalar = bitcast <vscale x 2 x i8>* %base to ptr
+  %base = getelementptr <vscale x 2 x i8>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -364,8 +364,8 @@ define <vscale x 2 x i64> @ldnf1h_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1h { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %a, i64 7
-  %base_scalar = bitcast <vscale x 2 x i16>* %base to ptr
+  %base = getelementptr <vscale x 2 x i16>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -386,8 +386,8 @@ define <vscale x 2 x i64> @ldnf1sh_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sh { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %a, i64 7
-  %base_scalar = bitcast <vscale x 2 x i16>* %base to ptr
+  %base = getelementptr <vscale x 2 x i16>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -408,8 +408,8 @@ define <vscale x 2 x i64> @ldnf1w_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1w { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %a, i64 7
-  %base_scalar = bitcast <vscale x 2 x i32>* %base to ptr
+  %base = getelementptr <vscale x 2 x i32>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -430,8 +430,8 @@ define <vscale x 2 x i64> @ldnf1sw_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sw { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %a, i64 7
-  %base_scalar = bitcast <vscale x 2 x i32>* %base to ptr
+  %base = getelementptr <vscale x 2 x i32>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -451,8 +451,8 @@ define <vscale x 2 x i64> @ldnf1d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1d { z0.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %a, i64 1
-  %base_scalar = bitcast <vscale x 2 x i64>* %base to ptr
+  %base = getelementptr <vscale x 2 x i64>, ptr %a, i64 1
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnf1.nxv2i64(<vscale x 2 x i1> %pg, ptr %base_scalar)
   ret <vscale x 2 x i64> %load
 }
@@ -471,8 +471,8 @@ define <vscale x 2 x double> @ldnf1d_f64_inbound(<vscale x 2 x i1> %pg, ptr %a)
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1d { z0.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %a, i64 1
-  %base_scalar = bitcast <vscale x 2 x double>* %base to ptr
+  %base = getelementptr <vscale x 2 x double>, ptr %a, i64 1
+  %base_scalar = bitcast ptr %base to ptr
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldnf1.nxv2f64(<vscale x 2 x i1> %pg, ptr %base_scalar)
   ret <vscale x 2 x double> %load
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
index c495e983818f7..5d8ee3c8abcf3 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
@@ -5,7 +5,7 @@
 ; LD1B
 ;
 
-define <vscale x 16 x i32> @masked_ld1b_i8_sext_i32(<vscale x 16 x i8> *%base, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i32> @masked_ld1b_i8_sext_i32(ptr %base, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: masked_ld1b_i8_sext_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
@@ -16,12 +16,12 @@ define <vscale x 16 x i32> @masked_ld1b_i8_sext_i32(<vscale x 16 x i8> *%base, <
 ; CHECK-NEXT:    sunpklo z2.s, z3.h
 ; CHECK-NEXT:    sunpkhi z3.s, z3.h
 ; CHECK-NEXT:    ret
-  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
   %res = sext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i32>
   ret <vscale x 16 x i32> %res
 }
 
-define <vscale x 8 x i32> @masked_ld1b_nxv8i8_sext_i32(<vscale x 8 x i8> *%a, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i32> @masked_ld1b_nxv8i8_sext_i32(ptr %a, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: masked_ld1b_nxv8i8_sext_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z1.h }, p0/z, [x0]
@@ -33,7 +33,7 @@ define <vscale x 8 x i32> @masked_ld1b_nxv8i8_sext_i32(<vscale x 8 x i8> *%a, <v
   ret <vscale x 8 x i32> %res
 }
 
-define <vscale x 16 x i32> @masked_ld1b_i8_zext_i32(<vscale x 16 x i8> *%base, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i32> @masked_ld1b_i8_zext_i32(ptr %base, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: masked_ld1b_i8_zext_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
@@ -44,12 +44,12 @@ define <vscale x 16 x i32> @masked_ld1b_i8_zext_i32(<vscale x 16 x i8> *%base, <
 ; CHECK-NEXT:    uunpklo z2.s, z3.h
 ; CHECK-NEXT:    uunpkhi z3.s, z3.h
 ; CHECK-NEXT:    ret
-  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
   %res = zext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i32>
   ret <vscale x 16 x i32> %res
 }
 
-define <vscale x 8 x i32> @masked_ld1b_nxv8i8_zext_i32(<vscale x 8 x i8> *%a, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i32> @masked_ld1b_nxv8i8_zext_i32(ptr %a, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: masked_ld1b_nxv8i8_zext_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z1.h }, p0/z, [x0]
@@ -61,7 +61,7 @@ define <vscale x 8 x i32> @masked_ld1b_nxv8i8_zext_i32(<vscale x 8 x i8> *%a, <v
   ret <vscale x 8 x i32> %res
 }
 
-define <vscale x 16 x i64> @masked_ld1b_i8_sext(<vscale x 16 x i8> *%base, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i64> @masked_ld1b_i8_sext(ptr %base, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: masked_ld1b_i8_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
@@ -80,12 +80,12 @@ define <vscale x 16 x i64> @masked_ld1b_i8_sext(<vscale x 16 x i8> *%base, <vsca
 ; CHECK-NEXT:    sunpklo z6.d, z7.s
 ; CHECK-NEXT:    sunpkhi z7.d, z7.s
 ; CHECK-NEXT:    ret
-  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
   %res = sext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i64>
   ret <vscale x 16 x i64> %res
 }
 
-define <vscale x 4 x i64> @masked_ld1b_nxv4i8_sext_i64(<vscale x 4 x i8> *%a, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i64> @masked_ld1b_nxv4i8_sext_i64(ptr %a, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_ld1b_nxv4i8_sext_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z1.s }, p0/z, [x0]
@@ -97,7 +97,7 @@ define <vscale x 4 x i64> @masked_ld1b_nxv4i8_sext_i64(<vscale x 4 x i8> *%a, <v
   ret <vscale x 4 x i64> %res
 }
 
-define <vscale x 16 x i64> @masked_ld1b_i8_zext(<vscale x 16 x i8> *%base, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i64> @masked_ld1b_i8_zext(ptr %base, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: masked_ld1b_i8_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
@@ -116,12 +116,12 @@ define <vscale x 16 x i64> @masked_ld1b_i8_zext(<vscale x 16 x i8> *%base, <vsca
 ; CHECK-NEXT:    uunpklo z6.d, z7.s
 ; CHECK-NEXT:    uunpkhi z7.d, z7.s
 ; CHECK-NEXT:    ret
-  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
   %res = zext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i64>
   ret <vscale x 16 x i64> %res
 }
 
-define <vscale x 4 x i64> @masked_ld1b_nxv4i8_zext_i64(<vscale x 4 x i8> *%a, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i64> @masked_ld1b_nxv4i8_zext_i64(ptr %a, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_ld1b_nxv4i8_zext_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z1.s }, p0/z, [x0]
@@ -137,7 +137,7 @@ define <vscale x 4 x i64> @masked_ld1b_nxv4i8_zext_i64(<vscale x 4 x i8> *%a, <v
 ; LD1H
 ;
 
-define <vscale x 8 x i64> @masked_ld1h_i16_sext(<vscale x 8 x i16> *%base, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i64> @masked_ld1h_i16_sext(ptr %base, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: masked_ld1h_i16_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
@@ -148,12 +148,12 @@ define <vscale x 8 x i64> @masked_ld1h_i16_sext(<vscale x 8 x i16> *%base, <vsca
 ; CHECK-NEXT:    sunpklo z2.d, z3.s
 ; CHECK-NEXT:    sunpkhi z3.d, z3.s
 ; CHECK-NEXT:    ret
-  %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(<vscale x 8 x i16>* %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+  %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
   %res = sext <vscale x 8 x i16> %wide.masked.load to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %res
 }
 
-define <vscale x 4 x i64> @masked_ld1h_nxv4i16_sext(<vscale x 4 x i16> *%a, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i64> @masked_ld1h_nxv4i16_sext(ptr %a, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_ld1h_nxv4i16_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z1.s }, p0/z, [x0]
@@ -165,7 +165,7 @@ define <vscale x 4 x i64> @masked_ld1h_nxv4i16_sext(<vscale x 4 x i16> *%a, <vsc
   ret <vscale x 4 x i64> %res
 }
 
-define <vscale x 8 x i64> @masked_ld1h_i16_zext(<vscale x 8 x i16> *%base, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i64> @masked_ld1h_i16_zext(ptr %base, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: masked_ld1h_i16_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
@@ -176,12 +176,12 @@ define <vscale x 8 x i64> @masked_ld1h_i16_zext(<vscale x 8 x i16> *%base, <vsca
 ; CHECK-NEXT:    uunpklo z2.d, z3.s
 ; CHECK-NEXT:    uunpkhi z3.d, z3.s
 ; CHECK-NEXT:    ret
-  %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(<vscale x 8 x i16>* %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+  %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
   %res = zext <vscale x 8 x i16> %wide.masked.load to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %res
 }
 
-define <vscale x 4 x i64> @masked_ld1h_nxv4i16_zext(<vscale x 4 x i16> *%a, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i64> @masked_ld1h_nxv4i16_zext(ptr %a, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_ld1h_nxv4i16_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z1.s }, p0/z, [x0]
@@ -197,33 +197,33 @@ define <vscale x 4 x i64> @masked_ld1h_nxv4i16_zext(<vscale x 4 x i16> *%a, <vsc
 ; LD1W
 ;
 
-define <vscale x 4 x i64> @masked_ld1w_i32_sext(<vscale x 4 x i32> *%base, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i64> @masked_ld1w_i32_sext(ptr %base, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_ld1w_i32_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0]
 ; CHECK-NEXT:    sunpklo z0.d, z1.s
 ; CHECK-NEXT:    sunpkhi z1.d, z1.s
 ; CHECK-NEXT:    ret
-  %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(<vscale x 4 x i32>* %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
   %res = sext <vscale x 4 x i32> %wide.masked.load to <vscale x 4 x i64>
   ret <vscale x 4 x i64> %res
 }
 
-define <vscale x 4 x i64> @masked_ld1w_i32_zext(<vscale x 4 x i32> *%base, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i64> @masked_ld1w_i32_zext(ptr %base, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_ld1w_i32_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0]
 ; CHECK-NEXT:    uunpklo z0.d, z1.s
 ; CHECK-NEXT:    uunpkhi z1.d, z1.s
 ; CHECK-NEXT:    ret
-  %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(<vscale x 4 x i32>* %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
   %res = zext <vscale x 4 x i32> %wide.masked.load to <vscale x 4 x i64>
   ret <vscale x 4 x i64> %res
 }
 
-declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(<vscale x 16 x i8>*, i32 immarg, <vscale x 16 x i1>, <vscale x 16 x i8>)
-declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(<vscale x 8 x i8>*, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i8>)
-declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(<vscale x 4 x i8>*, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i8>)
-declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(<vscale x 8 x i16>*, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i16>)
-declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(<vscale x 4 x i16>*, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i16>)
-declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(<vscale x 4 x i32>*, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr, i32 immarg, <vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i8>)
+declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i8>)
+declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i16>)
+declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i32>)

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-imm.ll
index d7319ed03ac9e..f10b44fe63e0f 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-imm.ll
@@ -11,8 +11,8 @@ define void @st1b_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg,
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 7
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %base_scalar)
   ret void
 }
@@ -22,8 +22,8 @@ define void @st1b_inbound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 1
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 1
+  %base_scalar = bitcast ptr %base to ptr
   call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %base_scalar)
   ret void
 }
@@ -33,8 +33,8 @@ define void @st1b_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg,
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, #-8, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -8
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 -8
+  %base_scalar = bitcast ptr %base to ptr
   call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %base_scalar)
   ret void
 }
@@ -45,8 +45,8 @@ define void @st1b_out_of_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1
 ; CHECK-NEXT:    rdvl x8, #8
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 8
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 8
+  %base_scalar = bitcast ptr %base to ptr
   call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %base_scalar)
   ret void
 }
@@ -57,8 +57,8 @@ define void @st1b_out_of_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1
 ; CHECK-NEXT:    rdvl x8, #-9
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -9
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 -9
+  %base_scalar = bitcast ptr %base to ptr
   call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %base_scalar)
   ret void
 }
@@ -68,8 +68,8 @@ define void @st1b_s_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %a, i64 7
-  %base_scalar = bitcast <vscale x 4 x i8>* %base to ptr
+  %base = getelementptr <vscale x 4 x i8>, ptr %a, i64 7
+  %base_scalar = bitcast ptr %base to ptr
   %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
   call void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8> %trunc, <vscale x 4 x i1> %pg, ptr %base_scalar)
   ret void
@@ -80,8 +80,8 @@ define void @st1b_h_inbound(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, ptr
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %a, i64 1
-  %base_scalar = bitcast <vscale x 8 x i8>* %base to ptr
+  %base = getelementptr <vscale x 8 x i8>, ptr %a, i64 1
+  %base_scalar = bitcast ptr %base to ptr
   %trunc = trunc <vscale x 8 x i16> %data to <vscale x 8 x i8>
   call void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8> %trunc, <vscale x 8 x i1> %pg, ptr %base_scalar)
   ret void
@@ -92,8 +92,8 @@ define void @st1b_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0, #-7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %a, i64 -7
-  %base_scalar = bitcast <vscale x 2 x i8>* %base to ptr
+  %base = getelementptr <vscale x 2 x i8>, ptr %a, i64 -7
+  %base_scalar = bitcast ptr %base to ptr
   %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
   call void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8> %trunc, <vscale x 2 x i1> %pg, ptr %base_scalar)
   ret void
@@ -108,8 +108,8 @@ define void @st1h_inbound(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, ptr %
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, #-1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %a, i64 -1
-  %base_scalar = bitcast <vscale x 8 x i16>* %base to ptr
+  %base = getelementptr <vscale x 8 x i16>, ptr %a, i64 -1
+  %base_scalar = bitcast ptr %base to ptr
   call void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, ptr %base_scalar)
   ret void
 }
@@ -119,8 +119,8 @@ define void @st1h_f16_inbound(<vscale x 8 x half> %data, <vscale x 8 x i1> %pg,
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, #-5, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %a, i64 -5
-  %base_scalar = bitcast <vscale x 8 x half>* %base to ptr
+  %base = getelementptr <vscale x 8 x half>, ptr %a, i64 -5
+  %base_scalar = bitcast ptr %base to ptr
   call void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pg, ptr %base_scalar)
   ret void
 }
@@ -130,8 +130,8 @@ define void @st1h_bf16_inbound(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %p
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, #-5, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %a, i64 -5
-  %base_scalar = bitcast <vscale x 8 x bfloat>* %base to ptr
+  %base = getelementptr <vscale x 8 x bfloat>, ptr %a, i64 -5
+  %base_scalar = bitcast ptr %base to ptr
   call void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pg, ptr %base_scalar)
   ret void
 }
@@ -141,8 +141,8 @@ define void @st1h_s_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %a, i64 2
-  %base_scalar = bitcast <vscale x 4 x i16>* %base to ptr
+  %base = getelementptr <vscale x 4 x i16>, ptr %a, i64 2
+  %base_scalar = bitcast ptr %base to ptr
   %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
   call void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16> %trunc, <vscale x 4 x i1> %pg, ptr %base_scalar)
   ret void
@@ -153,8 +153,8 @@ define void @st1h_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, #-4, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %a, i64 -4
-  %base_scalar = bitcast <vscale x 2 x i16>* %base to ptr
+  %base = getelementptr <vscale x 2 x i16>, ptr %a, i64 -4
+  %base_scalar = bitcast ptr %base to ptr
   %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
   call void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16> %trunc, <vscale x 2 x i1> %pg, ptr %base_scalar)
   ret void
@@ -169,8 +169,8 @@ define void @st1w_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, #6, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %a, i64 6
-  %base_scalar = bitcast <vscale x 4 x i32>* %base to ptr
+  %base = getelementptr <vscale x 4 x i32>, ptr %a, i64 6
+  %base_scalar = bitcast ptr %base to ptr
   call void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base_scalar)
   ret void
 }
@@ -180,8 +180,8 @@ define void @st1w_f32_inbound(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg,
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, #-1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %a, i64 -1
-  %base_scalar = bitcast <vscale x 4 x float>* %base to ptr
+  %base = getelementptr <vscale x 4 x float>, ptr %a, i64 -1
+  %base_scalar = bitcast ptr %base to ptr
   call void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, ptr %base_scalar)
   ret void
 }
@@ -191,8 +191,8 @@ define void @st1w_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %a, i64 1
-  %base_scalar = bitcast <vscale x 2 x i32>* %base to ptr
+  %base = getelementptr <vscale x 2 x i32>, ptr %a, i64 1
+  %base_scalar = bitcast ptr %base to ptr
   %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
   call void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32> %trunc, <vscale x 2 x i1> %pg, ptr %base_scalar)
   ret void
@@ -207,8 +207,8 @@ define void @st1d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, #5, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %a, i64 5
-  %base_scalar = bitcast <vscale x 2 x i64>* %base to ptr
+  %base = getelementptr <vscale x 2 x i64>, ptr %a, i64 5
+  %base_scalar = bitcast ptr %base to ptr
   call void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base_scalar)
   ret void
 }
@@ -218,8 +218,8 @@ define void @st1d_f64_inbound(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, #-8, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %a, i64 -8
-  %base_scalar = bitcast <vscale x 2 x double>* %base to ptr
+  %base = getelementptr <vscale x 2 x double>, ptr %a, i64 -8
+  %base_scalar = bitcast ptr %base to ptr
   call void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, ptr %base_scalar)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-imm-addr-mode.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-imm-addr-mode.ll
index cace1200c4c1f..8882fc9290386 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-imm-addr-mode.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-imm-addr-mode.ll
@@ -12,22 +12,22 @@
 ; ST2B
 ;
 
-define void @st2b_i8_valid_imm(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st2b_i8_valid_imm(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2b_i8_valid_imm:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2b { z0.b, z1.b }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 2, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 2, i64 0
   call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st2b_i8_invalid_imm_not_multiple_of_2(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st2b_i8_invalid_imm_not_multiple_of_2(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2b_i8_invalid_imm_not_multiple_of_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
@@ -35,15 +35,15 @@ define void @st2b_i8_invalid_imm_not_multiple_of_2(<vscale x 16 x i8> %v0, <vsca
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2b { z0.b, z1.b }, p0, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 3, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 3, i64 0
   call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st2b_i8_invalid_imm_out_of_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st2b_i8_invalid_imm_out_of_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2b_i8_invalid_imm_out_of_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
@@ -51,15 +51,15 @@ define void @st2b_i8_invalid_imm_out_of_lower_bound(<vscale x 16 x i8> %v0, <vsc
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2b { z0.b, z1.b }, p0, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -18, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -18, i64 0
   call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st2b_i8_invalid_imm_out_of_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st2b_i8_invalid_imm_out_of_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2b_i8_invalid_imm_out_of_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
@@ -67,41 +67,41 @@ define void @st2b_i8_invalid_imm_out_of_upper_bound(<vscale x 16 x i8> %v0, <vsc
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2b { z0.b, z1.b }, p0, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 16, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 16, i64 0
   call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st2b_i8_valid_imm_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st2b_i8_valid_imm_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2b_i8_valid_imm_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2b { z0.b, z1.b }, p0, [x0, #-16, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -16, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -16, i64 0
   call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st2b_i8_valid_imm_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st2b_i8_valid_imm_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2b_i8_valid_imm_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2b { z0.b, z1.b }, p0, [x0, #14, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 14, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 14, i64 0
   call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
@@ -109,33 +109,33 @@ define void @st2b_i8_valid_imm_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16
 ; ST2H
 ;
 
-define void @st2h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %addr) {
+define void @st2h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2h_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2h { z0.h, z1.h }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 2, i64 0
+  %base = getelementptr <vscale x 8 x i16>, ptr %addr, i64 2, i64 0
   call void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16> %v0,
                                           <vscale x 8 x i16> %v1,
                                           <vscale x 8 x i1> %pred,
-                                          i16* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st2h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %addr) {
+define void @st2h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2h_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2h { z0.h, z1.h }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 2, i64 0
+  %base = getelementptr <vscale x 8 x half>, ptr %addr, i64 2, i64 0
   call void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half> %v0,
                                           <vscale x 8 x half> %v1,
                                           <vscale x 8 x i1> %pred,
-                                          half* %base)
+                                          ptr %base)
   ret void
 }
 
@@ -143,33 +143,33 @@ define void @st2h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale
 ; ST2W
 ;
 
-define void @st2w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %addr) {
+define void @st2w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2w_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2w { z0.s, z1.s }, p0, [x0, #4, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 4, i64 0
+  %base = getelementptr <vscale x 4 x i32>, ptr %addr, i64 4, i64 0
   call void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32> %v0,
                                           <vscale x 4 x i32> %v1,
                                           <vscale x 4 x i1> %pred,
-                                          i32* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st2w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x i1> %pred, <vscale x 4 x float>* %addr) {
+define void @st2w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2w_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2w { z0.s, z1.s }, p0, [x0, #6, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 6, i64 0
+  %base = getelementptr <vscale x 4 x float>, ptr %addr, i64 6, i64 0
   call void @llvm.aarch64.sve.st2.nxv4f32(<vscale x 4 x float> %v0,
                                           <vscale x 4 x float> %v1,
                                           <vscale x 4 x i1> %pred,
-                                          float* %base)
+                                          ptr %base)
   ret void
 }
 
@@ -177,33 +177,33 @@ define void @st2w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscal
 ; ST2D
 ;
 
-define void @st2d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %addr) {
+define void @st2d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2d_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2d { z0.d, z1.d }, p0, [x0, #8, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 8, i64 0
+  %base = getelementptr <vscale x 2 x i64>, ptr %addr, i64 8, i64 0
   call void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64> %v0,
                                           <vscale x 2 x i64> %v1,
                                           <vscale x 2 x i1> %pred,
-                                          i64* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st2d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %addr) {
+define void @st2d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2d_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2d { z0.d, z1.d }, p0, [x0, #10, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 10, i64 0
+  %base = getelementptr <vscale x 2 x double>, ptr %addr, i64 10, i64 0
   call void @llvm.aarch64.sve.st2.nxv2f64(<vscale x 2 x double> %v0,
                                           <vscale x 2 x double> %v1,
                                           <vscale x 2 x i1> %pred,
-                                          double* %base)
+                                          ptr %base)
   ret void
 }
 
@@ -211,7 +211,7 @@ define void @st2d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vsc
 ; ST3B
 ;
 
-define void @st3b_i8_valid_imm(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st3b_i8_valid_imm(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3b_i8_valid_imm:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -219,16 +219,16 @@ define void @st3b_i8_valid_imm(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3b { z0.b - z2.b }, p0, [x0, #3, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 3, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 3, i64 0
   call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st3b_i8_invalid_imm_not_multiple_of_3_01(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st3b_i8_invalid_imm_not_multiple_of_3_01(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3b_i8_invalid_imm_not_multiple_of_3_01:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -237,16 +237,16 @@ define void @st3b_i8_invalid_imm_not_multiple_of_3_01(<vscale x 16 x i8> %v0, <v
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3b { z0.b - z2.b }, p0, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 4, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 4, i64 0
   call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st3b_i8_invalid_imm_not_multiple_of_3_02(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st3b_i8_invalid_imm_not_multiple_of_3_02(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3b_i8_invalid_imm_not_multiple_of_3_02:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -255,16 +255,16 @@ define void @st3b_i8_invalid_imm_not_multiple_of_3_02(<vscale x 16 x i8> %v0, <v
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3b { z0.b - z2.b }, p0, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 5, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 5, i64 0
   call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st3b_i8_invalid_imm_out_of_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st3b_i8_invalid_imm_out_of_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3b_i8_invalid_imm_out_of_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -273,16 +273,16 @@ define void @st3b_i8_invalid_imm_out_of_lower_bound(<vscale x 16 x i8> %v0, <vsc
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3b { z0.b - z2.b }, p0, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -27, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -27, i64 0
   call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st3b_i8_invalid_imm_out_of_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st3b_i8_invalid_imm_out_of_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3b_i8_invalid_imm_out_of_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -291,16 +291,16 @@ define void @st3b_i8_invalid_imm_out_of_upper_bound(<vscale x 16 x i8> %v0, <vsc
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3b { z0.b - z2.b }, p0, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 24, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 24, i64 0
   call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st3b_i8_valid_imm_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st3b_i8_valid_imm_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3b_i8_valid_imm_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -308,16 +308,16 @@ define void @st3b_i8_valid_imm_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3b { z0.b - z2.b }, p0, [x0, #-24, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -24, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -24, i64 0
   call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st3b_i8_valid_imm_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st3b_i8_valid_imm_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3b_i8_valid_imm_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -325,12 +325,12 @@ define void @st3b_i8_valid_imm_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3b { z0.b - z2.b }, p0, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 21, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 21, i64 0
   call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
@@ -338,7 +338,7 @@ define void @st3b_i8_valid_imm_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16
 ; ST3H
 ;
 
-define void @st3h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %addr) {
+define void @st3h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3h_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -346,16 +346,16 @@ define void @st3h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3h { z0.h - z2.h }, p0, [x0, #6, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 6, i64 0
+  %base = getelementptr <vscale x 8 x i16>, ptr %addr, i64 6, i64 0
   call void @llvm.aarch64.sve.st3.nxv8i16(<vscale x 8 x i16> %v0,
                                           <vscale x 8 x i16> %v1,
                                           <vscale x 8 x i16> %v2,
                                           <vscale x 8 x i1> %pred,
-                                          i16* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st3h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %addr) {
+define void @st3h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3h_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -363,12 +363,12 @@ define void @st3h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3h { z0.h - z2.h }, p0, [x0, #9, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 9, i64 0
+  %base = getelementptr <vscale x 8 x half>, ptr %addr, i64 9, i64 0
   call void @llvm.aarch64.sve.st3.nxv8f16(<vscale x 8 x half> %v0,
                                           <vscale x 8 x half> %v1,
                                           <vscale x 8 x half> %v2,
                                           <vscale x 8 x i1> %pred,
-                                          half* %base)
+                                          ptr %base)
   ret void
 }
 
@@ -376,7 +376,7 @@ define void @st3h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale
 ; ST3W
 ;
 
-define void @st3w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %addr) {
+define void @st3w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3w_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -384,16 +384,16 @@ define void @st3w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3w { z0.s - z2.s }, p0, [x0, #12, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 12, i64 0
+  %base = getelementptr <vscale x 4 x i32>, ptr %addr, i64 12, i64 0
   call void @llvm.aarch64.sve.st3.nxv4i32(<vscale x 4 x i32> %v0,
                                           <vscale x 4 x i32> %v1,
                                           <vscale x 4 x i32> %v2,
                                           <vscale x 4 x i1> %pred,
-                                          i32* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st3w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x i1> %pred, <vscale x 4 x float>* %addr) {
+define void @st3w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3w_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -401,12 +401,12 @@ define void @st3w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscal
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3w { z0.s - z2.s }, p0, [x0, #15, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 15, i64 0
+  %base = getelementptr <vscale x 4 x float>, ptr %addr, i64 15, i64 0
   call void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float> %v0,
                                           <vscale x 4 x float> %v1,
                                           <vscale x 4 x float> %v2,
                                           <vscale x 4 x i1> %pred,
-                                          float* %base)
+                                          ptr %base)
   ret void
 }
 
@@ -414,7 +414,7 @@ define void @st3w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscal
 ; ST3D
 ;
 
-define void @st3d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %addr) {
+define void @st3d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3d_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -422,16 +422,16 @@ define void @st3d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3d { z0.d - z2.d }, p0, [x0, #18, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 18, i64 0
+  %base = getelementptr <vscale x 2 x i64>, ptr %addr, i64 18, i64 0
   call void @llvm.aarch64.sve.st3.nxv2i64(<vscale x 2 x i64> %v0,
                                           <vscale x 2 x i64> %v1,
                                           <vscale x 2 x i64> %v2,
                                           <vscale x 2 x i1> %pred,
-                                          i64* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st3d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %addr) {
+define void @st3d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3d_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -439,12 +439,12 @@ define void @st3d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vsc
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3d { z0.d - z2.d }, p0, [x0, #-3, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -3, i64 0
+  %base = getelementptr <vscale x 2 x double>, ptr %addr, i64 -3, i64 0
   call void @llvm.aarch64.sve.st3.nxv2f64(<vscale x 2 x double> %v0,
                                           <vscale x 2 x double> %v1,
                                           <vscale x 2 x double> %v2,
                                           <vscale x 2 x i1> %pred,
-                                          double* %base)
+                                          ptr %base)
   ret void
 }
 
@@ -452,7 +452,7 @@ define void @st3d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vsc
 ; ST4B
 ;
 
-define void @st4b_i8_valid_imm(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st4b_i8_valid_imm(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4b_i8_valid_imm:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -461,17 +461,17 @@ define void @st4b_i8_valid_imm(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4b { z0.b - z3.b }, p0, [x0, #4, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 4, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 4, i64 0
   call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i8> %v3,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st4b_i8_invalid_imm_not_multiple_of_4_01(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st4b_i8_invalid_imm_not_multiple_of_4_01(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4b_i8_invalid_imm_not_multiple_of_4_01:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -481,17 +481,17 @@ define void @st4b_i8_invalid_imm_not_multiple_of_4_01(<vscale x 16 x i8> %v0, <v
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4b { z0.b - z3.b }, p0, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 5, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 5, i64 0
   call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i8> %v3,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st4b_i8_invalid_imm_not_multiple_of_4_02(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st4b_i8_invalid_imm_not_multiple_of_4_02(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4b_i8_invalid_imm_not_multiple_of_4_02:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -501,17 +501,17 @@ define void @st4b_i8_invalid_imm_not_multiple_of_4_02(<vscale x 16 x i8> %v0, <v
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4b { z0.b - z3.b }, p0, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 6, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 6, i64 0
   call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i8> %v3,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st4b_i8_invalid_imm_not_multiple_of_4_03(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st4b_i8_invalid_imm_not_multiple_of_4_03(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4b_i8_invalid_imm_not_multiple_of_4_03:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -521,17 +521,17 @@ define void @st4b_i8_invalid_imm_not_multiple_of_4_03(<vscale x 16 x i8> %v0, <v
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4b { z0.b - z3.b }, p0, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 7, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 7, i64 0
   call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i8> %v3,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st4b_i8_invalid_imm_out_of_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st4b_i8_invalid_imm_out_of_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4b_i8_invalid_imm_out_of_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #1
@@ -548,17 +548,17 @@ define void @st4b_i8_invalid_imm_out_of_lower_bound(<vscale x 16 x i8> %v0, <vsc
 ; xM = -9 * 2^6
 ; xP = RDVL * 2^-4
 ; xBASE = RDVL * 2^-4 * -9 * 2^6 = RDVL * -36
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -36, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -36, i64 0
   call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i8> %v3,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st4b_i8_invalid_imm_out_of_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st4b_i8_invalid_imm_out_of_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4b_i8_invalid_imm_out_of_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #1
@@ -575,17 +575,17 @@ define void @st4b_i8_invalid_imm_out_of_upper_bound(<vscale x 16 x i8> %v0, <vsc
 ; xM = 2^9
 ; xP = RDVL * 2^-4
 ; xOFFSET = RDVL * 2^-4 * 2^9 = RDVL * 32
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 32, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 32, i64 0
   call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i8> %v3,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st4b_i8_valid_imm_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st4b_i8_valid_imm_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4b_i8_valid_imm_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -594,17 +594,17 @@ define void @st4b_i8_valid_imm_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4b { z0.b - z3.b }, p0, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -32, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -32, i64 0
   call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i8> %v3,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st4b_i8_valid_imm_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st4b_i8_valid_imm_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4b_i8_valid_imm_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -613,13 +613,13 @@ define void @st4b_i8_valid_imm_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4b { z0.b - z3.b }, p0, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 28, i64 0
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 28, i64 0
   call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i8> %v3,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
@@ -627,7 +627,7 @@ define void @st4b_i8_valid_imm_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16
 ; ST4H
 ;
 
-define void @st4h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i16> %v3, <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %addr) {
+define void @st4h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i16> %v3, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4h_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -636,17 +636,17 @@ define void @st4h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4h { z0.h - z3.h }, p0, [x0, #8, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 8, i64 0
+  %base = getelementptr <vscale x 8 x i16>, ptr %addr, i64 8, i64 0
   call void @llvm.aarch64.sve.st4.nxv8i16(<vscale x 8 x i16> %v0,
                                           <vscale x 8 x i16> %v1,
                                           <vscale x 8 x i16> %v2,
                                           <vscale x 8 x i16> %v3,
                                           <vscale x 8 x i1> %pred,
-                                          i16* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st4h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %addr) {
+define void @st4h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4h_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -655,13 +655,13 @@ define void @st4h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4h { z0.h - z3.h }, p0, [x0, #12, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 12, i64 0
+  %base = getelementptr <vscale x 8 x half>, ptr %addr, i64 12, i64 0
   call void @llvm.aarch64.sve.st4.nxv8f16(<vscale x 8 x half> %v0,
                                           <vscale x 8 x half> %v1,
                                           <vscale x 8 x half> %v2,
                                           <vscale x 8 x half> %v3,
                                           <vscale x 8 x i1> %pred,
-                                          half* %base)
+                                          ptr %base)
   ret void
 }
 
@@ -669,7 +669,7 @@ define void @st4h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale
 ; ST4W
 ;
 
-define void @st4w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i32> %v3, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %addr) {
+define void @st4w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i32> %v3, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4w_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -678,17 +678,17 @@ define void @st4w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4w { z0.s - z3.s }, p0, [x0, #16, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 16, i64 0
+  %base = getelementptr <vscale x 4 x i32>, ptr %addr, i64 16, i64 0
   call void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32> %v0,
                                           <vscale x 4 x i32> %v1,
                                           <vscale x 4 x i32> %v2,
                                           <vscale x 4 x i32> %v3,
                                           <vscale x 4 x i1> %pred,
-                                          i32* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st4w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x i1> %pred, <vscale x 4 x float>* %addr) {
+define void @st4w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4w_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -697,13 +697,13 @@ define void @st4w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscal
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4w { z0.s - z3.s }, p0, [x0, #20, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 20, i64 0
+  %base = getelementptr <vscale x 4 x float>, ptr %addr, i64 20, i64 0
   call void @llvm.aarch64.sve.st4.nxv4f32(<vscale x 4 x float> %v0,
                                           <vscale x 4 x float> %v1,
                                           <vscale x 4 x float> %v2,
                                           <vscale x 4 x float> %v3,
                                           <vscale x 4 x i1> %pred,
-                                          float* %base)
+                                          ptr %base)
   ret void
 }
 
@@ -711,7 +711,7 @@ define void @st4w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscal
 ; ST4D
 ;
 
-define void @st4d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i64> %v3, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %addr) {
+define void @st4d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i64> %v3, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4d_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -720,17 +720,17 @@ define void @st4d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4d { z0.d - z3.d }, p0, [x0, #24, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 24, i64 0
+  %base = getelementptr <vscale x 2 x i64>, ptr %addr, i64 24, i64 0
   call void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64> %v0,
                                           <vscale x 2 x i64> %v1,
                                           <vscale x 2 x i64> %v2,
                                           <vscale x 2 x i64> %v3,
                                           <vscale x 2 x i1> %pred,
-                                          i64* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st4d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %addr) {
+define void @st4d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4d_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -739,36 +739,36 @@ define void @st4d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vsc
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4d { z0.d - z3.d }, p0, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 28, i64 0
+  %base = getelementptr <vscale x 2 x double>, ptr %addr, i64 28, i64 0
   call void @llvm.aarch64.sve.st4.nxv2f64(<vscale x 2 x double> %v0,
                                           <vscale x 2 x double> %v1,
                                           <vscale x 2 x double> %v2,
                                           <vscale x 2 x double> %v3,
                                           <vscale x 2 x i1> %pred,
-                                          double* %base)
-  ret void
-}
-
-declare void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
-declare void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
-declare void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
-declare void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
-declare void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, half*)
-declare void @llvm.aarch64.sve.st2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, float*)
-declare void @llvm.aarch64.sve.st2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, double*)
-
-declare void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
-declare void @llvm.aarch64.sve.st3.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
-declare void @llvm.aarch64.sve.st3.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
-declare void @llvm.aarch64.sve.st3.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
-declare void @llvm.aarch64.sve.st3.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, half*)
-declare void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, float*)
-declare void @llvm.aarch64.sve.st3.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, double*)
-
-declare void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
-declare void @llvm.aarch64.sve.st4.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
-declare void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
-declare void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
-declare void @llvm.aarch64.sve.st4.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, half*)
-declare void @llvm.aarch64.sve.st4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, float*)
-declare void @llvm.aarch64.sve.st4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, double*)
+                                          ptr %base)
+  ret void
+}
+
+declare void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, ptr)
+
+declare void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, ptr)
+
+declare void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, ptr)

diff --git a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll
index 0a071f826d926..6c62913e9a8e4 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll
@@ -3,103 +3,103 @@
 
 ; LD1B
 
-define <vscale x 16 x i8> @ld1b_lower_bound(<vscale x 16 x i8>* %a) {
+define <vscale x 16 x i8> @ld1b_lower_bound(ptr %a) {
 ; CHECK-LABEL: ld1b_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, #-8, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -8
-  %load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 -8
+  %load = load <vscale x 16 x i8>, ptr %base
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 16 x i8> @ld1b_inbound(<vscale x 16 x i8>* %a) {
+define <vscale x 16 x i8> @ld1b_inbound(ptr %a) {
 ; CHECK-LABEL: ld1b_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 2
-  %load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 2
+  %load = load <vscale x 16 x i8>, ptr %base
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 16 x i8> @ld1b_upper_bound(<vscale x 16 x i8>* %a) {
+define <vscale x 16 x i8> @ld1b_upper_bound(ptr %a) {
 ; CHECK-LABEL: ld1b_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 7
-  %load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 7
+  %load = load <vscale x 16 x i8>, ptr %base
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 16 x i8> @ld1b_out_of_upper_bound(<vscale x 16 x i8>* %a) {
+define <vscale x 16 x i8> @ld1b_out_of_upper_bound(ptr %a) {
 ; CHECK-LABEL: ld1b_out_of_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    rdvl x8, #8
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 8
-  %load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 8
+  %load = load <vscale x 16 x i8>, ptr %base
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 16 x i8> @ld1b_out_of_lower_bound(<vscale x 16 x i8>* %a) {
+define <vscale x 16 x i8> @ld1b_out_of_lower_bound(ptr %a) {
 ; CHECK-LABEL: ld1b_out_of_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    rdvl x8, #-9
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -9
-  %load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 -9
+  %load = load <vscale x 16 x i8>, ptr %base
   ret <vscale x 16 x i8> %load
 }
 
 ; LD1H
 
-define <vscale x 8 x i16> @ld1h_inbound(<vscale x 8 x i16>* %a) {
+define <vscale x 8 x i16> @ld1h_inbound(ptr %a) {
 ; CHECK-LABEL: ld1h_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, #-2, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %a, i64 -2
-  %load = load <vscale x 8 x i16>, <vscale x 8 x i16>* %base
+  %base = getelementptr <vscale x 8 x i16>, ptr %a, i64 -2
+  %load = load <vscale x 8 x i16>, ptr %base
   ret <vscale x 8 x i16> %load
 }
 
 ; LD1W
 
-define <vscale x 4 x i32> @ld1s_inbound(<vscale x 4 x i32>* %a) {
+define <vscale x 4 x i32> @ld1s_inbound(ptr %a) {
 ; CHECK-LABEL: ld1s_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, #4, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %a, i64 4
-  %load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %base
+  %base = getelementptr <vscale x 4 x i32>, ptr %a, i64 4
+  %load = load <vscale x 4 x i32>, ptr %base
   ret <vscale x 4 x i32> %load
 }
 
 ; LD1D
 
-define <vscale x 2 x i64> @ld1d_inbound(<vscale x 2 x i64>* %a) {
+define <vscale x 2 x i64> @ld1d_inbound(ptr %a) {
 ; CHECK-LABEL: ld1d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, #6, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %a, i64 6
-  %load = load <vscale x 2 x i64>, <vscale x 2 x i64>* %base
+  %base = getelementptr <vscale x 2 x i64>, ptr %a, i64 6
+  %load = load <vscale x 2 x i64>, ptr %base
   ret <vscale x 2 x i64> %load
 }
 
-define void @load_nxv6f16(<vscale x 6 x half>* %a) {
+define void @load_nxv6f16(ptr %a) {
 ; CHECK-LABEL: load_nxv6f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -107,11 +107,11 @@ define void @load_nxv6f16(<vscale x 6 x half>* %a) {
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ld1h { z0.s }, p1/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load volatile <vscale x 6 x half>, <vscale x 6 x half>* %a
+  %val = load volatile <vscale x 6 x half>, ptr %a
   ret void
 }
 
-define void @load_nxv6f32(<vscale x 6 x float>* %a) {
+define void @load_nxv6f32(ptr %a) {
 ; CHECK-LABEL: load_nxv6f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -119,11 +119,11 @@ define void @load_nxv6f32(<vscale x 6 x float>* %a) {
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ld1w { z0.s }, p1/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load volatile <vscale x 6 x float>, <vscale x 6 x float>* %a
+  %val = load volatile <vscale x 6 x float>, ptr %a
   ret void
 }
 
-define void @load_nxv12f16(<vscale x 12 x half>* %a) {
+define void @load_nxv12f16(ptr %a) {
 ; CHECK-LABEL: load_nxv12f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -131,6 +131,6 @@ define void @load_nxv12f16(<vscale x 12 x half>* %a) {
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ld1h { z0.h }, p1/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load volatile <vscale x 12 x half>, <vscale x 12 x half>* %a
+  %val = load volatile <vscale x 12 x half>, ptr %a
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll
index 636e332df13c7..3f31917b125b7 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll
@@ -10,7 +10,7 @@ define <vscale x 16 x i8> @ld1_nxv16i8(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
-  %val = load volatile <vscale x 16 x i8>, <vscale x 16 x i8>* %ptr
+  %val = load volatile <vscale x 16 x i8>, ptr %ptr
   ret <vscale x 16 x i8> %val
 }
 
@@ -21,7 +21,7 @@ define <vscale x 8 x i16> @ld1_nxv16i8_bitcast_to_i16(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
-  %val = load volatile <vscale x 8 x i16>, <vscale x 8 x i16>* %ptr
+  %val = load volatile <vscale x 8 x i16>, ptr %ptr
   ret <vscale x 8 x i16> %val
 }
 
@@ -32,7 +32,7 @@ define <vscale x 4 x i32> @ld1_nxv16i8_bitcast_to_i32(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
-  %val = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %ptr
+  %val = load volatile <vscale x 4 x i32>, ptr %ptr
   ret <vscale x 4 x i32> %val
 }
 
@@ -43,7 +43,7 @@ define <vscale x 2 x i64> @ld1_nxv16i8_bitcast_to_i64(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
-  %val = load volatile <vscale x 2 x i64>, <vscale x 2 x i64>* %ptr
+  %val = load volatile <vscale x 2 x i64>, ptr %ptr
   ret <vscale x 2 x i64> %val
 }
 
@@ -54,7 +54,7 @@ define <vscale x 8 x i16> @ld1_nxv8i16_zext8(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
-  %val = load volatile <vscale x 8 x i8>, <vscale x 8 x i8>* %ptr
+  %val = load volatile <vscale x 8 x i8>, ptr %ptr
   %zext = zext <vscale x 8 x i8> %val to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %zext
 }
@@ -66,7 +66,7 @@ define <vscale x 4 x i32> @ld1_nxv4i32_zext8(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
-  %val = load volatile <vscale x 4 x i8>, <vscale x 4 x i8>* %ptr
+  %val = load volatile <vscale x 4 x i8>, ptr %ptr
   %zext = zext <vscale x 4 x i8> %val to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %zext
 }
@@ -78,7 +78,7 @@ define <vscale x 2 x i64> @ld1_nxv2i64_zext8(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
-  %val = load volatile <vscale x 2 x i8>, <vscale x 2 x i8>* %ptr
+  %val = load volatile <vscale x 2 x i8>, ptr %ptr
   %zext = zext <vscale x 2 x i8> %val to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %zext
 }
@@ -90,7 +90,7 @@ define <vscale x 8 x i16> @ld1_nxv8i16_sext8(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1sb { z0.h }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
-  %val = load volatile <vscale x 8 x i8>, <vscale x 8 x i8>* %ptr
+  %val = load volatile <vscale x 8 x i8>, ptr %ptr
   %sext = sext <vscale x 8 x i8> %val to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %sext
 }
@@ -102,7 +102,7 @@ define <vscale x 4 x i32> @ld1_nxv4i32_sext8(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
-  %val = load volatile <vscale x 4 x i8>, <vscale x 4 x i8>* %ptr
+  %val = load volatile <vscale x 4 x i8>, ptr %ptr
   %sext = sext <vscale x 4 x i8> %val to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %sext
 }
@@ -114,7 +114,7 @@ define <vscale x 2 x i64> @ld1_nxv2i64_sext8(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
-  %val = load volatile <vscale x 2 x i8>, <vscale x 2 x i8>* %ptr
+  %val = load volatile <vscale x 2 x i8>, ptr %ptr
   %sext = sext <vscale x 2 x i8> %val to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %sext
 }
@@ -128,7 +128,7 @@ define <vscale x 8 x i16> @ld1_nxv8i16(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
-  %val = load volatile <vscale x 8 x i16>, <vscale x 8 x i16>* %ptr
+  %val = load volatile <vscale x 8 x i16>, ptr %ptr
   ret <vscale x 8 x i16> %val
 }
 
@@ -139,7 +139,7 @@ define <vscale x 4 x i32> @ld1_nxv4i32_zext16(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
-  %val = load volatile <vscale x 4 x i16>, <vscale x 4 x i16>* %ptr
+  %val = load volatile <vscale x 4 x i16>, ptr %ptr
   %zext = zext <vscale x 4 x i16> %val to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %zext
 }
@@ -151,7 +151,7 @@ define <vscale x 2 x i64> @ld1_nxv2i64_zext16(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
-  %val = load volatile <vscale x 2 x i16>, <vscale x 2 x i16>* %ptr
+  %val = load volatile <vscale x 2 x i16>, ptr %ptr
   %zext = zext <vscale x 2 x i16> %val to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %zext
 }
@@ -163,7 +163,7 @@ define <vscale x 4 x i32> @ld1_nxv4i32_sext16(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
-  %val = load volatile <vscale x 4 x i16>, <vscale x 4 x i16>* %ptr
+  %val = load volatile <vscale x 4 x i16>, ptr %ptr
   %sext = sext <vscale x 4 x i16> %val to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %sext
 }
@@ -175,7 +175,7 @@ define <vscale x 2 x i64> @ld1_nxv2i64_sext16(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
-  %val = load volatile <vscale x 2 x i16>, <vscale x 2 x i16>* %ptr
+  %val = load volatile <vscale x 2 x i16>, ptr %ptr
   %sext = sext <vscale x 2 x i16> %val to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %sext
 }
@@ -187,7 +187,7 @@ define <vscale x 8 x half> @ld1_nxv8f16(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds half, ptr %addr, i64 %off
-  %val = load volatile <vscale x 8 x half>, <vscale x 8 x half>* %ptr
+  %val = load volatile <vscale x 8 x half>, ptr %ptr
   ret <vscale x 8 x half> %val
 }
 
@@ -198,7 +198,7 @@ define <vscale x 8 x bfloat> @ld1_nxv8bf16(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
-  %val = load volatile <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %ptr
+  %val = load volatile <vscale x 8 x bfloat>, ptr %ptr
   ret <vscale x 8 x bfloat> %val
 }
 
@@ -209,7 +209,7 @@ define <vscale x 4 x half> @ld1_nxv4f16(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds half, ptr %addr, i64 %off
-  %val = load volatile <vscale x 4 x half>, <vscale x 4 x half>* %ptr
+  %val = load volatile <vscale x 4 x half>, ptr %ptr
   ret <vscale x 4 x half> %val
 }
 
@@ -220,7 +220,7 @@ define <vscale x 4 x bfloat> @ld1_nxv4bf16(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
-  %val = load volatile <vscale x 4 x bfloat>, <vscale x 4 x bfloat>* %ptr
+  %val = load volatile <vscale x 4 x bfloat>, ptr %ptr
   ret <vscale x 4 x bfloat> %val
 }
 
@@ -231,7 +231,7 @@ define <vscale x 2 x half> @ld1_nxv2f16(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds half, ptr %addr, i64 %off
-  %val = load volatile <vscale x 2 x half>, <vscale x 2 x half>* %ptr
+  %val = load volatile <vscale x 2 x half>, ptr %ptr
   ret <vscale x 2 x half> %val
 }
 
@@ -242,7 +242,7 @@ define <vscale x 2 x bfloat> @ld1_nxv2bf16(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
-  %val = load volatile <vscale x 2 x bfloat>, <vscale x 2 x bfloat>* %ptr
+  %val = load volatile <vscale x 2 x bfloat>, ptr %ptr
   ret <vscale x 2 x bfloat> %val
 }
 
@@ -255,7 +255,7 @@ define <vscale x 4 x i32> @ld1_nxv4i32(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i32, ptr %addr, i64 %off
-  %val = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %ptr
+  %val = load volatile <vscale x 4 x i32>, ptr %ptr
   ret <vscale x 4 x i32> %val
 }
 
@@ -266,7 +266,7 @@ define <vscale x 2 x i64> @ld1_nxv2i64_zext32(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i32, ptr %addr, i64 %off
-  %val = load volatile <vscale x 2 x i32>, <vscale x 2 x i32>* %ptr
+  %val = load volatile <vscale x 2 x i32>, ptr %ptr
   %zext = zext <vscale x 2 x i32> %val to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %zext
 }
@@ -278,7 +278,7 @@ define <vscale x 2 x i64> @ld1_nxv2i64_sext32(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i32, ptr %addr, i64 %off
-  %val = load volatile <vscale x 2 x i32>, <vscale x 2 x i32>* %ptr
+  %val = load volatile <vscale x 2 x i32>, ptr %ptr
   %sext = sext <vscale x 2 x i32> %val to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %sext
 }
@@ -290,7 +290,7 @@ define <vscale x 4 x float> @ld1_nxv4f32(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds float, ptr %addr, i64 %off
-  %val = load volatile <vscale x 4 x float>, <vscale x 4 x float>* %ptr
+  %val = load volatile <vscale x 4 x float>, ptr %ptr
   ret <vscale x 4 x float> %val
 }
 
@@ -301,7 +301,7 @@ define <vscale x 2 x float> @ld1_nxv2f32(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds float, ptr %addr, i64 %off
-  %val = load volatile <vscale x 2 x float>, <vscale x 2 x float>* %ptr
+  %val = load volatile <vscale x 2 x float>, ptr %ptr
   ret <vscale x 2 x float> %val
 }
 
@@ -314,7 +314,7 @@ define <vscale x 2 x i64> @ld1_nxv2i64(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i64, ptr %addr, i64 %off
-  %val = load volatile <vscale x 2 x i64>, <vscale x 2 x i64>* %ptr
+  %val = load volatile <vscale x 2 x i64>, ptr %ptr
   ret <vscale x 2 x i64> %val
 }
 
@@ -325,6 +325,6 @@ define <vscale x 2 x double> @ld1_nxv2f64(ptr %addr, i64 %off) {
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds double, ptr %addr, i64 %off
-  %val = load volatile <vscale x 2 x double>, <vscale x 2 x double>* %ptr
+  %val = load volatile <vscale x 2 x double>, ptr %ptr
   ret <vscale x 2 x double> %val
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-ld1r.ll b/llvm/test/CodeGen/AArch64/sve-ld1r.ll
index fcfcb5619f7dd..e42e2272a2d4f 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld1r.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld1r.ll
@@ -1411,7 +1411,7 @@ define <vscale x 2 x double> @negtest_dup_ld1rd_double_passthru_nxv2f64(<vscale
 
 
 ; Check that a load consumed by a scalable splat prefers a replicating load.
-define i8* @avoid_preindex_load(i8* %src, <vscale x 2 x i64>* %out) {
+define ptr @avoid_preindex_load(ptr %src, ptr %out) {
 ; CHECK-LABEL: avoid_preindex_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -1419,18 +1419,18 @@ define i8* @avoid_preindex_load(i8* %src, <vscale x 2 x i64>* %out) {
 ; CHECK-NEXT:    add x0, x0, #1
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %src, i64 1
-  %tmp = load i8, i8* %ptr, align 4
+  %ptr = getelementptr inbounds i8, ptr %src, i64 1
+  %tmp = load i8, ptr %ptr, align 4
   %ext = sext i8 %tmp to i64
   %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
   %dup = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  store <vscale x 2 x i64> %dup, <vscale x 2 x i64>* %out
-  ret i8* %ptr
+  store <vscale x 2 x i64> %dup, ptr %out
+  ret ptr %ptr
 }
 
 ; Check that a load consumed by a scalable splat prefers a replicating
 ; load over a pre-indexed load.
-define i8* @avoid_preindex_load_dup(i8* %src, <vscale x 2 x i1> %pg, <vscale x 2 x i64>* %out) {
+define ptr @avoid_preindex_load_dup(ptr %src, <vscale x 2 x i1> %pg, ptr %out) {
 ; CHECK-LABEL: avoid_preindex_load_dup:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p1.d
@@ -1438,16 +1438,16 @@ define i8* @avoid_preindex_load_dup(i8* %src, <vscale x 2 x i1> %pg, <vscale x 2
 ; CHECK-NEXT:    add x0, x0, #1
 ; CHECK-NEXT:    st1d { z0.d }, p1, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %src, i64 1
-  %tmp = load i8, i8* %ptr, align 4
+  %ptr = getelementptr inbounds i8, ptr %src, i64 1
+  %tmp = load i8, ptr %ptr, align 4
   %ext = sext i8 %tmp to i64
   %dup = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
-  store <vscale x 2 x i64> %dup, <vscale x 2 x i64>* %out
-  ret i8* %ptr
+  store <vscale x 2 x i64> %dup, ptr %out
+  ret ptr %ptr
 }
 
 ; Same as avoid_preindex_load_dup, but with zero passthru.
-define i8* @avoid_preindex_load_dup_passthru_zero(i8* %src, <vscale x 2 x i1> %pg, <vscale x 2 x i64>* %out) {
+define ptr @avoid_preindex_load_dup_passthru_zero(ptr %src, <vscale x 2 x i1> %pg, ptr %out) {
 ; CHECK-LABEL: avoid_preindex_load_dup_passthru_zero:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p1.d
@@ -1455,16 +1455,16 @@ define i8* @avoid_preindex_load_dup_passthru_zero(i8* %src, <vscale x 2 x i1> %p
 ; CHECK-NEXT:    add x0, x0, #1
 ; CHECK-NEXT:    st1d { z0.d }, p1, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %src, i64 1
-  %tmp = load i8, i8* %ptr, align 4
+  %ptr = getelementptr inbounds i8, ptr %src, i64 1
+  %tmp = load i8, ptr %ptr, align 4
   %ext = sext i8 %tmp to i64
   %dup = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, i64 %ext)
-  store <vscale x 2 x i64> %dup, <vscale x 2 x i64>* %out
-  ret i8* %ptr
+  store <vscale x 2 x i64> %dup, ptr %out
+  ret ptr %ptr
 }
 
 ; If a dup has a non-undef passthru, stick with the pre-indexed load.
-define i8* @preindex_load_dup_passthru(<vscale x 2 x i64> %passthru, i8* %src, <vscale x 2 x i1> %pg, <vscale x 2 x i64>* %out) {
+define ptr @preindex_load_dup_passthru(<vscale x 2 x i64> %passthru, ptr %src, <vscale x 2 x i1> %pg, ptr %out) {
 ; CHECK-LABEL: preindex_load_dup_passthru:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p1.d
@@ -1472,17 +1472,17 @@ define i8* @preindex_load_dup_passthru(<vscale x 2 x i64> %passthru, i8* %src, <
 ; CHECK-NEXT:    mov z0.d, p0/m, x8
 ; CHECK-NEXT:    st1d { z0.d }, p1, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %src, i64 1
-  %tmp = load i8, i8* %ptr, align 4
+  %ptr = getelementptr inbounds i8, ptr %src, i64 1
+  %tmp = load i8, ptr %ptr, align 4
   %ext = sext i8 %tmp to i64
   %dup = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> %passthru, <vscale x 2 x i1> %pg, i64 %ext)
-  store <vscale x 2 x i64> %dup, <vscale x 2 x i64>* %out
-  ret i8* %ptr
+  store <vscale x 2 x i64> %dup, ptr %out
+  ret ptr %ptr
 }
 
 ; Show that a second user of the load prevents the replicating load
 ; check which would ordinarily inhibit indexed loads from firing.
-define i8* @preidx8sext64_instead_of_ld1r(i8* %src, <vscale x 2 x i64>* %out, i64* %dst) {
+define ptr @preidx8sext64_instead_of_ld1r(ptr %src, ptr %out, ptr %dst) {
 ; CHECK-LABEL: preidx8sext64_instead_of_ld1r:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -1491,14 +1491,14 @@ define i8* @preidx8sext64_instead_of_ld1r(i8* %src, <vscale x 2 x i64>* %out, i6
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
 ; CHECK-NEXT:    str x8, [x2]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %src, i64 1
-  %tmp = load i8, i8* %ptr, align 4
+  %ptr = getelementptr inbounds i8, ptr %src, i64 1
+  %tmp = load i8, ptr %ptr, align 4
   %ext = sext i8 %tmp to i64
   %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
   %dup = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
-  store <vscale x 2 x i64> %dup, <vscale x 2 x i64>* %out
-  store i64 %ext, i64* %dst
-  ret i8* %ptr
+  store <vscale x 2 x i64> %dup, ptr %out
+  store i64 %ext, ptr %dst
+  ret ptr %ptr
 }
 
 

diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather.ll
index 5d26989e4a768..9f884d0022679 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather.ll
@@ -1,128 +1,128 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
 
-define <vscale x 2 x i64> @masked_gather_nxv2i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i8(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [z0.d]
 ; CHECK-NEXT:    ret
-  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %vals.zext = zext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i16(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i16(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [z0.d]
 ; CHECK-NEXT:    ret
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i32(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i32(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [z0.d]
 ; CHECK-NEXT:    ret
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i64(<vscale x 2 x i64*> %ptrs, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [z0.d]
 ; CHECK-NEXT:    ret
-  %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+  %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
   ret <vscale x 2 x i64> %vals
 }
 
-define <vscale x 2 x half> @masked_gather_nxv2f16(<vscale x 2 x half*> %ptrs, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @masked_gather_nxv2f16(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [z0.d]
 ; CHECK-NEXT:    ret
-  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
   ret <vscale x 2 x half> %vals
 }
 
-define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(<vscale x 2 x bfloat*> %ptrs, <vscale x 2 x i1> %mask) #0 {
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [z0.d]
 ; CHECK-NEXT:    ret
-  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
   ret <vscale x 2 x bfloat> %vals
 }
 
-define <vscale x 2 x float> @masked_gather_nxv2f32(<vscale x 2 x float*> %ptrs, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @masked_gather_nxv2f32(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [z0.d]
 ; CHECK-NEXT:    ret
-  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
   ret <vscale x 2 x float> %vals
 }
 
-define <vscale x 2 x double> @masked_gather_nxv2f64(<vscale x 2 x double*> %ptrs, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @masked_gather_nxv2f64(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [z0.d]
 ; CHECK-NEXT:    ret
-  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
   ret <vscale x 2 x double> %vals
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i8(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [z0.d]
 ; CHECK-NEXT:    ret
-  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %vals.sext = sext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i16(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i16(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [z0.d]
 ; CHECK-NEXT:    ret
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i32(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i32(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [z0.d]
 ; CHECK-NEXT:    ret
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_gather_passthru(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru) {
+define <vscale x 2 x i64> @masked_gather_passthru(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru) {
 ; CHECK-LABEL: masked_gather_passthru:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [z0.d]
 ; CHECK-NEXT:    sel z0.d, p0, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru)
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru)
   %vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_gather_passthru_0(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_passthru_0(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_passthru_0:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [z0.d]
 ; CHECK-NEXT:    ret
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> zeroinitializer)
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> zeroinitializer)
   %vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
@@ -151,12 +151,12 @@ define <vscale x 2 x i64> @masked_gather_non_element_type_based_scaling(ptr %bas
   ret <vscale x 2 x i64> %vals
 }
 
-declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
-declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
-declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
-declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
-declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
-declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
-declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
-declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
+declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
+declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
+declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 attributes #0 = { "target-features"="+sve,+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
index 5c4e744ba108e..14bb93a867e96 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-nonext.ll
@@ -4,125 +4,125 @@
 ; Masked Loads
 ;
 
-define <vscale x 2 x i64> @masked_load_nxv2i64(<vscale x 2 x i64> *%a, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x i64> @masked_load_nxv2i64(ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv2i64:
 ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %load = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64> *%a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+  %load = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 4 x i32> @masked_load_nxv4i32(<vscale x 4 x i32> *%a, <vscale x 4 x i1> %mask) nounwind {
+define <vscale x 4 x i32> @masked_load_nxv4i32(ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv4i32:
 ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32> *%a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 8 x i16> @masked_load_nxv8i16(<vscale x 8 x i16> *%a, <vscale x 8 x i1> %mask) nounwind {
+define <vscale x 8 x i16> @masked_load_nxv8i16(ptr %a, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv8i16:
 ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16> *%a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+  %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
   ret <vscale x 8 x i16> %load
 }
 
-define <vscale x 16 x i8> @masked_load_nxv16i8(<vscale x 16 x i8> *%a, <vscale x 16 x i1> %mask) nounwind {
+define <vscale x 16 x i8> @masked_load_nxv16i8(ptr %a, <vscale x 16 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv16i8:
 ; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8> *%a, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %a, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 2 x double> @masked_load_nxv2f64(<vscale x 2 x double> *%a, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x double> @masked_load_nxv2f64(ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv2f64:
 ; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %load = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(<vscale x 2 x double> *%a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+  %load = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(ptr %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
   ret <vscale x 2 x double> %load
 }
 
-define <vscale x 2 x float> @masked_load_nxv2f32(<vscale x 2 x float> *%a, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x float> @masked_load_nxv2f32(ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv2f32:
 ; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %load = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(<vscale x 2 x float> *%a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+  %load = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(ptr %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
   ret <vscale x 2 x float> %load
 }
 
-define <vscale x 2 x half> @masked_load_nxv2f16(<vscale x 2 x half> *%a, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x half> @masked_load_nxv2f16(ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv2f16:
 ; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %load = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(<vscale x 2 x half> *%a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+  %load = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(ptr %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
   ret <vscale x 2 x half> %load
 }
 
-define <vscale x 2 x bfloat> @masked_load_nxv2bf16(<vscale x 2 x bfloat> *%a, <vscale x 2 x i1> %mask) nounwind #0 {
+define <vscale x 2 x bfloat> @masked_load_nxv2bf16(ptr %a, <vscale x 2 x i1> %mask) nounwind #0 {
 ; CHECK-LABEL: masked_load_nxv2bf16:
 ; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %load = call <vscale x 2 x bfloat> @llvm.masked.load.nxv2bf16(<vscale x 2 x bfloat> *%a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  %load = call <vscale x 2 x bfloat> @llvm.masked.load.nxv2bf16(ptr %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
   ret <vscale x 2 x bfloat> %load
 }
 
-define <vscale x 4 x float> @masked_load_nxv4f32(<vscale x 4 x float> *%a, <vscale x 4 x i1> %mask) nounwind {
+define <vscale x 4 x float> @masked_load_nxv4f32(ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv4f32:
 ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %load = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float> *%a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
+  %load = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(ptr %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
   ret <vscale x 4 x float> %load
 }
 
-define <vscale x 4 x half> @masked_load_nxv4f16(<vscale x 4 x half> *%a, <vscale x 4 x i1> %mask) nounwind {
+define <vscale x 4 x half> @masked_load_nxv4f16(ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv4f16:
 ; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %load = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half> *%a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
+  %load = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(ptr %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
   ret <vscale x 4 x half> %load
 }
 
-define <vscale x 4 x bfloat> @masked_load_nxv4bf16(<vscale x 4 x bfloat> *%a, <vscale x 4 x i1> %mask) nounwind #0 {
+define <vscale x 4 x bfloat> @masked_load_nxv4bf16(ptr %a, <vscale x 4 x i1> %mask) nounwind #0 {
 ; CHECK-LABEL: masked_load_nxv4bf16:
 ; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %load = call <vscale x 4 x bfloat> @llvm.masked.load.nxv4bf16(<vscale x 4 x bfloat> *%a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+  %load = call <vscale x 4 x bfloat> @llvm.masked.load.nxv4bf16(ptr %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
   ret <vscale x 4 x bfloat> %load
 }
 
-define <vscale x 8 x half> @masked_load_nxv8f16(<vscale x 8 x half> *%a, <vscale x 8 x i1> %mask) nounwind {
+define <vscale x 8 x half> @masked_load_nxv8f16(ptr %a, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv8f16:
 ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %load = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half> *%a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x half> undef)
+  %load = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x half> undef)
   ret <vscale x 8 x half> %load
 }
 
-define <vscale x 8 x bfloat> @masked_load_nxv8bf16(<vscale x 8 x bfloat> *%a, <vscale x 8 x i1> %mask) nounwind #0 {
+define <vscale x 8 x bfloat> @masked_load_nxv8bf16(ptr %a, <vscale x 8 x i1> %mask) nounwind #0 {
 ; CHECK-LABEL: masked_load_nxv8bf16:
 ; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT: ret
-  %load = call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(<vscale x 8 x bfloat> *%a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x bfloat> undef)
+  %load = call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x bfloat> undef)
   ret <vscale x 8 x bfloat> %load
 }
 
-define <vscale x 4 x i32> @masked_load_passthru(<vscale x 4 x i32> *%a, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %passthru) nounwind {
+define <vscale x 4 x i32> @masked_load_passthru(ptr %a, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %passthru) nounwind {
 ; CHECK-LABEL: masked_load_passthru:
 ; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0]
 ; CHECK-NEXT: mov z0.s, p0/m, z1.s
 ; CHECK-NEXT: ret
-  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32> *%a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %passthru)
+  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %passthru)
   ret <vscale x 4 x i32> %load
 }
 
 ; Masked load requires promotion
-define <vscale x 2 x i16> @masked_load_nxv2i16(<vscale x 2 x i16>* noalias %in, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @masked_load_nxv2i16(ptr noalias %in, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_load_nxv2i16
 ; CHECK:       ld1h { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:  ret
-  %wide.load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %in, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %wide.load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %in, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   ret <vscale x 2 x i16> %wide.load
 }
 
@@ -130,107 +130,107 @@ define <vscale x 2 x i16> @masked_load_nxv2i16(<vscale x 2 x i16>* noalias %in,
 ; Masked Stores
 ;
 
-define void @masked_store_nxv2i64(<vscale x 2 x i64> *%a, <vscale x 2 x i64> %val, <vscale x 2 x i1> %mask) nounwind {
+define void @masked_store_nxv2i64(ptr %a, <vscale x 2 x i64> %val, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv2i64:
 ; CHECK-NEXT: st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT: ret
-  call void @llvm.masked.store.nxv2i64(<vscale x 2 x i64> %val, <vscale x 2 x i64> *%a, i32 8, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.nxv2i64(<vscale x 2 x i64> %val, ptr %a, i32 8, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_store_nxv4i32(<vscale x 4 x i32> *%a, <vscale x 4 x i32> %val, <vscale x 4 x i1> %mask) nounwind {
+define void @masked_store_nxv4i32(ptr %a, <vscale x 4 x i32> %val, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv4i32:
 ; CHECK-NEXT: st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT: ret
-  call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32> *%a, i32 4, <vscale x 4 x i1> %mask)
+  call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %val, ptr %a, i32 4, <vscale x 4 x i1> %mask)
   ret void
 }
 
-define void @masked_store_nxv8i16(<vscale x 8 x i16> *%a, <vscale x 8 x i16> %val, <vscale x 8 x i1> %mask) nounwind {
+define void @masked_store_nxv8i16(ptr %a, <vscale x 8 x i16> %val, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv8i16:
 ; CHECK-NEXT: st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT: ret
-  call void @llvm.masked.store.nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16> *%a, i32 2, <vscale x 8 x i1> %mask)
+  call void @llvm.masked.store.nxv8i16(<vscale x 8 x i16> %val, ptr %a, i32 2, <vscale x 8 x i1> %mask)
   ret void
 }
 
-define void @masked_store_nxv16i8(<vscale x 16 x i8> *%a, <vscale x 16 x i8> %val, <vscale x 16 x i1> %mask) nounwind {
+define void @masked_store_nxv16i8(ptr %a, <vscale x 16 x i8> %val, <vscale x 16 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv16i8:
 ; CHECK-NEXT: st1b { z0.b }, p0, [x0]
 ; CHECK-NEXT: ret
-  call void @llvm.masked.store.nxv16i8(<vscale x 16 x i8> %val, <vscale x 16 x i8> *%a, i32 1, <vscale x 16 x i1> %mask)
+  call void @llvm.masked.store.nxv16i8(<vscale x 16 x i8> %val, ptr %a, i32 1, <vscale x 16 x i1> %mask)
   ret void
 }
 
-define void @masked_store_nxv2f64(<vscale x 2 x double> *%a, <vscale x 2 x double> %val, <vscale x 2 x i1> %mask) nounwind {
+define void @masked_store_nxv2f64(ptr %a, <vscale x 2 x double> %val, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv2f64:
 ; CHECK-NEXT: st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT: ret
-  call void @llvm.masked.store.nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double> *%a, i32 8, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.nxv2f64(<vscale x 2 x double> %val, ptr %a, i32 8, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_store_nxv2f32(<vscale x 2 x float> *%a, <vscale x 2 x float> %val, <vscale x 2 x i1> %mask) nounwind {
+define void @masked_store_nxv2f32(ptr %a, <vscale x 2 x float> %val, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv2f32:
 ; CHECK-NEXT: st1w { z0.d }, p0, [x0]
 ; CHECK-NEXT: ret
-  call void @llvm.masked.store.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> *%a, i32 4, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.nxv2f32(<vscale x 2 x float> %val, ptr %a, i32 4, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_store_nxv2f16(<vscale x 2 x half> *%a, <vscale x 2 x half> %val, <vscale x 2 x i1> %mask) nounwind {
+define void @masked_store_nxv2f16(ptr %a, <vscale x 2 x half> %val, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv2f16:
 ; CHECK-NEXT: st1h { z0.d }, p0, [x0]
 ; CHECK-NEXT: ret
-  call void @llvm.masked.store.nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half> *%a, i32 4, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.nxv2f16(<vscale x 2 x half> %val, ptr %a, i32 4, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_store_nxv4f32(<vscale x 4 x float> *%a, <vscale x 4 x float> %val, <vscale x 4 x i1> %mask) nounwind {
+define void @masked_store_nxv4f32(ptr %a, <vscale x 4 x float> %val, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv4f32:
 ; CHECK-NEXT: st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT: ret
-  call void @llvm.masked.store.nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float> *%a, i32 4, <vscale x 4 x i1> %mask)
+  call void @llvm.masked.store.nxv4f32(<vscale x 4 x float> %val, ptr %a, i32 4, <vscale x 4 x i1> %mask)
   ret void
 }
 
-define void @masked_store_nxv4f16(<vscale x 4 x half> *%a, <vscale x 4 x half> %val, <vscale x 4 x i1> %mask) nounwind {
+define void @masked_store_nxv4f16(ptr %a, <vscale x 4 x half> %val, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv4f16:
 ; CHECK-NEXT: st1h { z0.s }, p0, [x0]
 ; CHECK-NEXT: ret
-  call void @llvm.masked.store.nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half> *%a, i32 2, <vscale x 4 x i1> %mask)
+  call void @llvm.masked.store.nxv4f16(<vscale x 4 x half> %val, ptr %a, i32 2, <vscale x 4 x i1> %mask)
   ret void
 }
 
-define void @masked_store_nxv8f16(<vscale x 8 x half> *%a, <vscale x 8 x half> %val, <vscale x 8 x i1> %mask) nounwind {
+define void @masked_store_nxv8f16(ptr %a, <vscale x 8 x half> %val, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_store_nxv8f16:
 ; CHECK-NEXT: st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT: ret
-  call void @llvm.masked.store.nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half> *%a, i32 2, <vscale x 8 x i1> %mask)
+  call void @llvm.masked.store.nxv8f16(<vscale x 8 x half> %val, ptr %a, i32 2, <vscale x 8 x i1> %mask)
   ret void
 }
 
-define void @masked_store_nxv2bf16(<vscale x 2 x bfloat> *%a, <vscale x 2 x bfloat> %val, <vscale x 2 x i1> %mask) nounwind #0 {
+define void @masked_store_nxv2bf16(ptr %a, <vscale x 2 x bfloat> %val, <vscale x 2 x i1> %mask) nounwind #0 {
 ; CHECK-LABEL: masked_store_nxv2bf16:
 ; CHECK-NEXT: st1h { z0.d }, p0, [x0]
 ; CHECK-NEXT: ret
-  call void @llvm.masked.store.nxv2bf16(<vscale x 2 x bfloat> %val, <vscale x 2 x bfloat> *%a, i32 2, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.nxv2bf16(<vscale x 2 x bfloat> %val, ptr %a, i32 2, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_store_nxv4bf16(<vscale x 4 x bfloat> *%a, <vscale x 4 x bfloat> %val, <vscale x 4 x i1> %mask) nounwind #0 {
+define void @masked_store_nxv4bf16(ptr %a, <vscale x 4 x bfloat> %val, <vscale x 4 x i1> %mask) nounwind #0 {
 ; CHECK-LABEL: masked_store_nxv4bf16:
 ; CHECK-NEXT: st1h { z0.s }, p0, [x0]
 ; CHECK-NEXT: ret
-  call void @llvm.masked.store.nxv4bf16(<vscale x 4 x bfloat> %val, <vscale x 4 x bfloat> *%a, i32 2, <vscale x 4 x i1> %mask)
+  call void @llvm.masked.store.nxv4bf16(<vscale x 4 x bfloat> %val, ptr %a, i32 2, <vscale x 4 x i1> %mask)
   ret void
 }
 
-define void @masked_store_nxv8bf16(<vscale x 8 x bfloat> *%a, <vscale x 8 x bfloat> %val, <vscale x 8 x i1> %mask) nounwind #0 {
+define void @masked_store_nxv8bf16(ptr %a, <vscale x 8 x bfloat> %val, <vscale x 8 x i1> %mask) nounwind #0 {
 ; CHECK-LABEL: masked_store_nxv8bf16:
 ; CHECK-NEXT: st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT: ret
-  call void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat> %val, <vscale x 8 x bfloat> *%a, i32 2, <vscale x 8 x i1> %mask)
+  call void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat> %val, ptr %a, i32 2, <vscale x 8 x i1> %mask)
   ret void
 }
 
@@ -240,132 +240,124 @@ define void @masked_store_nxv8bf16(<vscale x 8 x bfloat> *%a, <vscale x 8 x bflo
 
 ; Pointer of integer type
 
-define <vscale x 2 x i8*> @masked.load.nxv2p0i8(<vscale x 2 x i8*>* %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x ptr> @masked.load.nxv2p0i8(ptr %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked.load.nxv2p0i8:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x i8*> @llvm.masked.load.nxv2p0i8.p0nxv2p0i8(<vscale x 2 x i8*>* %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i8*> undef)
-  ret <vscale x 2 x i8*> %v
+  %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> undef)
+  ret <vscale x 2 x ptr> %v
 }
-define <vscale x 2 x i16*> @masked.load.nxv2p0i16(<vscale x 2 x i16*>* %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x ptr> @masked.load.nxv2p0i16(ptr %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked.load.nxv2p0i16:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x i16*> @llvm.masked.load.nxv2p0i16.p0nxv2p0i16(<vscale x 2 x i16*>* %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i16*> undef)
-  ret <vscale x 2 x i16*> %v
+  %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> undef)
+  ret <vscale x 2 x ptr> %v
 }
-define <vscale x 2 x i32*> @masked.load.nxv2p0i32(<vscale x 2 x i32*>* %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x ptr> @masked.load.nxv2p0i32(ptr %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked.load.nxv2p0i32:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x i32*> @llvm.masked.load.nxv2p0i32.p0nxv2p0i32(<vscale x 2 x i32*>* %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i32*> undef)
-  ret <vscale x 2 x i32*> %v
+  %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> undef)
+  ret <vscale x 2 x ptr> %v
 }
-define <vscale x 2 x i64*> @masked.load.nxv2p0i64(<vscale x 2 x i64*>* %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x ptr> @masked.load.nxv2p0i64(ptr %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked.load.nxv2p0i64:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x i64*> @llvm.masked.load.nxv2p0i64.p0nxv2p0i64(<vscale x 2 x i64*>* %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64*> undef)
-  ret <vscale x 2 x i64*> %v
+  %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> undef)
+  ret <vscale x 2 x ptr> %v
 }
 
 ; Pointer of floating-point type
 
-define <vscale x 2 x bfloat*> @masked.load.nxv2p0bf16(<vscale x 2 x bfloat*>* %vector_ptr, <vscale x 2 x i1> %mask) nounwind #0 {
+define <vscale x 2 x ptr> @masked.load.nxv2p0bf16(ptr %vector_ptr, <vscale x 2 x i1> %mask) nounwind #0 {
 ; CHECK-LABEL: masked.load.nxv2p0bf16:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x bfloat*> @llvm.masked.load.nxv2p0bf16.p0nxv2p0bf16(<vscale x 2 x bfloat*>* %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat*> undef)
-  ret <vscale x 2 x bfloat*> %v
+  %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> undef)
+  ret <vscale x 2 x ptr> %v
 }
-define <vscale x 2 x half*> @masked.load.nxv2p0f16(<vscale x 2 x half*>* %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x ptr> @masked.load.nxv2p0f16(ptr %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked.load.nxv2p0f16:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x half*> @llvm.masked.load.nxv2p0f16.p0nxv2p0f16(<vscale x 2 x half*>* %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x half*> undef)
-  ret <vscale x 2 x half*> %v
+  %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> undef)
+  ret <vscale x 2 x ptr> %v
 }
-define <vscale x 2 x float*> @masked.load.nxv2p0f32(<vscale x 2 x float*>* %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x ptr> @masked.load.nxv2p0f32(ptr %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked.load.nxv2p0f32:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x float*> @llvm.masked.load.nxv2p0f32.p0nxv2p0f32(<vscale x 2 x float*>* %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x float*> undef)
-  ret <vscale x 2 x float*> %v
+  %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> undef)
+  ret <vscale x 2 x ptr> %v
 }
-define <vscale x 2 x double*> @masked.load.nxv2p0f64(<vscale x 2 x double*>* %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x ptr> @masked.load.nxv2p0f64(ptr %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked.load.nxv2p0f64:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x double*> @llvm.masked.load.nxv2p0f64.p0nxv2p0f64(<vscale x 2 x double*>* %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double*> undef)
-  ret <vscale x 2 x double*> %v
+  %v = call <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x ptr> undef)
+  ret <vscale x 2 x ptr> %v
 }
 
 ; Pointer of array type
 
-define void @masked.store.nxv2p0a64i16(<vscale x 2 x [64 x i16]*> %data, <vscale x 2 x [64 x i16]*>* %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
+define void @masked.store.nxv2p0a64i16(<vscale x 2 x ptr> %data, ptr %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked.store.nxv2p0a64i16:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv2p0a64i16.p0nxv2p0a64i16(<vscale x 2 x [64 x i16]*> %data, <vscale x 2 x [64 x i16]*>* %vector_ptr, i32 8, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.nxv2p0.p0(<vscale x 2 x ptr> %data, ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask)
   ret void
 }
 
 ; Pointer of struct type
 
-%struct = type { i8*, i32 }
-define void @masked.store.nxv2p0s_struct(<vscale x 2 x %struct*> %data, <vscale x 2 x %struct*>* %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
+%struct = type { ptr, i32 }
+define void @masked.store.nxv2p0s_struct(<vscale x 2 x ptr> %data, ptr %vector_ptr, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked.store.nxv2p0s_struct:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv2p0s_struct.p0nxv2p0s_struct(<vscale x 2 x %struct*> %data, <vscale x 2 x %struct*>* %vector_ptr, i32 8, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.nxv2p0.p0(<vscale x 2 x ptr> %data, ptr %vector_ptr, i32 8, <vscale x 2 x i1> %mask)
   ret void
 }
 
 
-declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>*, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
-declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>*, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
-declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>*, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
-declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>*, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
-declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>*, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
-
-declare <vscale x 2 x double> @llvm.masked.load.nxv2f64(<vscale x 2 x double>*, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
-declare <vscale x 2 x float> @llvm.masked.load.nxv2f32(<vscale x 2 x float>*, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
-declare <vscale x 2 x half> @llvm.masked.load.nxv2f16(<vscale x 2 x half>*, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
-declare <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float>*, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
-declare <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half>*, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
-declare <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>*, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
-declare <vscale x 2 x bfloat> @llvm.masked.load.nxv2bf16(<vscale x 2 x bfloat>*, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
-declare <vscale x 4 x bfloat> @llvm.masked.load.nxv4bf16(<vscale x 4 x bfloat>*, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
-declare <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(<vscale x 8 x bfloat>*, i32, <vscale x 8 x i1>, <vscale x 8 x bfloat>)
-
-declare void @llvm.masked.store.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.store.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32, <vscale x 8 x i1>)
-declare void @llvm.masked.store.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>*, i32, <vscale x 16 x i1>)
-
-declare void @llvm.masked.store.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>*, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.store.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>*, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.store.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>*, i32, <vscale x 8 x i1>)
-declare void @llvm.masked.store.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv4bf16(<vscale x 4 x bfloat>, <vscale x 4 x bfloat>*, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>*, i32, <vscale x 8 x i1>)
-
-declare <vscale x 2 x i8*> @llvm.masked.load.nxv2p0i8.p0nxv2p0i8(<vscale x 2 x i8*>*, i32 immarg, <vscale x 2 x i1>, <vscale x 2 x i8*>)
-declare <vscale x 2 x i16*> @llvm.masked.load.nxv2p0i16.p0nxv2p0i16(<vscale x 2 x i16*>*, i32 immarg, <vscale x 2 x i1>, <vscale x 2 x i16*>)
-declare <vscale x 2 x i32*> @llvm.masked.load.nxv2p0i32.p0nxv2p0i32(<vscale x 2 x i32*>*, i32 immarg, <vscale x 2 x i1>, <vscale x 2 x i32*>)
-declare <vscale x 2 x i64*> @llvm.masked.load.nxv2p0i64.p0nxv2p0i64(<vscale x 2 x i64*>*, i32 immarg, <vscale x 2 x i1>, <vscale x 2 x i64*>)
-
-declare <vscale x 2 x bfloat*> @llvm.masked.load.nxv2p0bf16.p0nxv2p0bf16(<vscale x 2 x bfloat*>*, i32 immarg, <vscale x 2 x i1>, <vscale x 2 x bfloat*>)
-declare <vscale x 2 x half*> @llvm.masked.load.nxv2p0f16.p0nxv2p0f16(<vscale x 2 x half*>*, i32 immarg, <vscale x 2 x i1>, <vscale x 2 x half*>)
-declare <vscale x 2 x float*> @llvm.masked.load.nxv2p0f32.p0nxv2p0f32(<vscale x 2 x float*>*, i32 immarg, <vscale x 2 x i1>, <vscale x 2 x float*>)
-declare <vscale x 2 x double*> @llvm.masked.load.nxv2p0f64.p0nxv2p0f64(<vscale x 2 x double*>*, i32 immarg, <vscale x 2 x i1>, <vscale x 2 x double*>)
-
-declare void @llvm.masked.store.nxv2p0a64i16.p0nxv2p0a64i16(<vscale x 2 x [64 x i16]*>, <vscale x 2 x [64 x i16]*>*, i32 immarg, <vscale x 2 x i1>)
-
-declare void @llvm.masked.store.nxv2p0s_struct.p0nxv2p0s_struct(<vscale x 2 x %struct*>, <vscale x 2 x %struct*>*, i32 immarg, <vscale x 2 x i1>)
+declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
+
+declare <vscale x 2 x double> @llvm.masked.load.nxv2f64(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+declare <vscale x 2 x float> @llvm.masked.load.nxv2f32(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
+declare <vscale x 2 x half> @llvm.masked.load.nxv2f16(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 4 x float> @llvm.masked.load.nxv4f32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+declare <vscale x 4 x half> @llvm.masked.load.nxv4f16(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 8 x half> @llvm.masked.load.nxv8f16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.load.nxv2bf16(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
+declare <vscale x 4 x bfloat> @llvm.masked.load.nxv4bf16(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
+declare <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x bfloat>)
+
+declare void @llvm.masked.store.nxv2i64(<vscale x 2 x i64>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv4i32(<vscale x 4 x i32>, ptr, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv8i16(<vscale x 8 x i16>, ptr, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv16i8(<vscale x 16 x i8>, ptr, i32, <vscale x 16 x i1>)
+
+declare void @llvm.masked.store.nxv2f64(<vscale x 2 x double>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2f32(<vscale x 2 x float>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2f16(<vscale x 2 x half>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv4f32(<vscale x 4 x float>, ptr, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4f16(<vscale x 4 x half>, ptr, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv8f16(<vscale x 8 x half>, ptr, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv2bf16(<vscale x 2 x bfloat>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv4bf16(<vscale x 4 x bfloat>, ptr, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat>, ptr, i32, <vscale x 8 x i1>)
+
+declare <vscale x 2 x ptr> @llvm.masked.load.nxv2p0.p0(ptr, i32 immarg, <vscale x 2 x i1>, <vscale x 2 x ptr>)
+
+
+declare void @llvm.masked.store.nxv2p0.p0(<vscale x 2 x ptr>, ptr, i32 immarg, <vscale x 2 x i1>)
+
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+sve,+bf16" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
index de8579652803b..40d889f1b501e 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-sext.ll
@@ -5,67 +5,67 @@
 ; Masked Loads
 ;
 
-define <vscale x 2 x i64> @masked_sload_nxv2i8(<vscale x 2 x i8> *%a, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sload_nxv2i8(ptr %a, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sload_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8> *%a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %ext = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %ext
 }
 
-define <vscale x 2 x i64> @masked_sload_nxv2i16(<vscale x 2 x i16> *%a, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sload_nxv2i16(ptr %a, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sload_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16> *%a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %ext = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %ext
 }
 
-define <vscale x 2 x i64> @masked_sload_nxv2i32(<vscale x 2 x i32> *%a, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sload_nxv2i32(ptr %a, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sload_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32> *%a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %ext = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %ext
 }
 
-define <vscale x 4 x i32> @masked_sload_nxv4i8(<vscale x 4 x i8> *%a, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_sload_nxv4i8(ptr %a, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_sload_nxv4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
   %ext = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %ext
 }
 
-define <vscale x 4 x i32> @masked_sload_nxv4i16(<vscale x 4 x i16> *%a, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_sload_nxv4i16(ptr %a, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_sload_nxv4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
   %ext = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %ext
 }
 
-define <vscale x 8 x i16> @masked_sload_nxv8i8(<vscale x 8 x i8> *%a, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i16> @masked_sload_nxv8i8(ptr %a, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: masked_sload_nxv8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
+  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
   %ext = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %ext
 }
 
-define <vscale x 2 x i64> @masked_sload_passthru(<vscale x 2 x i32> *%a, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru) {
+define <vscale x 2 x i64> @masked_sload_passthru(ptr %a, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru) {
 ; CHECK-LABEL: masked_sload_passthru:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p1.d
@@ -73,13 +73,13 @@ define <vscale x 2 x i64> @masked_sload_passthru(<vscale x 2 x i32> *%a, <vscale
 ; CHECK-NEXT:    sxtw z0.d, p1/m, z0.d
 ; CHECK-NEXT:    mov z0.d, p0/m, z1.d
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32> *%a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru)
+  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru)
   %ext = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %ext
 }
 
 ; Return type requires splitting
-define <vscale x 16 x i32> @masked_sload_nxv16i8(<vscale x 16 x i8>* %a, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i32> @masked_sload_nxv16i8(ptr %a, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: masked_sload_nxv16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
@@ -90,13 +90,13 @@ define <vscale x 16 x i32> @masked_sload_nxv16i8(<vscale x 16 x i8>* %a, <vscale
 ; CHECK-NEXT:    sunpklo z2.s, z3.h
 ; CHECK-NEXT:    sunpkhi z3.s, z3.h
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>* %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
   %ext = sext <vscale x 16 x i8> %load to <vscale x 16 x i32>
   ret <vscale x 16 x i32> %ext
 }
 
 ; Masked load requires promotion
-define <vscale x 4 x double> @masked_sload_4i8_4f32(<vscale x 4 x i8>* noalias %in, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x double> @masked_sload_4i8_4f32(ptr noalias %in, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_sload_4i8_4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0]
@@ -106,7 +106,7 @@ define <vscale x 4 x double> @masked_sload_4i8_4f32(<vscale x 4 x i8>* noalias %
 ; CHECK-NEXT:    scvtf z0.d, p1/m, z0.d
 ; CHECK-NEXT:    scvtf z1.d, p1/m, z1.d
 ; CHECK-NEXT:    ret
-  %wide.load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %in, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+  %wide.load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %in, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
   %sext = sext <vscale x 4 x i8> %wide.load to <vscale x 4 x i64>
   %res = sitofp <vscale x 4 x i64> %sext to <vscale x 4 x double>
   ret <vscale x 4 x double> %res
@@ -122,7 +122,7 @@ define <vscale x 4 x i64> @masked_sload_4i8_4i64(ptr %a, <vscale x 4 x i1> %b) {
 ; CHECK-NEXT:    sunpklo z0.d, z1.s
 ; CHECK-NEXT:    sunpkhi z1.d, z1.s
 ; CHECK-NEXT:    ret
-  %aval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%a, i32 16, <vscale x 4 x i1> %b, <vscale x 4 x i8> zeroinitializer)
+  %aval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %a, i32 16, <vscale x 4 x i1> %b, <vscale x 4 x i8> zeroinitializer)
   %aext = sext <vscale x 4 x i8> %aval to <vscale x 4 x i64>
   ret <vscale x 4 x i64> %aext
 }
@@ -134,7 +134,7 @@ define <vscale x 4 x i64> @masked_sload_4i16_4i64(ptr %a, <vscale x 4 x i1> %b)
 ; CHECK-NEXT:    sunpklo z0.d, z1.s
 ; CHECK-NEXT:    sunpkhi z1.d, z1.s
 ; CHECK-NEXT:    ret
-  %aval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%a, i32 16, <vscale x 4 x i1> %b, <vscale x 4 x i16> zeroinitializer)
+  %aval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %a, i32 16, <vscale x 4 x i1> %b, <vscale x 4 x i16> zeroinitializer)
   %aext = sext <vscale x 4 x i16> %aval to <vscale x 4 x i64>
   ret <vscale x 4 x i64> %aext
 }
@@ -146,7 +146,7 @@ define <vscale x 8 x i32> @masked_sload_8i8_8i32(ptr %a, <vscale x 8 x i1> %b) {
 ; CHECK-NEXT:    sunpklo z0.s, z1.h
 ; CHECK-NEXT:    sunpkhi z1.s, z1.h
 ; CHECK-NEXT:    ret
-  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 16, <vscale x 8 x i1> %b, <vscale x 8 x i8> zeroinitializer)
+  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 16, <vscale x 8 x i1> %b, <vscale x 8 x i8> zeroinitializer)
   %aext = sext <vscale x 8 x i8> %aval to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %aext
 }
@@ -162,7 +162,7 @@ define <vscale x 8 x i64> @masked_sload_8i8_8i64(ptr %a, <vscale x 8 x i1> %b) {
 ; CHECK-NEXT:    sunpklo z2.d, z3.s
 ; CHECK-NEXT:    sunpkhi z3.d, z3.s
 ; CHECK-NEXT:    ret
-  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 16, <vscale x 8 x i1> %b, <vscale x 8 x i8> zeroinitializer)
+  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 16, <vscale x 8 x i1> %b, <vscale x 8 x i8> zeroinitializer)
   %aext = sext <vscale x 8 x i8> %aval to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %aext
 }
@@ -179,8 +179,8 @@ define <vscale x 4 x i64> @masked_sload_x2_4i8_4i64(ptr %a, ptr %b, <vscale x 4
 ; CHECK-NEXT:    add z1.d, z1.d, z2.d
 ; CHECK-NEXT:    add z0.d, z0.d, z3.d
 ; CHECK-NEXT:    ret
-  %aval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
-  %bval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
+  %aval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
+  %bval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
   %aext = sext <vscale x 4 x i8> %aval to <vscale x 4 x i64>
   %bext = sext <vscale x 4 x i8> %bval to <vscale x 4 x i64>
   %res = add <vscale x 4 x i64> %aext, %bext
@@ -199,8 +199,8 @@ define <vscale x 4 x i64> @masked_sload_x2_4i16_4i64(ptr %a, ptr %b, <vscale x 4
 ; CHECK-NEXT:    add z1.d, z1.d, z2.d
 ; CHECK-NEXT:    add z0.d, z0.d, z3.d
 ; CHECK-NEXT:    ret
-  %aval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
-  %bval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
+  %aval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
+  %bval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
   %aext = sext <vscale x 4 x i16> %aval to <vscale x 4 x i64>
   %bext = sext <vscale x 4 x i16> %bval to <vscale x 4 x i64>
   %res = add <vscale x 4 x i64> %aext, %bext
@@ -219,8 +219,8 @@ define <vscale x 8 x i32> @masked_sload_x2_8i8_8i32(ptr %a, ptr %b, <vscale x 8
 ; CHECK-NEXT:    add z1.s, z1.s, z2.s
 ; CHECK-NEXT:    add z0.s, z0.s, z3.s
 ; CHECK-NEXT:    ret
-  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
-  %bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
+  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
+  %bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
   %aext = sext <vscale x 8 x i8> %aval to <vscale x 8 x i32>
   %bext = sext <vscale x 8 x i8> %bval to <vscale x 8 x i32>
   %res = add <vscale x 8 x i32> %aext, %bext
@@ -249,8 +249,8 @@ define <vscale x 8 x i64> @masked_sload_x2_8i8_8i64(ptr %a, ptr %b, <vscale x 8
 ; CHECK-NEXT:    add z1.d, z1.d, z7.d
 ; CHECK-NEXT:    add z0.d, z0.d, z4.d
 ; CHECK-NEXT:    ret
-  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
-  %bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
+  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
+  %bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
   %aext = sext <vscale x 8 x i8> %aval to <vscale x 8 x i64>
   %bext = sext <vscale x 8 x i8> %bval to <vscale x 8 x i64>
   %res = add <vscale x 8 x i64> %aext, %bext
@@ -258,10 +258,10 @@ define <vscale x 8 x i64> @masked_sload_x2_8i8_8i64(ptr %a, ptr %b, <vscale x 8
 }
 
 
-declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>*, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
-declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>*, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
-declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>*, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
-declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>*, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
-declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>*, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
-declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>*, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)
-declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>*, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
+declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
+declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
+declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)
+declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)

diff  --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-trunc.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-trunc.ll
index d2069da00de6a..63e7d9e27eab9 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-trunc.ll
@@ -4,63 +4,63 @@
 ; Masked Stores
 ;
 
-define void @masked_trunc_store_nxv2i8(<vscale x 2 x i64> *%a, <vscale x 2 x i64> %val, <vscale x 2 x i8> *%b, <vscale x 2 x i1> %mask) nounwind {
+define void @masked_trunc_store_nxv2i8(ptr %a, <vscale x 2 x i64> %val, ptr %b, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_nxv2i8:
 ; CHECK-NEXT: st1b { z0.d }, p0, [x1]
 ; CHECK-NEXT: ret
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i8>
-  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %trunc, <vscale x 2 x i8> *%b, i32 8, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %trunc, ptr %b, i32 8, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_trunc_store_nxv2i16(<vscale x 2 x i64> *%a, <vscale x 2 x i64> %val, <vscale x 2 x i16> *%b, <vscale x 2 x i1> %mask) nounwind {
+define void @masked_trunc_store_nxv2i16(ptr %a, <vscale x 2 x i64> %val, ptr %b, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_nxv2i16:
 ; CHECK-NEXT: st1h { z0.d }, p0, [x1]
 ; CHECK-NEXT: ret
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i16>
-  call void @llvm.masked.store.nxv2i16(<vscale x 2 x i16> %trunc, <vscale x 2 x i16> *%b, i32 8, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.nxv2i16(<vscale x 2 x i16> %trunc, ptr %b, i32 8, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_trunc_store_nxv2i32(<vscale x 2 x i64> *%a, <vscale x 2 x i64> %val, <vscale x 2 x i32> *%b, <vscale x 2 x i1> %mask) nounwind {
+define void @masked_trunc_store_nxv2i32(ptr %a, <vscale x 2 x i64> %val, ptr %b, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_nxv2i32:
 ; CHECK-NEXT: st1w { z0.d }, p0, [x1]
 ; CHECK-NEXT: ret
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i32>
-  call void @llvm.masked.store.nxv2i32(<vscale x 2 x i32> %trunc, <vscale x 2 x i32> *%b, i32 8, <vscale x 2 x i1> %mask)
+  call void @llvm.masked.store.nxv2i32(<vscale x 2 x i32> %trunc, ptr %b, i32 8, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_trunc_store_nxv4i8(<vscale x 4 x i32> *%a, <vscale x 4 x i32> %val, <vscale x 4 x i8> *%b, <vscale x 4 x i1> %mask) nounwind {
+define void @masked_trunc_store_nxv4i8(ptr %a, <vscale x 4 x i32> %val, ptr %b, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_nxv4i8:
 ; CHECK-NEXT: st1b { z0.s }, p0, [x1]
 ; CHECK-NEXT: ret
   %trunc = trunc <vscale x 4 x i32> %val to <vscale x 4 x i8>
-  call void @llvm.masked.store.nxv4i8(<vscale x 4 x i8> %trunc, <vscale x 4 x i8> *%b, i32 4, <vscale x 4 x i1> %mask)
+  call void @llvm.masked.store.nxv4i8(<vscale x 4 x i8> %trunc, ptr %b, i32 4, <vscale x 4 x i1> %mask)
   ret void
 }
 
-define void @masked_trunc_store_nxv4i16(<vscale x 4 x i32> *%a, <vscale x 4 x i32> %val, <vscale x 4 x i16> *%b, <vscale x 4 x i1> %mask) nounwind {
+define void @masked_trunc_store_nxv4i16(ptr %a, <vscale x 4 x i32> %val, ptr %b, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_nxv4i16:
 ; CHECK-NEXT: st1h { z0.s }, p0, [x1]
 ; CHECK-NEXT: ret
   %trunc = trunc <vscale x 4 x i32> %val to <vscale x 4 x i16>
-  call void @llvm.masked.store.nxv4i16(<vscale x 4 x i16> %trunc, <vscale x 4 x i16> *%b, i32 4, <vscale x 4 x i1> %mask)
+  call void @llvm.masked.store.nxv4i16(<vscale x 4 x i16> %trunc, ptr %b, i32 4, <vscale x 4 x i1> %mask)
   ret void
 }
 
-define void @masked_trunc_store_nxv8i8(<vscale x 8 x i16> *%a, <vscale x 8 x i16> %val, <vscale x 8 x i8> *%b, <vscale x 8 x i1> %mask) nounwind {
+define void @masked_trunc_store_nxv8i8(ptr %a, <vscale x 8 x i16> %val, ptr %b, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_nxv8i8:
 ; CHECK-NEXT: st1b { z0.h }, p0, [x1]
 ; CHECK-NEXT: ret
   %trunc = trunc <vscale x 8 x i16> %val to <vscale x 8 x i8>
-  call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %trunc, <vscale x 8 x i8> *%b, i32 2, <vscale x 8 x i1> %mask)
+  call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %trunc, ptr %b, i32 2, <vscale x 8 x i1> %mask)
   ret void
 }
 
-declare void @llvm.masked.store.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>*, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.store.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>*, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.store.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv2i8(<vscale x 2 x i8>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2i16(<vscale x 2 x i16>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2i32(<vscale x 2 x i32>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv4i8(<vscale x 4 x i8>, ptr, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4i16(<vscale x 4 x i16>, ptr, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv8i8(<vscale x 8 x i8>, ptr, i32, <vscale x 8 x i1>)

diff  --git a/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll b/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
index da338b7418728..e9860222613e9 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-ldst-zext.ll
@@ -5,80 +5,80 @@
 ; Masked Loads
 ;
 
-define <vscale x 2 x i64> @masked_zload_nxv2i8(<vscale x 2 x i8>* %src, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_zload_nxv2i8(ptr %src, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %ext = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %ext
 }
 
-define <vscale x 2 x i64> @masked_zload_nxv2i16(<vscale x 2 x i16>* %src, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_zload_nxv2i16(ptr %src, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %ext = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %ext
 }
 
-define <vscale x 2 x i64> @masked_zload_nxv2i32(<vscale x 2 x i32>* %src, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_zload_nxv2i32(ptr %src, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %ext = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %ext
 }
 
-define <vscale x 4 x i32> @masked_zload_nxv4i8(<vscale x 4 x i8>* %src, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_zload_nxv4i8(ptr %src, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %src, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %src, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
   %ext = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %ext
 }
 
-define <vscale x 4 x i32> @masked_zload_nxv4i16(<vscale x 4 x i16>* %src, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_zload_nxv4i16(ptr %src, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>* %src, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %src, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
   %ext = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %ext
 }
 
-define <vscale x 8 x i16> @masked_zload_nxv8i8(<vscale x 8 x i8>* %src, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i16> @masked_zload_nxv8i8(ptr %src, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %src, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
+  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %src, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
   %ext = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %ext
 }
 
-define <vscale x 2 x i64> @masked_zload_passthru(<vscale x 2 x i32>* %src, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru) {
+define <vscale x 2 x i64> @masked_zload_passthru(ptr %src, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru) {
 ; CHECK-LABEL: masked_zload_passthru:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z1.d }, p0/z, [x0]
 ; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
 ; CHECK-NEXT:    mov z0.d, p0/m, z1.d
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru)
+  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %src, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> %passthru)
   %ext = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %ext
 }
 
 ; Return type requires splitting
-define <vscale x 8 x i64> @masked_zload_nxv8i16(<vscale x 8 x i16>* %a, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i64> @masked_zload_nxv8i16(ptr %a, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_nxv8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
@@ -89,20 +89,20 @@ define <vscale x 8 x i64> @masked_zload_nxv8i16(<vscale x 8 x i16>* %a, <vscale
 ; CHECK-NEXT:    uunpklo z2.d, z3.s
 ; CHECK-NEXT:    uunpkhi z3.d, z3.s
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>* %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+  %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
   %ext = zext <vscale x 8 x i16> %load to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %ext
 }
 
 ; Masked load requires promotion
-define <vscale x 2 x double> @masked_zload_2i16_2f64(<vscale x 2 x i16>* noalias %in, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @masked_zload_2i16_2f64(ptr noalias %in, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_zload_2i16_2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ucvtf z0.d, p1/m, z0.d
 ; CHECK-NEXT:    ret
-  %wide.load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %in, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %wide.load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %in, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %zext = zext <vscale x 2 x i16> %wide.load to <vscale x 2 x i32>
   %res = uitofp <vscale x 2 x i32> %zext to <vscale x 2 x double>
   ret <vscale x 2 x double> %res
@@ -117,7 +117,7 @@ define <vscale x 4 x i64> @masked_zload_4i8_4i64(ptr %a, <vscale x 4 x i1> %b) {
 ; CHECK-NEXT:    uunpklo z0.d, z1.s
 ; CHECK-NEXT:    uunpkhi z1.d, z1.s
 ; CHECK-NEXT:    ret
-  %aval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%a, i32 16, <vscale x 4 x i1> %b, <vscale x 4 x i8> zeroinitializer)
+  %aval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %a, i32 16, <vscale x 4 x i1> %b, <vscale x 4 x i8> zeroinitializer)
   %aext = zext <vscale x 4 x i8> %aval to <vscale x 4 x i64>
   ret <vscale x 4 x i64> %aext
 }
@@ -129,7 +129,7 @@ define <vscale x 4 x i64> @masked_zload_4i16_4i64(ptr %a, <vscale x 4 x i1> %b)
 ; CHECK-NEXT:    uunpklo z0.d, z1.s
 ; CHECK-NEXT:    uunpkhi z1.d, z1.s
 ; CHECK-NEXT:    ret
-  %aval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%a, i32 16, <vscale x 4 x i1> %b, <vscale x 4 x i16> zeroinitializer)
+  %aval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %a, i32 16, <vscale x 4 x i1> %b, <vscale x 4 x i16> zeroinitializer)
   %aext = zext <vscale x 4 x i16> %aval to <vscale x 4 x i64>
   ret <vscale x 4 x i64> %aext
 }
@@ -141,7 +141,7 @@ define <vscale x 8 x i32> @masked_zload_8i8_8i32(ptr %a, <vscale x 8 x i1> %b) {
 ; CHECK-NEXT:    uunpklo z0.s, z1.h
 ; CHECK-NEXT:    uunpkhi z1.s, z1.h
 ; CHECK-NEXT:    ret
-  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 16, <vscale x 8 x i1> %b, <vscale x 8 x i8> zeroinitializer)
+  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 16, <vscale x 8 x i1> %b, <vscale x 8 x i8> zeroinitializer)
   %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %aext
 }
@@ -157,7 +157,7 @@ define <vscale x 8 x i64> @masked_zload_8i8_8i64(ptr %a, <vscale x 8 x i1> %b) {
 ; CHECK-NEXT:    uunpklo z2.d, z3.s
 ; CHECK-NEXT:    uunpkhi z3.d, z3.s
 ; CHECK-NEXT:    ret
-  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 16, <vscale x 8 x i1> %b, <vscale x 8 x i8> zeroinitializer)
+  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 16, <vscale x 8 x i1> %b, <vscale x 8 x i8> zeroinitializer)
   %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %aext
 }
@@ -174,8 +174,8 @@ define <vscale x 4 x i64> @masked_zload_x2_4i8_4i64(ptr %a, ptr %b, <vscale x 4
 ; CHECK-NEXT:    add z1.d, z1.d, z2.d
 ; CHECK-NEXT:    add z0.d, z0.d, z3.d
 ; CHECK-NEXT:    ret
-  %aval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
-  %bval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
+  %aval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
+  %bval = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i8> zeroinitializer)
   %aext = zext <vscale x 4 x i8> %aval to <vscale x 4 x i64>
   %bext = zext <vscale x 4 x i8> %bval to <vscale x 4 x i64>
   %res = add <vscale x 4 x i64> %aext, %bext
@@ -194,8 +194,8 @@ define <vscale x 4 x i64> @masked_zload_x2_4i16_4i64(ptr %a, ptr %b, <vscale x 4
 ; CHECK-NEXT:    add z1.d, z1.d, z2.d
 ; CHECK-NEXT:    add z0.d, z0.d, z3.d
 ; CHECK-NEXT:    ret
-  %aval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
-  %bval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
+  %aval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %a, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
+  %bval = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %b, i32 16, <vscale x 4 x i1> %c, <vscale x 4 x i16> zeroinitializer)
   %aext = zext <vscale x 4 x i16> %aval to <vscale x 4 x i64>
   %bext = zext <vscale x 4 x i16> %bval to <vscale x 4 x i64>
   %res = add <vscale x 4 x i64> %aext, %bext
@@ -214,8 +214,8 @@ define <vscale x 8 x i32> @masked_zload_x2_8i8_8i32(ptr %a, ptr %b, <vscale x 8
 ; CHECK-NEXT:    add z1.s, z1.s, z2.s
 ; CHECK-NEXT:    add z0.s, z0.s, z3.s
 ; CHECK-NEXT:    ret
-  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
-  %bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
+  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
+  %bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
   %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i32>
   %bext = zext <vscale x 8 x i8> %bval to <vscale x 8 x i32>
   %res = add <vscale x 8 x i32> %aext, %bext
@@ -244,8 +244,8 @@ define <vscale x 8 x i64> @masked_zload_x2_8i8_8i64(ptr %a, ptr %b, <vscale x 8
 ; CHECK-NEXT:    add z1.d, z1.d, z7.d
 ; CHECK-NEXT:    add z0.d, z0.d, z4.d
 ; CHECK-NEXT:    ret
-  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
-  %bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
+  %aval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %a, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
+  %bval = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %b, i32 16, <vscale x 8 x i1> %c, <vscale x 8 x i8> zeroinitializer)
   %aext = zext <vscale x 8 x i8> %aval to <vscale x 8 x i64>
   %bext = zext <vscale x 8 x i8> %bval to <vscale x 8 x i64>
   %res = add <vscale x 8 x i64> %aext, %bext
@@ -253,10 +253,10 @@ define <vscale x 8 x i64> @masked_zload_x2_8i8_8i64(ptr %a, ptr %b, <vscale x 8
 }
 
 
-declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>*, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
-declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>*, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
-declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>*, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
-declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>*, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
-declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>*, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
-declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>*, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)
-declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>*, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
+declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
+declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
+declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)
+declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)

diff  --git a/llvm/test/CodeGen/AArch64/sve-masked-scatter.ll b/llvm/test/CodeGen/AArch64/sve-masked-scatter.ll
index 5a7287bb61885..e866474942cd7 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-scatter.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-scatter.ll
@@ -1,75 +1,75 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
 
-define void @masked_scatter_nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.d }, p0, [z1.d]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  call void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [z1.d]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i16*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [z1.d]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i32*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64*> %ptrs, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [z1.d]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [z1.d]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x bfloat*> %ptrs, <vscale x 2 x i1> %masks) nounwind #0 {
+define void @masked_scatter_nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %masks) nounwind #0 {
 ; CHECK-LABEL: masked_scatter_nxv2bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [z1.d]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x bfloat*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x float*> %ptrs, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [z1.d]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x float*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double*> %ptrs, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [z1.d]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
@@ -83,8 +83,8 @@ define void @masked_scatter_splat_constant_pointer (<vscale x 4 x i1> %pg) {
 ; CHECK-NEXT:    st1w { z0.d }, p0, [z0.d]
 ; CHECK-NEXT:    ret
 vector.body:
-  call void @llvm.masked.scatter.nxv4i32.nxv4p0i32(<vscale x 4 x i32> undef,
-    <vscale x 4 x i32*> shufflevector (<vscale x 4 x i32*> insertelement (<vscale x 4 x i32*> poison, i32* null, i32 0), <vscale x 4 x i32*> poison, <vscale x 4 x i32> zeroinitializer),
+  call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> undef,
+    <vscale x 4 x ptr> shufflevector (<vscale x 4 x ptr> insertelement (<vscale x 4 x ptr> poison, ptr null, i32 0), <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer),
     i32 4,
     <vscale x 4 x i1> %pg)
   ret void
@@ -114,13 +114,13 @@ define void @masked_scatter_non_element_type_based_scaling(<vscale x 2 x double>
   ret void
 }
 
-declare void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv4i32.nxv4p0i32(<vscale x 4 x i32>, <vscale x 4 x i32*>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
 attributes #0 = { "target-features"="+sve,+bf16" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
index c5a3945cd0474..0ac8a946ec859 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
@@ -6,7 +6,7 @@
 ; range values are tested only in one case (following). Valid values
 ; are tested all through the rest of the file.
 
-define void @imm_out_of_range(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mask) nounwind {
+define void @imm_out_of_range(ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: imm_out_of_range:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #8
@@ -16,14 +16,14 @@ define void @imm_out_of_range(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mas
 ; CHECK-NEXT:    add x8, x0, x8
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 8
-  %data = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>* %base_load,
+  %base_load = getelementptr <vscale x 2 x i64>, ptr %base, i64 8
+  %data = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr %base_load,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i64> undef)
-  %base_store = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64> * %base, i64 -9
+  %base_store = getelementptr <vscale x 2 x i64>, ptr %base, i64 -9
   call void @llvm.masked.store.nxv2i64(<vscale x 2 x i64> %data,
-                                       <vscale x 2 x i64>* %base_store,
+                                       ptr %base_store,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
@@ -31,136 +31,136 @@ define void @imm_out_of_range(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mas
 
 ; 2-lane contiguous load/stores
 
-define void @test_masked_ldst_sv2i8(<vscale x 2 x i8> * %base, <vscale x 2 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv2i8(ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, #-8, mul vl]
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0, #-7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base, i64 -8
-  %data = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %base_load,
+  %base_load = getelementptr <vscale x 2 x i8>, ptr %base, i64 -8
+  %data = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %base_load,
                                                           i32 1,
                                                           <vscale x 2 x i1> %mask,
                                                           <vscale x 2 x i8> undef)
-  %base_store = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8> * %base, i64 -7
+  %base_store = getelementptr <vscale x 2 x i8>, ptr %base, i64 -7
   call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %data,
-                                      <vscale x 2 x i8>* %base_store,
+                                      ptr %base_store,
                                       i32 1,
                                       <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv2i16(<vscale x 2 x i16> * %base, <vscale x 2 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv2i16(ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, #-8, mul vl]
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, #-7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base, i64 -8
-  %data = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %base_load,
+  %base_load = getelementptr <vscale x 2 x i16>, ptr %base, i64 -8
+  %data = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %base_load,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i16> undef)
-  %base_store = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16> * %base, i64 -7
+  %base_store = getelementptr <vscale x 2 x i16>, ptr %base, i64 -7
   call void @llvm.masked.store.nxv2i16(<vscale x 2 x i16> %data,
-                                       <vscale x 2 x i16>* %base_store,
+                                       ptr %base_store,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
 }
 
 
-define void @test_masked_ldst_sv2i32(<vscale x 2 x i32> * %base, <vscale x 2 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv2i32(ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, #-8, mul vl]
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, #-7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base, i64 -8
-  %data = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %base_load,
+  %base_load = getelementptr <vscale x 2 x i32>, ptr %base, i64 -8
+  %data = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %base_load,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i32> undef)
-  %base_store = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32> * %base, i64 -7
+  %base_store = getelementptr <vscale x 2 x i32>, ptr %base, i64 -7
   call void @llvm.masked.store.nxv2i32(<vscale x 2 x i32> %data,
-                                       <vscale x 2 x i32>* %base_store,
+                                       ptr %base_store,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv2i64(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv2i64(ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, #-8, mul vl]
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, #-7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 -8
-  %data = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>* %base_load,
+  %base_load = getelementptr <vscale x 2 x i64>, ptr %base, i64 -8
+  %data = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr %base_load,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i64> undef)
-  %base_store = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64> * %base, i64 -7
+  %base_store = getelementptr <vscale x 2 x i64>, ptr %base, i64 -7
   call void @llvm.masked.store.nxv2i64(<vscale x 2 x i64> %data,
-                                       <vscale x 2 x i64>* %base_store,
+                                       ptr %base_store,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv2f16(<vscale x 2 x half> * %base, <vscale x 2 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv2f16(ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, #-8, mul vl]
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, #-7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x half>, <vscale x 2 x half>* %base, i64 -8
-  %data = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(<vscale x 2 x half>* %base_load,
+  %base_load = getelementptr <vscale x 2 x half>, ptr %base, i64 -8
+  %data = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(ptr %base_load,
                                                              i32 1,
                                                              <vscale x 2 x i1> %mask,
                                                              <vscale x 2 x half> undef)
-  %base_store = getelementptr <vscale x 2 x half>, <vscale x 2 x half> * %base, i64 -7
+  %base_store = getelementptr <vscale x 2 x half>, ptr %base, i64 -7
   call void @llvm.masked.store.nxv2f16(<vscale x 2 x half> %data,
-                                       <vscale x 2 x half>* %base_store,
+                                       ptr %base_store,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
 }
 
 
-define void @test_masked_ldst_sv2f32(<vscale x 2 x float> * %base, <vscale x 2 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv2f32(ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, #-8, mul vl]
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, #-7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x float>, <vscale x 2 x float>* %base, i64 -8
-  %data = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(<vscale x 2 x float>* %base_load,
+  %base_load = getelementptr <vscale x 2 x float>, ptr %base, i64 -8
+  %data = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(ptr %base_load,
                                                               i32 1,
                                                               <vscale x 2 x i1> %mask,
                                                               <vscale x 2 x float> undef)
-  %base_store = getelementptr <vscale x 2 x float>, <vscale x 2 x float> * %base, i64 -7
+  %base_store = getelementptr <vscale x 2 x float>, ptr %base, i64 -7
   call void @llvm.masked.store.nxv2f32(<vscale x 2 x float> %data,
-                                       <vscale x 2 x float>* %base_store,
+                                       ptr %base_store,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv2f64(<vscale x 2 x double> * %base, <vscale x 2 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv2f64(ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, #-6, mul vl]
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, #-5, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %base, i64 -6
-  %data = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(<vscale x 2 x double>* %base_load,
+  %base_load = getelementptr <vscale x 2 x double>, ptr %base, i64 -6
+  %data = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(ptr %base_load,
                                                                i32 1,
                                                                <vscale x 2 x i1> %mask,
                                                                <vscale x 2 x double> undef)
-  %base_store = getelementptr <vscale x 2 x double>, <vscale x 2 x double> * %base, i64 -5
+  %base_store = getelementptr <vscale x 2 x double>, ptr %base, i64 -5
   call void @llvm.masked.store.nxv2f64(<vscale x 2 x double> %data,
-                                       <vscale x 2 x double>* %base_store,
+                                       ptr %base_store,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
@@ -168,13 +168,13 @@ define void @test_masked_ldst_sv2f64(<vscale x 2 x double> * %base, <vscale x 2
 
 ; 2-lane zero/sign extended contiguous loads.
 
-define <vscale x 2 x i64> @masked_zload_sv2i8_to_sv2i64(<vscale x 2 x i8>* %base, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x i64> @masked_zload_sv2i8_to_sv2i64(ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_zload_sv2i8_to_sv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, #-4, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base, i64 -4
-  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %base_load,
+  %base_load = getelementptr <vscale x 2 x i8>, ptr %base, i64 -4
+  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %base_load,
                                                           i32 1,
                                                           <vscale x 2 x i1> %mask,
                                                           <vscale x 2 x i8> undef)
@@ -182,13 +182,13 @@ define <vscale x 2 x i64> @masked_zload_sv2i8_to_sv2i64(<vscale x 2 x i8>* %base
   ret <vscale x 2 x i64> %ext
 }
 
-define <vscale x 2 x i64> @masked_sload_sv2i8_to_sv2i64(<vscale x 2 x i8>* %base, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x i64> @masked_sload_sv2i8_to_sv2i64(ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_sload_sv2i8_to_sv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, #-3, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base, i64 -3
-  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %base_load,
+  %base_load = getelementptr <vscale x 2 x i8>, ptr %base, i64 -3
+  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %base_load,
                                                           i32 1,
                                                           <vscale x 2 x i1> %mask,
                                                           <vscale x 2 x i8> undef)
@@ -196,13 +196,13 @@ define <vscale x 2 x i64> @masked_sload_sv2i8_to_sv2i64(<vscale x 2 x i8>* %base
   ret <vscale x 2 x i64> %ext
 }
 
-define <vscale x 2 x i64> @masked_zload_sv2i16_to_sv2i64(<vscale x 2 x i16>* %base, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x i64> @masked_zload_sv2i16_to_sv2i64(ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_zload_sv2i16_to_sv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base, i64 1
-  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %base_load,
+  %base_load = getelementptr <vscale x 2 x i16>, ptr %base, i64 1
+  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %base_load,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i16> undef)
@@ -210,13 +210,13 @@ define <vscale x 2 x i64> @masked_zload_sv2i16_to_sv2i64(<vscale x 2 x i16>* %ba
   ret <vscale x 2 x i64> %ext
 }
 
-define <vscale x 2 x i64> @masked_sload_sv2i16_to_sv2i64(<vscale x 2 x i16>* %base, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x i64> @masked_sload_sv2i16_to_sv2i64(ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_sload_sv2i16_to_sv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base, i64 2
-  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %base_load,
+  %base_load = getelementptr <vscale x 2 x i16>, ptr %base, i64 2
+  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %base_load,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i16> undef)
@@ -224,13 +224,13 @@ define <vscale x 2 x i64> @masked_sload_sv2i16_to_sv2i64(<vscale x 2 x i16>* %ba
   ret <vscale x 2 x i64> %ext
 }
 
-define <vscale x 2 x i64> @masked_zload_sv2i32_to_sv2i64(<vscale x 2 x i32>* %base, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x i64> @masked_zload_sv2i32_to_sv2i64(ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_zload_sv2i32_to_sv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, #-2, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base, i64 -2
-  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %base_load,
+  %base_load = getelementptr <vscale x 2 x i32>, ptr %base, i64 -2
+  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %base_load,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i32> undef)
@@ -238,13 +238,13 @@ define <vscale x 2 x i64> @masked_zload_sv2i32_to_sv2i64(<vscale x 2 x i32>* %ba
   ret <vscale x 2 x i64> %ext
 }
 
-define <vscale x 2 x i64> @masked_sload_sv2i32_to_sv2i64(<vscale x 2 x i32>* %base, <vscale x 2 x i1> %mask) nounwind {
+define <vscale x 2 x i64> @masked_sload_sv2i32_to_sv2i64(ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_sload_sv2i32_to_sv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, #-1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base, i64 -1
-  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %base_load,
+  %base_load = getelementptr <vscale x 2 x i32>, ptr %base, i64 -1
+  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %base_load,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i32> undef)
@@ -254,44 +254,44 @@ define <vscale x 2 x i64> @masked_sload_sv2i32_to_sv2i64(<vscale x 2 x i32>* %ba
 
 ; 2-lane truncating contiguous stores.
 
-define void @masked_trunc_store_sv2i64_to_sv2i8(<vscale x 2 x i64> %val, <vscale x 2 x i8> *%base, <vscale x 2 x i1> %mask) nounwind {
+define void @masked_trunc_store_sv2i64_to_sv2i8(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv2i64_to_sv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0, #3, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base, i64 3
+  %base_load = getelementptr <vscale x 2 x i8>, ptr %base, i64 3
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i8>
   call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %trunc,
-                                      <vscale x 2 x i8> *%base_load,
+                                      ptr %base_load,
                                       i32 1,
                                       <vscale x 2 x i1> %mask)
   ret void
 }
 
 
-define void @masked_trunc_store_sv2i64_to_sv2i16(<vscale x 2 x i64> %val, <vscale x 2 x i16> *%base, <vscale x 2 x i1> %mask) nounwind {
+define void @masked_trunc_store_sv2i64_to_sv2i16(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv2i64_to_sv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, #4, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base, i64 4
+  %base_load = getelementptr <vscale x 2 x i16>, ptr %base, i64 4
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i16>
   call void @llvm.masked.store.nxv2i16(<vscale x 2 x i16> %trunc,
-                                       <vscale x 2 x i16> *%base_load,
+                                       ptr %base_load,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_trunc_store_sv2i64_to_sv2i32(<vscale x 2 x i64> %val, <vscale x 2 x i32> *%base, <vscale x 2 x i1> %mask) nounwind {
+define void @masked_trunc_store_sv2i64_to_sv2i32(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv2i64_to_sv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, #5, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base, i64 5
+  %base_load = getelementptr <vscale x 2 x i32>, ptr %base, i64 5
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i32>
   call void @llvm.masked.store.nxv2i32(<vscale x 2 x i32> %trunc,
-                                       <vscale x 2 x i32> *%base_load,
+                                       ptr %base_load,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
@@ -299,96 +299,96 @@ define void @masked_trunc_store_sv2i64_to_sv2i32(<vscale x 2 x i64> %val, <vscal
 
 ; 4-lane contiguous load/stores.
 
-define void @test_masked_ldst_sv4i8(<vscale x 4 x i8> * %base, <vscale x 4 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv4i8(ptr %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0, #-1, mul vl]
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base, i64 -1
-  %data = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %base_load,
+  %base_load = getelementptr <vscale x 4 x i8>, ptr %base, i64 -1
+  %data = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %base_load,
                                                           i32 1,
                                                           <vscale x 4 x i1> %mask,
                                                           <vscale x 4 x i8> undef)
-  %base_store = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8> * %base, i64 2
+  %base_store = getelementptr <vscale x 4 x i8>, ptr %base, i64 2
   call void @llvm.masked.store.nxv4i8(<vscale x 4 x i8> %data,
-                                      <vscale x 4 x i8>* %base_store,
+                                      ptr %base_store,
                                       i32 1,
                                       <vscale x 4 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv4i16(<vscale x 4 x i16> * %base, <vscale x 4 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv4i16(ptr %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, #-1, mul vl]
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base, i64 -1
-  %data = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>* %base_load,
+  %base_load = getelementptr <vscale x 4 x i16>, ptr %base, i64 -1
+  %data = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %base_load,
                                                             i32 1,
                                                             <vscale x 4 x i1> %mask,
                                                             <vscale x 4 x i16> undef)
-  %base_store = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16> * %base, i64 2
+  %base_store = getelementptr <vscale x 4 x i16>, ptr %base, i64 2
   call void @llvm.masked.store.nxv4i16(<vscale x 4 x i16> %data,
-                                       <vscale x 4 x i16>* %base_store,
+                                       ptr %base_store,
                                        i32 1,
                                        <vscale x 4 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv4i32(<vscale x 4 x i32> * %base, <vscale x 4 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv4i32(ptr %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, #6, mul vl]
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %base, i64 6
-  %data = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>* %base_load,
+  %base_load = getelementptr <vscale x 4 x i32>, ptr %base, i64 6
+  %data = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %base_load,
                                                             i32 1,
                                                             <vscale x 4 x i1> %mask,
                                                             <vscale x 4 x i32> undef)
-  %base_store = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32> * %base, i64 7
+  %base_store = getelementptr <vscale x 4 x i32>, ptr %base, i64 7
   call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %data,
-                                       <vscale x 4 x i32>* %base_store,
+                                       ptr %base_store,
                                        i32 1,
                                        <vscale x 4 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv4f16(<vscale x 4 x half> * %base, <vscale x 4 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv4f16(ptr %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, #-1, mul vl]
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 4 x half>, <vscale x 4 x half>* %base, i64 -1
-  %data = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half>* %base_load,
+  %base_load = getelementptr <vscale x 4 x half>, ptr %base, i64 -1
+  %data = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(ptr %base_load,
                                                              i32 1,
                                                              <vscale x 4 x i1> %mask,
                                                              <vscale x 4 x half> undef)
-  %base_store = getelementptr <vscale x 4 x half>, <vscale x 4 x half> * %base, i64 2
+  %base_store = getelementptr <vscale x 4 x half>, ptr %base, i64 2
   call void @llvm.masked.store.nxv4f16(<vscale x 4 x half> %data,
-                                       <vscale x 4 x half>* %base_store,
+                                       ptr %base_store,
                                        i32 1,
                                        <vscale x 4 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv4f32(<vscale x 4 x float> * %base, <vscale x 4 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv4f32(ptr %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, #-1, mul vl]
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %base, i64 -1
-  %data = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float>* %base_load,
+  %base_load = getelementptr <vscale x 4 x float>, ptr %base, i64 -1
+  %data = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(ptr %base_load,
                                                               i32 1,
                                                               <vscale x 4 x i1> %mask,
                                                               <vscale x 4 x float> undef)
-  %base_store = getelementptr <vscale x 4 x float>, <vscale x 4 x float> * %base, i64 2
+  %base_store = getelementptr <vscale x 4 x float>, ptr %base, i64 2
   call void @llvm.masked.store.nxv4f32(<vscale x 4 x float> %data,
-                                       <vscale x 4 x float>* %base_store,
+                                       ptr %base_store,
                                        i32 1,
                                        <vscale x 4 x i1> %mask)
   ret void
@@ -396,13 +396,13 @@ define void @test_masked_ldst_sv4f32(<vscale x 4 x float> * %base, <vscale x 4 x
 
 ; 4-lane zero/sign extended contiguous loads.
 
-define <vscale x 4 x i32> @masked_zload_sv4i8_to_sv4i32(<vscale x 4 x i8>* %base, <vscale x 4 x i1> %mask) nounwind {
+define <vscale x 4 x i32> @masked_zload_sv4i8_to_sv4i32(ptr %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_zload_sv4i8_to_sv4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0, #-4, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base, i64 -4
-  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %base_load,
+  %base_load = getelementptr <vscale x 4 x i8>, ptr %base, i64 -4
+  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %base_load,
                                                           i32 1,
                                                           <vscale x 4 x i1> %mask,
                                                           <vscale x 4 x i8> undef)
@@ -410,13 +410,13 @@ define <vscale x 4 x i32> @masked_zload_sv4i8_to_sv4i32(<vscale x 4 x i8>* %base
   ret <vscale x 4 x i32> %ext
 }
 
-define <vscale x 4 x i32> @masked_sload_sv4i8_to_sv4i32(<vscale x 4 x i8>* %base, <vscale x 4 x i1> %mask) nounwind {
+define <vscale x 4 x i32> @masked_sload_sv4i8_to_sv4i32(ptr %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_sload_sv4i8_to_sv4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0, #-3, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base, i64 -3
-  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %base_load,
+  %base_load = getelementptr <vscale x 4 x i8>, ptr %base, i64 -3
+  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %base_load,
                                                           i32 1,
                                                           <vscale x 4 x i1> %mask,
                                                           <vscale x 4 x i8> undef)
@@ -424,13 +424,13 @@ define <vscale x 4 x i32> @masked_sload_sv4i8_to_sv4i32(<vscale x 4 x i8>* %base
   ret <vscale x 4 x i32> %ext
 }
 
-define <vscale x 4 x i32> @masked_zload_sv4i16_to_sv4i32(<vscale x 4 x i16>* %base, <vscale x 4 x i1> %mask) nounwind {
+define <vscale x 4 x i32> @masked_zload_sv4i16_to_sv4i32(ptr %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_zload_sv4i16_to_sv4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base, i64 1
-  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>* %base_load,
+  %base_load = getelementptr <vscale x 4 x i16>, ptr %base, i64 1
+  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %base_load,
                                                             i32 1,
                                                             <vscale x 4 x i1> %mask,
                                                             <vscale x 4 x i16> undef)
@@ -438,13 +438,13 @@ define <vscale x 4 x i32> @masked_zload_sv4i16_to_sv4i32(<vscale x 4 x i16>* %ba
   ret <vscale x 4 x i32> %ext
 }
 
-define <vscale x 4 x i32> @masked_sload_sv4i16_to_sv4i32(<vscale x 4 x i16>* %base, <vscale x 4 x i1> %mask) nounwind {
+define <vscale x 4 x i32> @masked_sload_sv4i16_to_sv4i32(ptr %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_sload_sv4i16_to_sv4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base, i64 2
-  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>* %base_load,
+  %base_load = getelementptr <vscale x 4 x i16>, ptr %base, i64 2
+  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %base_load,
                                                             i32 1,
                                                             <vscale x 4 x i1> %mask,
                                                             <vscale x 4 x i16> undef)
@@ -454,30 +454,30 @@ define <vscale x 4 x i32> @masked_sload_sv4i16_to_sv4i32(<vscale x 4 x i16>* %ba
 
 ; 4-lane truncating contiguous stores.
 
-define void @masked_trunc_store_sv4i32_to_sv4i8(<vscale x 4 x i32> %val, <vscale x 4 x i8> *%base, <vscale x 4 x i1> %mask) nounwind {
+define void @masked_trunc_store_sv4i32_to_sv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv4i32_to_sv4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x0, #3, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base, i64 3
+  %base_load = getelementptr <vscale x 4 x i8>, ptr %base, i64 3
   %trunc = trunc <vscale x 4 x i32> %val to <vscale x 4 x i8>
   call void @llvm.masked.store.nxv4i8(<vscale x 4 x i8> %trunc,
-                                      <vscale x 4 x i8> *%base_load,
+                                      ptr %base_load,
                                       i32 1,
                                       <vscale x 4 x i1> %mask)
   ret void
 }
 
 
-define void @masked_trunc_store_sv4i32_to_sv4i16(<vscale x 4 x i32> %val, <vscale x 4 x i16> *%base, <vscale x 4 x i1> %mask) nounwind {
+define void @masked_trunc_store_sv4i32_to_sv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv4i32_to_sv4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, #4, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base, i64 4
+  %base_load = getelementptr <vscale x 4 x i16>, ptr %base, i64 4
   %trunc = trunc <vscale x 4 x i32> %val to <vscale x 4 x i16>
   call void @llvm.masked.store.nxv4i16(<vscale x 4 x i16> %trunc,
-                                       <vscale x 4 x i16> *%base_load,
+                                       ptr %base_load,
                                        i32 1,
                                        <vscale x 4 x i1> %mask)
   ret void
@@ -485,77 +485,77 @@ define void @masked_trunc_store_sv4i32_to_sv4i16(<vscale x 4 x i32> %val, <vscal
 
 ; 8-lane contiguous load/stores.
 
-define void @test_masked_ldst_sv8i8(<vscale x 8 x i8> * %base, <vscale x 8 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv8i8(ptr %base, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0, #6, mul vl]
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base, i64 6
-  %data = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %base_load,
+  %base_load = getelementptr <vscale x 8 x i8>, ptr %base, i64 6
+  %data = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %base_load,
                                                           i32 1,
                                                           <vscale x 8 x i1> %mask,
                                                           <vscale x 8 x i8> undef)
-  %base_store = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8> * %base, i64 7
+  %base_store = getelementptr <vscale x 8 x i8>, ptr %base, i64 7
   call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %data,
-                                      <vscale x 8 x i8>* %base_store,
+                                      ptr %base_store,
                                       i32 1,
                                       <vscale x 8 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv8i16(<vscale x 8 x i16> * %base, <vscale x 8 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv8i16(ptr %base, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, #6, mul vl]
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %base, i64 6
-  %data = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>* %base_load,
+  %base_load = getelementptr <vscale x 8 x i16>, ptr %base, i64 6
+  %data = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %base_load,
                                                             i32 1,
                                                             <vscale x 8 x i1> %mask,
                                                             <vscale x 8 x i16> undef)
-  %base_store = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16> * %base, i64 7
+  %base_store = getelementptr <vscale x 8 x i16>, ptr %base, i64 7
   call void @llvm.masked.store.nxv8i16(<vscale x 8 x i16> %data,
-                                       <vscale x 8 x i16>* %base_store,
+                                       ptr %base_store,
                                        i32 1,
                                        <vscale x 8 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv8f16(<vscale x 8 x half> * %base, <vscale x 8 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv8f16(ptr %base, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, #-1, mul vl]
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %base, i64 -1
-  %data = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>* %base_load,
+  %base_load = getelementptr <vscale x 8 x half>, ptr %base, i64 -1
+  %data = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(ptr %base_load,
                                                              i32 1,
                                                              <vscale x 8 x i1> %mask,
                                                              <vscale x 8 x half> undef)
-  %base_store = getelementptr <vscale x 8 x half>, <vscale x 8 x half> * %base, i64 2
+  %base_store = getelementptr <vscale x 8 x half>, ptr %base, i64 2
   call void @llvm.masked.store.nxv8f16(<vscale x 8 x half> %data,
-                                       <vscale x 8 x half>* %base_store,
+                                       ptr %base_store,
                                        i32 1,
                                        <vscale x 8 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv8bf16(<vscale x 8 x bfloat> * %base, <vscale x 8 x i1> %mask) nounwind #0 {
+define void @test_masked_ldst_sv8bf16(ptr %base, <vscale x 8 x i1> %mask) nounwind #0 {
 ; CHECK-LABEL: test_masked_ldst_sv8bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, #-1, mul vl]
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %base, i64 -1
-  %data = call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(<vscale x 8 x bfloat>* %base_load,
+  %base_load = getelementptr <vscale x 8 x bfloat>, ptr %base, i64 -1
+  %data = call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(ptr %base_load,
                                                                 i32 1,
                                                                 <vscale x 8 x i1> %mask,
                                                                 <vscale x 8 x bfloat> undef)
-  %base_store = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat> * %base, i64 2
+  %base_store = getelementptr <vscale x 8 x bfloat>, ptr %base, i64 2
   call void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat> %data,
-                                        <vscale x 8 x bfloat>* %base_store,
+                                        ptr %base_store,
                                         i32 1,
                                         <vscale x 8 x i1> %mask)
   ret void
@@ -563,13 +563,13 @@ define void @test_masked_ldst_sv8bf16(<vscale x 8 x bfloat> * %base, <vscale x 8
 
 ; 8-lane zero/sign extended contiguous loads.
 
-define <vscale x 8 x i16> @masked_zload_sv8i8_to_sv8i16(<vscale x 8 x i8>* %base, <vscale x 8 x i1> %mask) nounwind {
+define <vscale x 8 x i16> @masked_zload_sv8i8_to_sv8i16(ptr %base, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_zload_sv8i8_to_sv8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0, #-4, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base, i64 -4
-  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %base_load,
+  %base_load = getelementptr <vscale x 8 x i8>, ptr %base, i64 -4
+  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %base_load,
                                                           i32 1,
                                                           <vscale x 8 x i1> %mask,
                                                           <vscale x 8 x i8> undef)
@@ -577,13 +577,13 @@ define <vscale x 8 x i16> @masked_zload_sv8i8_to_sv8i16(<vscale x 8 x i8>* %base
   ret <vscale x 8 x i16> %ext
 }
 
-define <vscale x 8 x i16> @masked_sload_sv8i8_to_sv8i16(<vscale x 8 x i8>* %base, <vscale x 8 x i1> %mask) nounwind {
+define <vscale x 8 x i16> @masked_sload_sv8i8_to_sv8i16(ptr %base, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_sload_sv8i8_to_sv8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.h }, p0/z, [x0, #-3, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base, i64 -3
-  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %base_load,
+  %base_load = getelementptr <vscale x 8 x i8>, ptr %base, i64 -3
+  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %base_load,
                                                           i32 1,
                                                           <vscale x 8 x i1> %mask,
                                                           <vscale x 8 x i8> undef)
@@ -593,15 +593,15 @@ define <vscale x 8 x i16> @masked_sload_sv8i8_to_sv8i16(<vscale x 8 x i8>* %base
 
 ; 8-lane truncating contiguous stores.
 
-define void @masked_trunc_store_sv8i16_to_sv8i8(<vscale x 8 x i16> %val, <vscale x 8 x i8> *%base, <vscale x 8 x i1> %mask) nounwind {
+define void @masked_trunc_store_sv8i16_to_sv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv8i16_to_sv8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x0, #3, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base, i64 3
+  %base_load = getelementptr <vscale x 8 x i8>, ptr %base, i64 3
   %trunc = trunc <vscale x 8 x i16> %val to <vscale x 8 x i8>
   call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %trunc,
-                                      <vscale x 8 x i8> *%base_load,
+                                      ptr %base_load,
                                       i32 1,
                                       <vscale x 8 x i1> %mask)
   ret void
@@ -609,74 +609,74 @@ define void @masked_trunc_store_sv8i16_to_sv8i8(<vscale x 8 x i16> %val, <vscale
 
 ; 16-lane contiguous load/stores.
 
-define void @test_masked_ldst_sv16i8(<vscale x 16 x i8> * %base, <vscale x 16 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv16i8(ptr %base, <vscale x 16 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, #6, mul vl]
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base, i64 6
-  %data = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>* %base_load,
+  %base_load = getelementptr <vscale x 16 x i8>, ptr %base, i64 6
+  %data = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %base_load,
                                                             i32 1,
                                                             <vscale x 16 x i1> %mask,
                                                             <vscale x 16 x i8> undef)
-  %base_store = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8> * %base, i64 7
+  %base_store = getelementptr <vscale x 16 x i8>, ptr %base, i64 7
   call void @llvm.masked.store.nxv16i8(<vscale x 16 x i8> %data,
-                                       <vscale x 16 x i8>* %base_store,
+                                       ptr %base_store,
                                        i32 1,
                                        <vscale x 16 x i1> %mask)
   ret void
 }
 
 ; 2-element contiguous loads.
-declare <vscale x 2 x i8>  @llvm.masked.load.nxv2i8 (<vscale x 2 x i8>* , i32, <vscale x 2 x i1>, <vscale x 2 x i8> )
-declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>*, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
-declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>*, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
-declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>*, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
-declare <vscale x 2 x half> @llvm.masked.load.nxv2f16(<vscale x 2 x half>*, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
-declare <vscale x 2 x float> @llvm.masked.load.nxv2f32(<vscale x 2 x float>*, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
-declare <vscale x 2 x double> @llvm.masked.load.nxv2f64(<vscale x 2 x double>*, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+declare <vscale x 2 x i8>  @llvm.masked.load.nxv2i8 (ptr , i32, <vscale x 2 x i1>, <vscale x 2 x i8> )
+declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <vscale x 2 x half> @llvm.masked.load.nxv2f16(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x float> @llvm.masked.load.nxv2f32(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
+declare <vscale x 2 x double> @llvm.masked.load.nxv2f64(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 
 ; 4-element contiguous loads.
-declare <vscale x 4 x i8>  @llvm.masked.load.nxv4i8 (<vscale x 4 x i8>* , i32, <vscale x 4 x i1>, <vscale x 4 x i8> )
-declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>*, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
-declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>*, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
-declare <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half>*, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
-declare <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float>*, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+declare <vscale x 4 x i8>  @llvm.masked.load.nxv4i8 (ptr , i32, <vscale x 4 x i1>, <vscale x 4 x i8> )
+declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
+declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 4 x half> @llvm.masked.load.nxv4f16(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x float> @llvm.masked.load.nxv4f32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
 
 ; 8-element contiguous loads.
-declare <vscale x 8 x i8>  @llvm.masked.load.nxv8i8 (<vscale x 8 x i8>* , i32, <vscale x 8 x i1>, <vscale x 8 x i8> )
-declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>*, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
-declare <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>*, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
-declare <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(<vscale x 8 x bfloat>*, i32, <vscale x 8 x i1>, <vscale x 8 x bfloat>)
+declare <vscale x 8 x i8>  @llvm.masked.load.nxv8i8 (ptr , i32, <vscale x 8 x i1>, <vscale x 8 x i8> )
+declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 8 x half> @llvm.masked.load.nxv8f16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
+declare <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x bfloat>)
 
 ; 16-element contiguous loads.
-declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>*, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
 
 ; 2-element contiguous stores.
-declare void @llvm.masked.store.nxv2i8 (<vscale x 2 x i8> , <vscale x 2 x i8>* , i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>*, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2i8 (<vscale x 2 x i8> , ptr , i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2i16(<vscale x 2 x i16>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2i32(<vscale x 2 x i32>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2i64(<vscale x 2 x i64>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2f16(<vscale x 2 x half>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2f32(<vscale x 2 x float>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2f64(<vscale x 2 x double>, ptr, i32, <vscale x 2 x i1>)
 
 ; 4-element contiguous stores.
-declare void @llvm.masked.store.nxv4i8 (<vscale x 4 x i8> , <vscale x 4 x i8>* , i32, <vscale x 4 x i1>)
-declare void @llvm.masked.store.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>*, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.store.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.store.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>*, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.store.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>*, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4i8 (<vscale x 4 x i8> , ptr , i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4i16(<vscale x 4 x i16>, ptr, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4i32(<vscale x 4 x i32>, ptr, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4f16(<vscale x 4 x half>, ptr, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4f32(<vscale x 4 x float>, ptr, i32, <vscale x 4 x i1>)
 
 ; 8-element contiguous stores.
-declare void @llvm.masked.store.nxv8i8 (<vscale x 8 x i8> , <vscale x 8 x i8>* , i32, <vscale x 8 x i1>)
-declare void @llvm.masked.store.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32, <vscale x 8 x i1>)
-declare void @llvm.masked.store.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>*, i32, <vscale x 8 x i1>)
-declare void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8i8 (<vscale x 8 x i8> , ptr , i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8i16(<vscale x 8 x i16>, ptr, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8f16(<vscale x 8 x half>, ptr, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat>, ptr, i32, <vscale x 8 x i1>)
 
 ; 16-element contiguous stores.
-declare void @llvm.masked.store.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>*, i32, <vscale x 16 x i1>)
+declare void @llvm.masked.store.nxv16i8(<vscale x 16 x i8>, ptr, i32, <vscale x 16 x i1>)
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+sve,+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
index fe14b9602093e..32f43a8c9ff79 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
@@ -8,12 +8,12 @@ define void @test_masked_ldst_sv2i8(ptr %base, <vscale x 2 x i1> %mask, i64 %off
 ; CHECK-NEXT: st1b { z[[DATA]].d }, p0, [x0, x1]
 ; CHECK-NEXT: ret
   %base_i8 = getelementptr i8, ptr %base, i64 %offset
-  %data = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %base_i8,
+  %data = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 2 x i1> %mask,
                                                           <vscale x 2 x i8> undef)
   call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %data,
-                                      <vscale x 2 x i8>* %base_i8,
+                                      ptr %base_i8,
                                       i32 1,
                                       <vscale x 2 x i1> %mask)
   ret void
@@ -25,12 +25,12 @@ define void @test_masked_ldst_sv2i16(ptr %base, <vscale x 2 x i1> %mask, i64 %of
 ; CHECK-NEXT: st1h { z[[DATA]].d }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
   %base_i16 = getelementptr i16, ptr %base, i64 %offset
-  %data = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %base_i16,
+  %data = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %base_i16,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i16> undef)
   call void @llvm.masked.store.nxv2i16(<vscale x 2 x i16> %data,
-                                       <vscale x 2 x i16>* %base_i16,
+                                       ptr %base_i16,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
@@ -42,12 +42,12 @@ define void @test_masked_ldst_sv2i32(ptr %base, <vscale x 2 x i1> %mask, i64 %of
 ; CHECK-NEXT: st1w  { z0.d }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT: ret
   %base_i32 = getelementptr i32, ptr %base, i64 %offset
-  %data = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %base_i32,
+  %data = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %base_i32,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i32> undef)
   call void @llvm.masked.store.nxv2i32(<vscale x 2 x i32> %data,
-                                       <vscale x 2 x i32>* %base_i32,
+                                       ptr %base_i32,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
@@ -59,12 +59,12 @@ define void @test_masked_ldst_sv2i64(ptr %base, <vscale x 2 x i1> %mask, i64 %of
 ; CHECK-NEXT: st1d  { z0.d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT: ret
   %base_i64 = getelementptr i64, ptr %base, i64 %offset
-  %data = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>* %base_i64,
+  %data = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr %base_i64,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i64> undef)
   call void @llvm.masked.store.nxv2i64(<vscale x 2 x i64> %data,
-                                       <vscale x 2 x i64>* %base_i64,
+                                       ptr %base_i64,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
@@ -76,12 +76,12 @@ define void @test_masked_ldst_sv2f16(ptr %base, <vscale x 2 x i1> %mask, i64 %of
 ; CHECK-NEXT: st1h { z[[DATA]].d }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
   %base_half = getelementptr half, ptr %base, i64 %offset
-  %data = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(<vscale x 2 x half>* %base_half,
+  %data = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(ptr %base_half,
                                                              i32 1,
                                                              <vscale x 2 x i1> %mask,
                                                              <vscale x 2 x half> undef)
   call void @llvm.masked.store.nxv2f16(<vscale x 2 x half> %data,
-                                       <vscale x 2 x half>* %base_half,
+                                       ptr %base_half,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
@@ -93,12 +93,12 @@ define void @test_masked_ldst_sv2f32(ptr %base, <vscale x 2 x i1> %mask, i64 %of
 ; CHECK-NEXT: st1w { z[[DATA]].d }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT: ret
   %base_float = getelementptr float, ptr %base, i64 %offset
-  %data = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(<vscale x 2 x float>* %base_float,
+  %data = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(ptr %base_float,
                                                               i32 1,
                                                               <vscale x 2 x i1> %mask,
                                                               <vscale x 2 x float> undef)
   call void @llvm.masked.store.nxv2f32(<vscale x 2 x float> %data,
-                                       <vscale x 2 x float>* %base_float,
+                                       ptr %base_float,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
@@ -110,12 +110,12 @@ define void @test_masked_ldst_sv2f64(ptr %base, <vscale x 2 x i1> %mask, i64 %of
 ; CHECK-NEXT: st1d { z[[DATA]].d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT: ret
   %base_double = getelementptr double, ptr %base, i64 %offset
-  %data = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(<vscale x 2 x double>* %base_double,
+  %data = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(ptr %base_double,
                                                                i32 1,
                                                                <vscale x 2 x i1> %mask,
                                                                <vscale x 2 x double> undef)
   call void @llvm.masked.store.nxv2f64(<vscale x 2 x double> %data,
-                                       <vscale x 2 x double>* %base_double,
+                                       ptr %base_double,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
@@ -128,7 +128,7 @@ define <vscale x 2 x i64> @masked_zload_sv2i8_to_sv2i64(ptr %base, <vscale x 2 x
 ; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, x1]
 ; CHECK-NEXT: ret
   %base_i8 = getelementptr i8, ptr %base, i64 %offset
-  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %base_i8,
+  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 2 x i1> %mask,
                                                           <vscale x 2 x i8> undef)
@@ -141,7 +141,7 @@ define <vscale x 2 x i64> @masked_sload_sv2i8_to_sv2i64(ptr %base, <vscale x 2 x
 ; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0, x1]
 ; CHECK-NEXT: ret
   %base_i8 = getelementptr i8, ptr %base, i64 %offset
-  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %base_i8,
+  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 2 x i1> %mask,
                                                           <vscale x 2 x i8> undef)
@@ -154,7 +154,7 @@ define <vscale x 2 x i64> @masked_zload_sv2i16_to_sv2i64(ptr %base, <vscale x 2
 ; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
   %base_i16 = getelementptr i16, ptr %base, i64 %offset
-  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %base_i16,
+  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %base_i16,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i16> undef)
@@ -167,7 +167,7 @@ define <vscale x 2 x i64> @masked_sload_sv2i16_to_sv2i64(ptr %base, <vscale x 2
 ; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
   %base_i16 = getelementptr i16, ptr %base, i64 %offset
-  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %base_i16,
+  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %base_i16,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i16> undef)
@@ -181,7 +181,7 @@ define <vscale x 2 x i64> @masked_zload_sv2i32_to_sv2i64(ptr %base, <vscale x 2
 ; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT: ret
   %base_i32 = getelementptr i32, ptr %base, i64 %offset
-  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %base_i32,
+  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %base_i32,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i32> undef)
@@ -194,7 +194,7 @@ define <vscale x 2 x i64> @masked_sload_sv2i32_to_sv2i64(ptr %base, <vscale x 2
 ; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT: ret
   %base_i32 = getelementptr i32, ptr %base, i64 %offset
-  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %base_i32,
+  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %base_i32,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i32> undef)
@@ -211,7 +211,7 @@ define void @masked_trunc_store_sv2i64_to_sv2i8(<vscale x 2 x i64> %val, ptr %ba
   %base_i8 = getelementptr i8, ptr %base, i64 %offset
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i8>
   call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %trunc,
-                                      <vscale x 2 x i8> *%base_i8,
+                                      ptr %base_i8,
                                       i32 1,
                                       <vscale x 2 x i1> %mask)
   ret void
@@ -224,7 +224,7 @@ define void @masked_trunc_store_sv2i64_to_sv2i16(<vscale x 2 x i64> %val, ptr %b
   %base_i16 = getelementptr i16, ptr %base, i64 %offset
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i16>
   call void @llvm.masked.store.nxv2i16(<vscale x 2 x i16> %trunc,
-                                       <vscale x 2 x i16> *%base_i16,
+                                       ptr %base_i16,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
@@ -237,7 +237,7 @@ define void @masked_trunc_store_sv2i64_to_sv2i32(<vscale x 2 x i64> %val, ptr %b
   %base_i32 = getelementptr i32, ptr %base, i64 %offset
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i32>
   call void @llvm.masked.store.nxv2i32(<vscale x 2 x i32> %trunc,
-                                       <vscale x 2 x i32> *%base_i32,
+                                       ptr %base_i32,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
@@ -251,12 +251,12 @@ define void @test_masked_ldst_sv4i8(ptr %base, <vscale x 4 x i1> %mask, i64 %off
 ; CHECK-NEXT: st1b { z[[DATA]].s }, p0, [x0, x1]
 ; CHECK-NEXT: ret
   %base_i8 = getelementptr i8, ptr %base, i64 %offset
-  %data = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %base_i8,
+  %data = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 4 x i1> %mask,
                                                           <vscale x 4 x i8> undef)
   call void @llvm.masked.store.nxv4i8(<vscale x 4 x i8> %data,
-                                      <vscale x 4 x i8>* %base_i8,
+                                      ptr %base_i8,
                                       i32 1,
                                       <vscale x 4 x i1> %mask)
   ret void
@@ -268,12 +268,12 @@ define void @test_masked_ldst_sv4i16(ptr %base, <vscale x 4 x i1> %mask, i64 %of
 ; CHECK-NEXT: st1h { z[[DATA]].s }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
   %base_i16 = getelementptr i16, ptr %base, i64 %offset
-  %data = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>* %base_i16,
+  %data = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %base_i16,
                                                             i32 1,
                                                             <vscale x 4 x i1> %mask,
                                                             <vscale x 4 x i16> undef)
   call void @llvm.masked.store.nxv4i16(<vscale x 4 x i16> %data,
-                                       <vscale x 4 x i16>* %base_i16,
+                                       ptr %base_i16,
                                        i32 1,
                                        <vscale x 4 x i1> %mask)
   ret void
@@ -285,12 +285,12 @@ define void @test_masked_ldst_sv4i32(ptr %base, <vscale x 4 x i1> %mask, i64 %of
 ; CHECK-NEXT: st1w { z[[DATA]].s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT: ret
   %base_i32 = getelementptr i32, ptr %base, i64 %offset
-  %data = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>* %base_i32,
+  %data = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %base_i32,
                                                             i32 1,
                                                             <vscale x 4 x i1> %mask,
                                                             <vscale x 4 x i32> undef)
   call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %data,
-                                       <vscale x 4 x i32>* %base_i32,
+                                       ptr %base_i32,
                                        i32 1,
                                        <vscale x 4 x i1> %mask)
   ret void
@@ -302,12 +302,12 @@ define void @test_masked_ldst_sv4f16(ptr %base, <vscale x 4 x i1> %mask, i64 %of
 ; CHECK-NEXT: st1h { z[[DATA]].s }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
   %base_f16 = getelementptr half, ptr %base, i64 %offset
-  %data = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half>* %base_f16,
+  %data = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(ptr %base_f16,
                                                              i32 1,
                                                              <vscale x 4 x i1> %mask,
                                                              <vscale x 4 x half> undef)
   call void @llvm.masked.store.nxv4f16(<vscale x 4 x half> %data,
-                                       <vscale x 4 x half>* %base_f16,
+                                       ptr %base_f16,
                                        i32 1,
                                        <vscale x 4 x i1> %mask)
   ret void
@@ -319,12 +319,12 @@ define void @test_masked_ldst_sv4f32(ptr %base, <vscale x 4 x i1> %mask, i64 %of
 ; CHECK-NEXT: st1w { z[[DATA]].s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT: ret
   %base_f32 = getelementptr float, ptr %base, i64 %offset
-  %data = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float>* %base_f32,
+  %data = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(ptr %base_f32,
                                                               i32 1,
                                                               <vscale x 4 x i1> %mask,
                                                               <vscale x 4 x float> undef)
   call void @llvm.masked.store.nxv4f32(<vscale x 4 x float> %data,
-                                       <vscale x 4 x float>* %base_f32,
+                                       ptr %base_f32,
                                        i32 1,
                                        <vscale x 4 x i1> %mask)
   ret void
@@ -337,7 +337,7 @@ define <vscale x 4 x i32> @masked_zload_sv4i8_to_sv4i32(ptr %base, <vscale x 4 x
 ; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, x1]
 ; CHECK-NEXT: ret
   %base_i8 = getelementptr i8, ptr %base, i64 %offset
-  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %base_i8,
+  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 4 x i1> %mask,
                                                           <vscale x 4 x i8> undef)
@@ -350,7 +350,7 @@ define <vscale x 4 x i32> @masked_sload_sv4i8_to_sv4i32(ptr %base, <vscale x 4 x
 ; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0, x1]
 ; CHECK-NEXT: ret
   %base_i8 = getelementptr i8, ptr %base, i64 %offset
-  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %base_i8,
+  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 4 x i1> %mask,
                                                           <vscale x 4 x i8> undef)
@@ -363,7 +363,7 @@ define <vscale x 4 x i32> @masked_zload_sv4i16_to_sv4i32(ptr %base, <vscale x 4
 ; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
   %base_i16 = getelementptr i16, ptr %base, i64 %offset
-  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>* %base_i16,
+  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %base_i16,
                                                             i32 1,
                                                             <vscale x 4 x i1> %mask,
                                                             <vscale x 4 x i16> undef)
@@ -376,7 +376,7 @@ define <vscale x 4 x i32> @masked_sload_sv4i16_to_sv4i32(ptr %base, <vscale x 4
 ; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
   %base_i16 = getelementptr i16, ptr %base, i64 %offset
-  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>* %base_i16,
+  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %base_i16,
                                                             i32 1,
                                                             <vscale x 4 x i1> %mask,
                                                             <vscale x 4 x i16> undef)
@@ -393,7 +393,7 @@ define void @masked_trunc_store_sv4i32_to_sv4i8(<vscale x 4 x i32> %val, ptr %ba
   %base_i8 = getelementptr i8, ptr %base, i64 %offset
   %trunc = trunc <vscale x 4 x i32> %val to <vscale x 4 x i8>
   call void @llvm.masked.store.nxv4i8(<vscale x 4 x i8> %trunc,
-                                      <vscale x 4 x i8> *%base_i8,
+                                      ptr %base_i8,
                                       i32 1,
                                       <vscale x 4 x i1> %mask)
   ret void
@@ -406,7 +406,7 @@ define void @masked_trunc_store_sv4i32_to_sv4i16(<vscale x 4 x i32> %val, ptr %b
   %base_i16 = getelementptr i16, ptr %base, i64 %offset
   %trunc = trunc <vscale x 4 x i32> %val to <vscale x 4 x i16>
   call void @llvm.masked.store.nxv4i16(<vscale x 4 x i16> %trunc,
-                                       <vscale x 4 x i16> *%base_i16,
+                                       ptr %base_i16,
                                        i32 1,
                                        <vscale x 4 x i1> %mask)
   ret void
@@ -420,12 +420,12 @@ define void @test_masked_ldst_sv8i8(ptr %base, <vscale x 8 x i1> %mask, i64 %off
 ; CHECK-NEXT: st1b { z[[DATA]].h }, p0, [x0, x1]
 ; CHECK-NEXT: ret
   %base_i8 = getelementptr i8, ptr %base, i64 %offset
-  %data = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %base_i8,
+  %data = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 8 x i1> %mask,
                                                           <vscale x 8 x i8> undef)
   call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %data,
-                                      <vscale x 8 x i8>* %base_i8,
+                                      ptr %base_i8,
                                       i32 1,
                                       <vscale x 8 x i1> %mask)
   ret void
@@ -437,12 +437,12 @@ define void @test_masked_ldst_sv8i16(ptr %base, <vscale x 8 x i1> %mask, i64 %of
 ; CHECK-NEXT: st1h { z[[DATA]].h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
   %base_i16 = getelementptr i16, ptr %base, i64 %offset
-  %data = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>* %base_i16,
+  %data = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %base_i16,
                                                             i32 1,
                                                             <vscale x 8 x i1> %mask,
                                                             <vscale x 8 x i16> undef)
   call void @llvm.masked.store.nxv8i16(<vscale x 8 x i16> %data,
-                                       <vscale x 8 x i16>* %base_i16,
+                                       ptr %base_i16,
                                        i32 1,
                                        <vscale x 8 x i1> %mask)
   ret void
@@ -454,12 +454,12 @@ define void @test_masked_ldst_sv8f16(ptr %base, <vscale x 8 x i1> %mask, i64 %of
 ; CHECK-NEXT: st1h { z[[DATA]].h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
   %base_f16 = getelementptr half, ptr %base, i64 %offset
-  %data = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>* %base_f16,
+  %data = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(ptr %base_f16,
                                                              i32 1,
                                                              <vscale x 8 x i1> %mask,
                                                              <vscale x 8 x half> undef)
   call void @llvm.masked.store.nxv8f16(<vscale x 8 x half> %data,
-                                       <vscale x 8 x half>* %base_f16,
+                                       ptr %base_f16,
                                        i32 1,
                                        <vscale x 8 x i1> %mask)
   ret void
@@ -471,12 +471,12 @@ define void @test_masked_ldst_sv8bf16(ptr %base, <vscale x 8 x i1> %mask, i64 %o
 ; CHECK-NEXT: st1h { z[[DATA]].h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
   %base_f16 = getelementptr bfloat, ptr %base, i64 %offset
-  %data = call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(<vscale x 8 x bfloat>* %base_f16,
+  %data = call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(ptr %base_f16,
                                                                i32 1,
                                                                <vscale x 8 x i1> %mask,
                                                                <vscale x 8 x bfloat> undef)
   call void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat> %data,
-                                        <vscale x 8 x bfloat>* %base_f16,
+                                        ptr %base_f16,
                                         i32 1,
                                         <vscale x 8 x i1> %mask)
   ret void
@@ -489,7 +489,7 @@ define <vscale x 8 x i16> @masked_zload_sv8i8_to_sv8i16(ptr %base, <vscale x 8 x
 ; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0, x1]
 ; CHECK-NEXT: ret
   %base_i8 = getelementptr i8, ptr %base, i64 %offset
-  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %base_i8,
+  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 8 x i1> %mask,
                                                           <vscale x 8 x i8> undef)
@@ -502,7 +502,7 @@ define <vscale x 8 x i16> @masked_sload_sv8i8_to_sv8i16(ptr %base, <vscale x 8 x
 ; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0, x1]
 ; CHECK-NEXT: ret
   %base_i8 = getelementptr i8, ptr %base, i64 %offset
-  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %base_i8,
+  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 8 x i1> %mask,
                                                           <vscale x 8 x i8> undef)
@@ -519,7 +519,7 @@ define void @masked_trunc_store_sv8i16_to_sv8i8(<vscale x 8 x i16> %val, ptr %ba
   %base_i8 = getelementptr i8, ptr %base, i64 %offset
   %trunc = trunc <vscale x 8 x i16> %val to <vscale x 8 x i8>
   call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %trunc,
-                                      <vscale x 8 x i8> *%base_i8,
+                                      ptr %base_i8,
                                       i32 1,
                                       <vscale x 8 x i1> %mask)
   ret void
@@ -533,66 +533,66 @@ define void @test_masked_ldst_sv16i8(ptr %base, <vscale x 16 x i1> %mask, i64 %o
 ; CHECK-NEXT: st1b { z[[DATA]].b }, p0, [x0, x1]
 ; CHECK-NEXT: ret
   %base_i8 = getelementptr i8, ptr %base, i64 %offset
-  %data = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>* %base_i8,
+  %data = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %base_i8,
                                                             i32 1,
                                                             <vscale x 16 x i1> %mask,
                                                             <vscale x 16 x i8> undef)
   call void @llvm.masked.store.nxv16i8(<vscale x 16 x i8> %data,
-                                       <vscale x 16 x i8>* %base_i8,
+                                       ptr %base_i8,
                                        i32 1,
                                        <vscale x 16 x i1> %mask)
   ret void
 }
 
 ; 2-element contiguous loads.
-declare <vscale x 2 x i8>  @llvm.masked.load.nxv2i8 (<vscale x 2 x i8>* , i32, <vscale x 2 x i1>, <vscale x 2 x i8> )
-declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>*, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
-declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>*, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
-declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>*, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
-declare <vscale x 2 x half> @llvm.masked.load.nxv2f16(<vscale x 2 x half>*, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
-declare <vscale x 2 x float> @llvm.masked.load.nxv2f32(<vscale x 2 x float>*, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
-declare <vscale x 2 x double> @llvm.masked.load.nxv2f64(<vscale x 2 x double>*, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+declare <vscale x 2 x i8>  @llvm.masked.load.nxv2i8 (ptr , i32, <vscale x 2 x i1>, <vscale x 2 x i8> )
+declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <vscale x 2 x half> @llvm.masked.load.nxv2f16(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x float> @llvm.masked.load.nxv2f32(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
+declare <vscale x 2 x double> @llvm.masked.load.nxv2f64(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 
 ; 4-element contiguous loads.
-declare <vscale x 4 x i8>  @llvm.masked.load.nxv4i8 (<vscale x 4 x i8>* , i32, <vscale x 4 x i1>, <vscale x 4 x i8> )
-declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>*, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
-declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>*, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
-declare <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half>*, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
-declare <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float>*, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+declare <vscale x 4 x i8>  @llvm.masked.load.nxv4i8 (ptr , i32, <vscale x 4 x i1>, <vscale x 4 x i8> )
+declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
+declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 4 x half> @llvm.masked.load.nxv4f16(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x float> @llvm.masked.load.nxv4f32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
 
 ; 8-element contiguous loads.
-declare <vscale x 8 x i8>  @llvm.masked.load.nxv8i8 (<vscale x 8 x i8>* , i32, <vscale x 8 x i1>, <vscale x 8 x i8> )
-declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>*, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
-declare <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>*, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
-declare <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(<vscale x 8 x bfloat>*, i32, <vscale x 8 x i1>, <vscale x 8 x bfloat>)
+declare <vscale x 8 x i8>  @llvm.masked.load.nxv8i8 (ptr , i32, <vscale x 8 x i1>, <vscale x 8 x i8> )
+declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 8 x half> @llvm.masked.load.nxv8f16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
+declare <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x bfloat>)
 
 ; 16-element contiguous loads.
-declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>*, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
 
 ; 2-element contiguous stores.
-declare void @llvm.masked.store.nxv2i8 (<vscale x 2 x i8> , <vscale x 2 x i8>* , i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>*, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2i8 (<vscale x 2 x i8> , ptr , i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2i16(<vscale x 2 x i16>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2i32(<vscale x 2 x i32>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2i64(<vscale x 2 x i64>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2f16(<vscale x 2 x half>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2f32(<vscale x 2 x float>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv2f64(<vscale x 2 x double>, ptr, i32, <vscale x 2 x i1>)
 
 ; 4-element contiguous stores.
-declare void @llvm.masked.store.nxv4i8 (<vscale x 4 x i8> , <vscale x 4 x i8>* , i32, <vscale x 4 x i1>)
-declare void @llvm.masked.store.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>*, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.store.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.store.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>*, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.store.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>*, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4i8 (<vscale x 4 x i8> , ptr , i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4i16(<vscale x 4 x i16>, ptr, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4i32(<vscale x 4 x i32>, ptr, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4f16(<vscale x 4 x half>, ptr, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv4f32(<vscale x 4 x float>, ptr, i32, <vscale x 4 x i1>)
 
 ; 8-element contiguous stores.
-declare void @llvm.masked.store.nxv8i8 (<vscale x 8 x i8> , <vscale x 8 x i8>* , i32, <vscale x 8 x i1>)
-declare void @llvm.masked.store.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32, <vscale x 8 x i1>)
-declare void @llvm.masked.store.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>*, i32, <vscale x 8 x i1>)
-declare void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8i8 (<vscale x 8 x i8> , ptr , i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8i16(<vscale x 8 x i16>, ptr, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8f16(<vscale x 8 x half>, ptr, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat>, ptr, i32, <vscale x 8 x i1>)
 
 ; 16-element contiguous stores.
-declare void @llvm.masked.store.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>*, i32, <vscale x 16 x i1>)
+declare void @llvm.masked.store.nxv16i8(<vscale x 16 x i8>, ptr, i32, <vscale x 16 x i1>)
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+sve,+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll
index c06921ee96a0d..af8f642037466 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll
@@ -6,7 +6,7 @@
 ; range values are tested only in one case (following). Valid values
 ; are tested all through the rest of the file.
 
-define void @imm_out_of_range(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mask) nounwind {
+define void @imm_out_of_range(ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: imm_out_of_range:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #8
@@ -16,203 +16,203 @@ define void @imm_out_of_range(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mas
 ; CHECK-NEXT:    add x8, x0, x8
 ; CHECK-NEXT:    stnt1d { z0.d }, p0, [x8]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 8
-  %base_load_bc = bitcast <vscale x 2 x i64>* %base_load to i64*
+  %base_load = getelementptr <vscale x 2 x i64>, ptr %base, i64 8
+  %base_load_bc = bitcast ptr %base_load to ptr
   %data = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1> %mask,
-                                                                  i64* %base_load_bc)
-  %base_store = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64> * %base, i64 -9
-  %base_store_bc = bitcast <vscale x 2 x i64>* %base_store to i64*
+                                                                  ptr %base_load_bc)
+  %base_store = getelementptr <vscale x 2 x i64>, ptr %base, i64 -9
+  %base_store_bc = bitcast ptr %base_store to ptr
   call void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64> %data,
                                             <vscale x 2 x i1> %mask,
-                                            i64* %base_store_bc)
+                                            ptr %base_store_bc)
   ret void
 }
 
 ; 2-lane non-temporal load/stores
 
 
-define void @test_masked_ldst_sv2i64(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv2i64(ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1d { z0.d }, p0/z, [x0, #-8, mul vl]
 ; CHECK-NEXT:    stnt1d { z0.d }, p0, [x0, #-7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 -8
-  %base_load_bc = bitcast <vscale x 2 x i64>* %base_load to i64*
+  %base_load = getelementptr <vscale x 2 x i64>, ptr %base, i64 -8
+  %base_load_bc = bitcast ptr %base_load to ptr
   %data = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1> %mask,
-                                                                  i64* %base_load_bc)
-  %base_store = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64> * %base, i64 -7
-  %base_store_bc = bitcast <vscale x 2 x i64>* %base_store to i64*
+                                                                  ptr %base_load_bc)
+  %base_store = getelementptr <vscale x 2 x i64>, ptr %base, i64 -7
+  %base_store_bc = bitcast ptr %base_store to ptr
   call void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64> %data,
                                             <vscale x 2 x i1> %mask,
-                                            i64* %base_store_bc)
+                                            ptr %base_store_bc)
   ret void
 }
 
-define void @test_masked_ldst_sv2f64(<vscale x 2 x double> * %base, <vscale x 2 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv2f64(ptr %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1d { z0.d }, p0/z, [x0, #-6, mul vl]
 ; CHECK-NEXT:    stnt1d { z0.d }, p0, [x0, #-5, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %base, i64 -6
-  %base_load_bc = bitcast <vscale x 2 x double>* %base_load to double*
+  %base_load = getelementptr <vscale x 2 x double>, ptr %base, i64 -6
+  %base_load_bc = bitcast ptr %base_load to ptr
   %data = call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.nxv2f64(<vscale x 2 x i1> %mask,
-                                                                    double* %base_load_bc)
-  %base_store = getelementptr <vscale x 2 x double>, <vscale x 2 x double> * %base, i64 -5
-  %base_store_bc = bitcast <vscale x 2 x double>* %base_store to double*
+                                                                    ptr %base_load_bc)
+  %base_store = getelementptr <vscale x 2 x double>, ptr %base, i64 -5
+  %base_store_bc = bitcast ptr %base_store to ptr
   call void @llvm.aarch64.sve.stnt1.nxv2f64(<vscale x 2 x double> %data,
                                             <vscale x 2 x i1> %mask,
-                                            double* %base_store_bc)
+                                            ptr %base_store_bc)
   ret void
 }
 
 ; 4-lane non-temporal load/stores.
 
-define void @test_masked_ldst_sv4i32(<vscale x 4 x i32> * %base, <vscale x 4 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv4i32(ptr %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1w { z0.s }, p0/z, [x0, #6, mul vl]
 ; CHECK-NEXT:    stnt1w { z0.s }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %base, i64 6
-  %base_load_bc = bitcast <vscale x 4 x i32>* %base_load to i32*
+  %base_load = getelementptr <vscale x 4 x i32>, ptr %base, i64 6
+  %base_load_bc = bitcast ptr %base_load to ptr
   %data = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1> %mask,
-                                                                  i32* %base_load_bc)
-  %base_store = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32> * %base, i64 7
-  %base_store_bc = bitcast <vscale x 4 x i32>* %base_store to i32*
+                                                                  ptr %base_load_bc)
+  %base_store = getelementptr <vscale x 4 x i32>, ptr %base, i64 7
+  %base_store_bc = bitcast ptr %base_store to ptr
   call void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32> %data,
                                             <vscale x 4 x i1> %mask,
-                                            i32* %base_store_bc)
+                                            ptr %base_store_bc)
   ret void
 }
 
-define void @test_masked_ldst_sv4f32(<vscale x 4 x float> * %base, <vscale x 4 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv4f32(ptr %base, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1w { z0.s }, p0/z, [x0, #-1, mul vl]
 ; CHECK-NEXT:    stnt1w { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %base, i64 -1
-  %base_load_bc = bitcast <vscale x 4 x float>* %base_load to float*
+  %base_load = getelementptr <vscale x 4 x float>, ptr %base, i64 -1
+  %base_load_bc = bitcast ptr %base_load to ptr
   %data = call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.nxv4f32(<vscale x 4 x i1> %mask,
-                                                                    float* %base_load_bc)
-  %base_store = getelementptr <vscale x 4 x float>, <vscale x 4 x float> * %base, i64 2
-  %base_store_bc = bitcast <vscale x 4 x float>* %base_store to float*
+                                                                    ptr %base_load_bc)
+  %base_store = getelementptr <vscale x 4 x float>, ptr %base, i64 2
+  %base_store_bc = bitcast ptr %base_store to ptr
   call void @llvm.aarch64.sve.stnt1.nxv4f32(<vscale x 4 x float> %data,
                                             <vscale x 4 x i1> %mask,
-                                            float* %base_store_bc)
+                                            ptr %base_store_bc)
   ret void
 }
 
 
 ; 8-lane non-temporal load/stores.
 
-define void @test_masked_ldst_sv8i16(<vscale x 8 x i16> * %base, <vscale x 8 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv8i16(ptr %base, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1h { z0.h }, p0/z, [x0, #6, mul vl]
 ; CHECK-NEXT:    stnt1h { z0.h }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %base, i64 6
-  %base_load_bc = bitcast <vscale x 8 x i16>* %base_load to i16*
+  %base_load = getelementptr <vscale x 8 x i16>, ptr %base, i64 6
+  %base_load_bc = bitcast ptr %base_load to ptr
   %data = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1> %mask,
-                                                                  i16* %base_load_bc)
-  %base_store = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16> * %base, i64 7
-  %base_store_bc = bitcast <vscale x 8 x i16>* %base_store to i16*
+                                                                  ptr %base_load_bc)
+  %base_store = getelementptr <vscale x 8 x i16>, ptr %base, i64 7
+  %base_store_bc = bitcast ptr %base_store to ptr
   call void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16> %data,
                                             <vscale x 8 x i1> %mask,
-                                            i16* %base_store_bc)
+                                            ptr %base_store_bc)
   ret void
 }
 
-define void @test_masked_ldst_sv8f16(<vscale x 8 x half> * %base, <vscale x 8 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv8f16(ptr %base, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1h { z0.h }, p0/z, [x0, #-1, mul vl]
 ; CHECK-NEXT:    stnt1h { z0.h }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %base, i64 -1
-  %base_load_bc = bitcast <vscale x 8 x half>* %base_load to half*
+  %base_load = getelementptr <vscale x 8 x half>, ptr %base, i64 -1
+  %base_load_bc = bitcast ptr %base_load to ptr
   %data = call <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1> %mask,
-                                                                   half* %base_load_bc)
-  %base_store = getelementptr <vscale x 8 x half>, <vscale x 8 x half> * %base, i64 2
-  %base_store_bc = bitcast <vscale x 8 x half>* %base_store to half*
+                                                                   ptr %base_load_bc)
+  %base_store = getelementptr <vscale x 8 x half>, ptr %base, i64 2
+  %base_store_bc = bitcast ptr %base_store to ptr
   call void @llvm.aarch64.sve.stnt1.nxv8f16(<vscale x 8 x half> %data,
                                             <vscale x 8 x i1> %mask,
-                                            half* %base_store_bc)
+                                            ptr %base_store_bc)
   ret void
 }
 
-define void @test_masked_ldst_sv8bf16(<vscale x 8 x bfloat> * %base, <vscale x 8 x i1> %mask) nounwind #0 {
+define void @test_masked_ldst_sv8bf16(ptr %base, <vscale x 8 x i1> %mask) nounwind #0 {
 ; CHECK-LABEL: test_masked_ldst_sv8bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1h { z0.h }, p0/z, [x0, #-1, mul vl]
 ; CHECK-NEXT:    stnt1h { z0.h }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %base, i64 -1
-  %base_load_bc = bitcast <vscale x 8 x bfloat>* %base_load to bfloat*
+  %base_load = getelementptr <vscale x 8 x bfloat>, ptr %base, i64 -1
+  %base_load_bc = bitcast ptr %base_load to ptr
   %data = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnt1.nxv8bf16(<vscale x 8 x i1> %mask,
-                                                                      bfloat* %base_load_bc)
-  %base_store = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat> * %base, i64 2
-  %base_store_bc = bitcast <vscale x 8 x bfloat>* %base_store to bfloat*
+                                                                      ptr %base_load_bc)
+  %base_store = getelementptr <vscale x 8 x bfloat>, ptr %base, i64 2
+  %base_store_bc = bitcast ptr %base_store to ptr
   call void @llvm.aarch64.sve.stnt1.nxv8bf16(<vscale x 8 x bfloat> %data,
                                              <vscale x 8 x i1> %mask,
-                                             bfloat* %base_store_bc)
+                                             ptr %base_store_bc)
   ret void
 }
 
 ; 16-lane non-temporal load/stores.
 
-define void @test_masked_ldst_sv16i8(<vscale x 16 x i8> * %base, <vscale x 16 x i1> %mask) nounwind {
+define void @test_masked_ldst_sv16i8(ptr %base, <vscale x 16 x i1> %mask) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1b { z0.b }, p0/z, [x0, #6, mul vl]
 ; CHECK-NEXT:    stnt1b { z0.b }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_load = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base, i64 6
-  %base_load_bc = bitcast <vscale x 16 x i8>* %base_load to i8*
+  %base_load = getelementptr <vscale x 16 x i8>, ptr %base, i64 6
+  %base_load_bc = bitcast ptr %base_load to ptr
   %data = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1> %mask,
-                                                                  i8* %base_load_bc)
-  %base_store = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8> * %base, i64 7
-  %base_store_bc = bitcast <vscale x 16 x i8>* %base_store to i8*
+                                                                  ptr %base_load_bc)
+  %base_store = getelementptr <vscale x 16 x i8>, ptr %base, i64 7
+  %base_store_bc = bitcast ptr %base_store to ptr
   call void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8> %data,
                                             <vscale x 16 x i1> %mask,
-                                            i8* %base_store_bc)
+                                            ptr %base_store_bc)
   ret void
 }
 
 ; 2-element non-temporal loads.
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.nxv2f64(<vscale x 2 x i1>, double*)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.nxv2f64(<vscale x 2 x i1>, ptr)
 
 ; 4-element non-temporal loads.
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.nxv4f32(<vscale x 4 x i1>, float*)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.nxv4f32(<vscale x 4 x i1>, ptr)
 
 ; 8-element non-temporal loads.
-declare <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1>, half*)
-declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnt1.nxv8bf16(<vscale x 8 x i1>, bfloat*)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnt1.nxv8bf16(<vscale x 8 x i1>, ptr)
 
 ; 16-element non-temporal loads.
-declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1>, ptr)
 
 ; 2-element non-temporal stores.
-declare void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
-declare void @llvm.aarch64.sve.stnt1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)
+declare void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.stnt1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, ptr)
 
 ; 4-element non-temporal stores.
-declare void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
-declare void @llvm.aarch64.sve.stnt1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*)
+declare void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.stnt1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, ptr)
 
 ; 8-element non-temporal stores.
-declare void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
-declare void @llvm.aarch64.sve.stnt1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)
-declare void @llvm.aarch64.sve.stnt1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, bfloat*)
+declare void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.stnt1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.stnt1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, ptr)
 
 ; 16-element non-temporal stores.
-declare void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
+declare void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, ptr)
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+sve,+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-redundant-store.ll b/llvm/test/CodeGen/AArch64/sve-redundant-store.ll
index b5799ae709647..6873404724f1d 100644
--- a/llvm/test/CodeGen/AArch64/sve-redundant-store.ll
+++ b/llvm/test/CodeGen/AArch64/sve-redundant-store.ll
@@ -15,7 +15,7 @@ define void @redundant_store(ptr nocapture %p, <vscale x 4 x i32> %v) {
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   store i32 1, ptr %p, align 4
-  store <vscale x 4 x i32> %v, <vscale x 4 x i32>* %p, align 16
+  store <vscale x 4 x i32> %v, ptr %p, align 16
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-setcc.ll b/llvm/test/CodeGen/AArch64/sve-setcc.ll
index 7f9d9e86d4584..d4c17c1bd838c 100644
--- a/llvm/test/CodeGen/AArch64/sve-setcc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-setcc.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
 
 ; Ensure we use the CC result of SVE compare instructions when branching.
-define void @sve_cmplt_setcc(<vscale x 8 x i16>* %out, <vscale x 8 x i16> %in, <vscale x 8 x i1> %pg) {
+define void @sve_cmplt_setcc(ptr %out, <vscale x 8 x i16> %in, <vscale x 8 x i1> %pg) {
 ; CHECK-LABEL: sve_cmplt_setcc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cmplt p1.h, p0/z, z0.h, #0
@@ -17,7 +17,7 @@ entry:
   br i1 %1, label %if.then, label %if.end
 
 if.then:
-  tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> %in, <vscale x 8 x i16>* %out, i32 2, <vscale x 8 x i1> %pg)
+  tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> %in, ptr %out, i32 2, <vscale x 8 x i1> %pg)
   br label %if.end
 
 if.end:
@@ -25,7 +25,7 @@ if.end:
 }
 
 ; Ensure we use the inverted CC result of SVE compare instructions when branching.
-define void @sve_cmplt_setcc_inverted(<vscale x 8 x i16>* %out, <vscale x 8 x i16> %in, <vscale x 8 x i1> %pg) {
+define void @sve_cmplt_setcc_inverted(ptr %out, <vscale x 8 x i16> %in, <vscale x 8 x i1> %pg) {
 ; CHECK-LABEL: sve_cmplt_setcc_inverted:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cmplt p1.h, p0/z, z0.h, #0
@@ -40,7 +40,7 @@ entry:
   br i1 %1, label %if.end, label %if.then
 
 if.then:
-  tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> %in, <vscale x 8 x i16>* %out, i32 2, <vscale x 8 x i1> %pg)
+  tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> %in, ptr %out, i32 2, <vscale x 8 x i1> %pg)
   br label %if.end
 
 if.end:
@@ -48,7 +48,7 @@ if.end:
 }
 
 ; Ensure we combine setcc and csel so as to not end up with an extra compare
-define void @sve_cmplt_setcc_hslo(<vscale x 8 x i16>* %out, <vscale x 8 x i16> %in, <vscale x 8 x i1> %pg) {
+define void @sve_cmplt_setcc_hslo(ptr %out, <vscale x 8 x i16> %in, <vscale x 8 x i1> %pg) {
 ; CHECK-LABEL: sve_cmplt_setcc_hslo:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p1.h
@@ -66,7 +66,7 @@ entry:
   br i1 %1, label %if.then, label %if.end
 
 if.then:
-  tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> %in, <vscale x 8 x i16>* %out, i32 2, <vscale x 8 x i1> %pg)
+  tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> %in, ptr %out, i32 2, <vscale x 8 x i1> %pg)
   br label %if.end
 
 if.end:
@@ -123,4 +123,4 @@ declare i1 @llvm.aarch64.sve.ptest.last.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x
 
 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
 
-declare void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16>, ptr, i32, <vscale x 8 x i1>)

diff --git a/llvm/test/CodeGen/AArch64/sve-split-load.ll b/llvm/test/CodeGen/AArch64/sve-split-load.ll
index b8a5e1141cdf1..af03059cf0d8b 100644
--- a/llvm/test/CodeGen/AArch64/sve-split-load.ll
+++ b/llvm/test/CodeGen/AArch64/sve-split-load.ll
@@ -3,28 +3,28 @@
 
 ; UNPREDICATED
 
-define <vscale x 4 x i16> @load_promote_4i16(<vscale x 4 x i16>* %a) {
+define <vscale x 4 x i16> @load_promote_4i16(ptr %a) {
 ; CHECK-LABEL: load_promote_4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <vscale x 4 x i16>, <vscale x 4 x i16>* %a
+  %load = load <vscale x 4 x i16>, ptr %a
   ret <vscale x 4 x i16> %load
 }
 
-define <vscale x 16 x i16> @load_split_i16(<vscale x 16 x i16>* %a) {
+define <vscale x 16 x i16> @load_split_i16(ptr %a) {
 ; CHECK-LABEL: load_split_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %load = load <vscale x 16 x i16>, <vscale x 16 x i16>* %a
+  %load = load <vscale x 16 x i16>, ptr %a
   ret <vscale x 16 x i16> %load
 }
 
-define <vscale x 24 x i16> @load_split_24i16(<vscale x 24 x i16>* %a) {
+define <vscale x 24 x i16> @load_split_24i16(ptr %a) {
 ; CHECK-LABEL: load_split_24i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
@@ -32,11 +32,11 @@ define <vscale x 24 x i16> @load_split_24i16(<vscale x 24 x i16>* %a) {
 ; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %load = load <vscale x 24 x i16>, <vscale x 24 x i16>* %a
+  %load = load <vscale x 24 x i16>, ptr %a
   ret <vscale x 24 x i16> %load
 }
 
-define <vscale x 32 x i16> @load_split_32i16(<vscale x 32 x i16>* %a) {
+define <vscale x 32 x i16> @load_split_32i16(ptr %a) {
 ; CHECK-LABEL: load_split_32i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
@@ -45,11 +45,11 @@ define <vscale x 32 x i16> @load_split_32i16(<vscale x 32 x i16>* %a) {
 ; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ld1h { z3.h }, p0/z, [x0, #3, mul vl]
 ; CHECK-NEXT:    ret
-  %load = load <vscale x 32 x i16>, <vscale x 32 x i16>* %a
+  %load = load <vscale x 32 x i16>, ptr %a
   ret <vscale x 32 x i16> %load
 }
 
-define <vscale x 16 x i64> @load_split_16i64(<vscale x 16 x i64>* %a) {
+define <vscale x 16 x i64> @load_split_16i64(ptr %a) {
 ; CHECK-LABEL: load_split_16i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -62,32 +62,32 @@ define <vscale x 16 x i64> @load_split_16i64(<vscale x 16 x i64>* %a) {
 ; CHECK-NEXT:    ld1d { z6.d }, p0/z, [x0, #6, mul vl]
 ; CHECK-NEXT:    ld1d { z7.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %load = load <vscale x 16 x i64>, <vscale x 16 x i64>* %a
+  %load = load <vscale x 16 x i64>, ptr %a
   ret <vscale x 16 x i64> %load
 }
 
 ; MASKED
 
-define <vscale x 2 x i32> @masked_load_promote_2i32(<vscale x 2 x i32> *%a, <vscale x 2 x i1> %pg) {
+define <vscale x 2 x i32> @masked_load_promote_2i32(ptr %a, <vscale x 2 x i1> %pg) {
 ; CHECK-LABEL: masked_load_promote_2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32> *%a, i32 1, <vscale x 2 x i1> %pg, <vscale x 2 x i32> undef)
+  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %a, i32 1, <vscale x 2 x i1> %pg, <vscale x 2 x i32> undef)
   ret <vscale x 2 x i32> %load
 }
 
-define <vscale x 32 x i8> @masked_load_split_32i8(<vscale x 32 x i8> *%a, <vscale x 32 x i1> %pg) {
+define <vscale x 32 x i8> @masked_load_split_32i8(ptr %a, <vscale x 32 x i1> %pg) {
 ; CHECK-LABEL: masked_load_split_32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
 ; CHECK-NEXT:    ld1b { z1.b }, p1/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(<vscale x 32 x i8> *%a, i32 1, <vscale x 32 x i1> %pg, <vscale x 32 x i8> undef)
+  %load = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr %a, i32 1, <vscale x 32 x i1> %pg, <vscale x 32 x i8> undef)
   ret <vscale x 32 x i8> %load
 }
 
-define <vscale x 32 x i16> @masked_load_split_32i16(<vscale x 32 x i16> *%a, <vscale x 32 x i1> %pg) {
+define <vscale x 32 x i16> @masked_load_split_32i16(ptr %a, <vscale x 32 x i1> %pg) {
 ; CHECK-LABEL: masked_load_split_32i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    punpklo p2.h, p0.b
@@ -99,11 +99,11 @@ define <vscale x 32 x i16> @masked_load_split_32i16(<vscale x 32 x i16> *%a, <vs
 ; CHECK-NEXT:    ld1h { z2.h }, p3/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ld1h { z3.h }, p1/z, [x0, #3, mul vl]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 32 x i16> @llvm.masked.load.nxv32i16(<vscale x 32 x i16> *%a, i32 1, <vscale x 32 x i1> %pg, <vscale x 32 x i16> undef)
+  %load = call <vscale x 32 x i16> @llvm.masked.load.nxv32i16(ptr %a, i32 1, <vscale x 32 x i1> %pg, <vscale x 32 x i16> undef)
   ret <vscale x 32 x i16> %load
 }
 
-define <vscale x 8 x i32> @masked_load_split_8i32(<vscale x 8 x i32> *%a, <vscale x 8 x i1> %pg) {
+define <vscale x 8 x i32> @masked_load_split_8i32(ptr %a, <vscale x 8 x i1> %pg) {
 ; CHECK-LABEL: masked_load_split_8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    punpklo p1.h, p0.b
@@ -111,11 +111,11 @@ define <vscale x 8 x i32> @masked_load_split_8i32(<vscale x 8 x i32> *%a, <vscal
 ; CHECK-NEXT:    ld1w { z0.s }, p1/z, [x0]
 ; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32(<vscale x 8 x i32> *%a, i32 1, <vscale x 8 x i1> %pg, <vscale x 8 x i32> undef)
+  %load = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32(ptr %a, i32 1, <vscale x 8 x i1> %pg, <vscale x 8 x i32> undef)
   ret <vscale x 8 x i32> %load
 }
 
-define <vscale x 8 x i64> @masked_load_split_8i64(<vscale x 8 x i64> *%a, <vscale x 8 x i1> %pg) {
+define <vscale x 8 x i64> @masked_load_split_8i64(ptr %a, <vscale x 8 x i1> %pg) {
 ; CHECK-LABEL: masked_load_split_8i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    punpklo p1.h, p0.b
@@ -129,15 +129,15 @@ define <vscale x 8 x i64> @masked_load_split_8i64(<vscale x 8 x i64> *%a, <vscal
 ; CHECK-NEXT:    ld1d { z2.d }, p3/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x0, #3, mul vl]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64(<vscale x 8 x i64> *%a, i32 1, <vscale x 8 x i1> %pg, <vscale x 8 x i64> undef)
+  %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64(ptr %a, i32 1, <vscale x 8 x i1> %pg, <vscale x 8 x i64> undef)
   ret <vscale x 8 x i64> %load
 }
 
-declare <vscale x 32 x i8> @llvm.masked.load.nxv32i8(<vscale x 32 x i8>*, i32, <vscale x 32 x i1>, <vscale x 32 x i8>)
+declare <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr, i32, <vscale x 32 x i1>, <vscale x 32 x i8>)
 
-declare <vscale x 32 x i16> @llvm.masked.load.nxv32i16(<vscale x 32 x i16>*, i32, <vscale x 32 x i1>, <vscale x 32 x i16>)
+declare <vscale x 32 x i16> @llvm.masked.load.nxv32i16(ptr, i32, <vscale x 32 x i1>, <vscale x 32 x i16>)
 
-declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>*, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
-declare <vscale x 8 x i32> @llvm.masked.load.nxv8i32(<vscale x 8 x i32>*, i32, <vscale x 8 x i1>, <vscale x 8 x i32>)
+declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+declare <vscale x 8 x i32> @llvm.masked.load.nxv8i32(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i32>)
 
-declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64(<vscale x 8 x i64>*, i32, <vscale x 8 x i1>, <vscale x 8 x i64>)
+declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i64>)

diff --git a/llvm/test/CodeGen/AArch64/sve-split-store.ll b/llvm/test/CodeGen/AArch64/sve-split-store.ll
index cd46430c1efd4..90ec783ea4dbc 100644
--- a/llvm/test/CodeGen/AArch64/sve-split-store.ll
+++ b/llvm/test/CodeGen/AArch64/sve-split-store.ll
@@ -3,28 +3,28 @@
 
 ; UNPREDICATED
 
-define void @store_promote_4i8(<vscale x 4 x i8> %data, <vscale x 4 x i8>* %a) {
+define void @store_promote_4i8(<vscale x 4 x i8> %data, ptr %a) {
 ; CHECK-LABEL: store_promote_4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
-  store <vscale x 4 x i8> %data, <vscale x 4 x i8>* %a
+  store <vscale x 4 x i8> %data, ptr %a
   ret void
 }
 
-define void @store_split_i16(<vscale x 16 x i16> %data, <vscale x 16 x i16>* %a) {
+define void @store_split_i16(<vscale x 16 x i16> %data, ptr %a) {
 ; CHECK-LABEL: store_split_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    st1h { z1.h }, p0, [x0, #1, mul vl]
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
-  store <vscale x 16 x i16> %data, <vscale x 16 x i16>* %a
+  store <vscale x 16 x i16> %data, ptr %a
   ret void
 }
 
-define void @store_split_16i32(<vscale x 16 x i32> %data, <vscale x 16 x i32>* %a) {
+define void @store_split_16i32(<vscale x 16 x i32> %data, ptr %a) {
 ; CHECK-LABEL: store_split_16i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -33,11 +33,11 @@ define void @store_split_16i32(<vscale x 16 x i32> %data, <vscale x 16 x i32>* %
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x0, #1, mul vl]
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
-  store <vscale x 16 x i32> %data, <vscale x 16 x i32>* %a
+  store <vscale x 16 x i32> %data, ptr %a
   ret void
 }
 
-define void @store_split_16i64(<vscale x 16 x i64> %data, <vscale x 16 x i64>* %a) {
+define void @store_split_16i64(<vscale x 16 x i64> %data, ptr %a) {
 ; CHECK-LABEL: store_split_16i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -50,32 +50,32 @@ define void @store_split_16i64(<vscale x 16 x i64> %data, <vscale x 16 x i64>* %
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x0, #1, mul vl]
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
-  store <vscale x 16 x i64> %data, <vscale x 16 x i64>* %a
+  store <vscale x 16 x i64> %data, ptr %a
   ret void
 }
 
 ; MASKED
 
-define void @masked_store_promote_2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8> *%a, <vscale x 2 x i1> %pg) {
+define void @masked_store_promote_2i8(<vscale x 2 x i8> %data, ptr %a, <vscale x 2 x i1> %pg) {
 ; CHECK-LABEL: masked_store_promote_2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8> *%a, i32 1, <vscale x 2 x i1> %pg)
+  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %data, ptr %a, i32 1, <vscale x 2 x i1> %pg)
   ret void
 }
 
-define void @masked_store_split_32i8(<vscale x 32 x i8> %data, <vscale x 32 x i8> *%a, <vscale x 32 x i1> %pg) {
+define void @masked_store_split_32i8(<vscale x 32 x i8> %data, ptr %a, <vscale x 32 x i1> %pg) {
 ; CHECK-LABEL: masked_store_split_32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z1.b }, p1, [x0, #1, mul vl]
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv32i8(<vscale x 32 x i8> %data, <vscale x 32 x i8> *%a, i32 1, <vscale x 32 x i1> %pg)
+  call void @llvm.masked.store.nxv32i8(<vscale x 32 x i8> %data, ptr %a, i32 1, <vscale x 32 x i1> %pg)
   ret void
 }
 
-define void @masked_store_split_32i16(<vscale x 32 x i16> %data, <vscale x 32 x i16> *%a, <vscale x 32 x i1> %pg) {
+define void @masked_store_split_32i16(<vscale x 32 x i16> %data, ptr %a, <vscale x 32 x i1> %pg) {
 ; CHECK-LABEL: masked_store_split_32i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    punpkhi p2.h, p1.b
@@ -87,11 +87,11 @@ define void @masked_store_split_32i16(<vscale x 32 x i16> %data, <vscale x 32 x
 ; CHECK-NEXT:    st1h { z1.h }, p3, [x0, #1, mul vl]
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv32i16(<vscale x 32 x i16> %data, <vscale x 32 x i16> *%a, i32 1, <vscale x 32 x i1> %pg)
+  call void @llvm.masked.store.nxv32i16(<vscale x 32 x i16> %data, ptr %a, i32 1, <vscale x 32 x i1> %pg)
   ret void
 }
 
-define void @masked_store_split_8i32(<vscale x 8 x i32> %data, <vscale x 8 x i32> *%a, <vscale x 8 x i1> %pg) {
+define void @masked_store_split_8i32(<vscale x 8 x i32> %data, ptr %a, <vscale x 8 x i1> %pg) {
 ; CHECK-LABEL: masked_store_split_8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    punpkhi p1.h, p0.b
@@ -99,11 +99,11 @@ define void @masked_store_split_8i32(<vscale x 8 x i32> %data, <vscale x 8 x i32
 ; CHECK-NEXT:    st1w { z1.s }, p1, [x0, #1, mul vl]
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv8i32(<vscale x 8 x i32> %data, <vscale x 8 x i32> *%a, i32 1, <vscale x 8 x i1> %pg)
+  call void @llvm.masked.store.nxv8i32(<vscale x 8 x i32> %data, ptr %a, i32 1, <vscale x 8 x i1> %pg)
   ret void
 }
 
-define void @masked_store_split_8i64(<vscale x 8 x i64> %data, <vscale x 8 x i64> *%a, <vscale x 8 x i1> %pg) {
+define void @masked_store_split_8i64(<vscale x 8 x i64> %data, ptr %a, <vscale x 8 x i1> %pg) {
 ; CHECK-LABEL: masked_store_split_8i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    punpkhi p1.h, p0.b
@@ -117,15 +117,15 @@ define void @masked_store_split_8i64(<vscale x 8 x i64> %data, <vscale x 8 x i64
 ; CHECK-NEXT:    st1d { z1.d }, p3, [x0, #1, mul vl]
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.nxv8i64(<vscale x 8 x i64> %data, <vscale x 8 x i64> *%a, i32 1, <vscale x 8 x i1> %pg)
+  call void @llvm.masked.store.nxv8i64(<vscale x 8 x i64> %data, ptr %a, i32 1, <vscale x 8 x i1> %pg)
   ret void
 }
 
-declare void @llvm.masked.store.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>*, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.store.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>*, i32, <vscale x 32 x i1>)
+declare void @llvm.masked.store.nxv2i8(<vscale x 2 x i8>, ptr, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.store.nxv32i8(<vscale x 32 x i8>, ptr, i32, <vscale x 32 x i1>)
 
-declare void @llvm.masked.store.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>*, i32, <vscale x 32 x i1>)
+declare void @llvm.masked.store.nxv32i16(<vscale x 32 x i16>, ptr, i32, <vscale x 32 x i1>)
 
-declare void @llvm.masked.store.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8i32(<vscale x 8 x i32>, ptr, i32, <vscale x 8 x i1>)
 
-declare void @llvm.masked.store.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8i64(<vscale x 8 x i64>, ptr, i32, <vscale x 8 x i1>)

diff --git a/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll
index 9f6fdf6b39718..728041d3f916b 100644
--- a/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll
@@ -3,106 +3,106 @@
 
 ; ST1B
 
-define void @st1b_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i8>* %a) {
+define void @st1b_lower_bound(<vscale x 16 x i8> %data, ptr %a) {
 ; CHECK-LABEL: st1b_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, #-8, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -8
-  store <vscale x 16 x i8> %data, <vscale x 16 x i8>* %base
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 -8
+  store <vscale x 16 x i8> %data, ptr %base
   ret void
 }
 
-define void @st1b_inbound(<vscale x 16 x i8> %data, <vscale x 16 x i8>* %a) {
+define void @st1b_inbound(<vscale x 16 x i8> %data, ptr %a) {
 ; CHECK-LABEL: st1b_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 1
-  store <vscale x 16 x i8> %data, <vscale x 16 x i8>* %base
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 1
+  store <vscale x 16 x i8> %data, ptr %base
   ret void
 }
 
-define void @st1b_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i8>* %a) {
+define void @st1b_upper_bound(<vscale x 16 x i8> %data, ptr %a) {
 ; CHECK-LABEL: st1b_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 7
-  store <vscale x 16 x i8> %data, <vscale x 16 x i8>* %base
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 7
+  store <vscale x 16 x i8> %data, ptr %base
   ret void
 }
 
-define void @st1b_out_of_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i8>* %a) {
+define void @st1b_out_of_upper_bound(<vscale x 16 x i8> %data, ptr %a) {
 ; CHECK-LABEL: st1b_out_of_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    rdvl x8, #8
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 8
-  store <vscale x 16 x i8> %data, <vscale x 16 x i8>* %base
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 8
+  store <vscale x 16 x i8> %data, ptr %base
   ret void
 }
 
-define void @st1b_out_of_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i8>* %a) {
+define void @st1b_out_of_lower_bound(<vscale x 16 x i8> %data, ptr %a) {
 ; CHECK-LABEL: st1b_out_of_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    rdvl x8, #-9
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -9
-  store <vscale x 16 x i8> %data, <vscale x 16 x i8>* %base
+  %base = getelementptr <vscale x 16 x i8>, ptr %a, i64 -9
+  store <vscale x 16 x i8> %data, ptr %base
   ret void
 }
 
 ; ST1H
 
-define void @st1h_inbound(<vscale x 8 x i16> %data, <vscale x 8 x i16>* %a) {
+define void @st1h_inbound(<vscale x 8 x i16> %data, ptr %a) {
 ; CHECK-LABEL: st1h_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, #-6, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %a, i64 -6
-  store <vscale x 8 x i16> %data, <vscale x 8 x i16>* %base
+  %base = getelementptr <vscale x 8 x i16>, ptr %a, i64 -6
+  store <vscale x 8 x i16> %data, ptr %base
   ret void
 }
 
 ; ST1W
 
-define void @st1w_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i32>* %a) {
+define void @st1w_inbound(<vscale x 4 x i32> %data, ptr %a) {
 ; CHECK-LABEL: st1w_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %a, i64 2
-  store <vscale x 4 x i32> %data, <vscale x 4 x i32>* %base
+  %base = getelementptr <vscale x 4 x i32>, ptr %a, i64 2
+  store <vscale x 4 x i32> %data, ptr %base
   ret void
 }
 
 ; ST1D
 
-define void @st1d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i64>* %a) {
+define void @st1d_inbound(<vscale x 2 x i64> %data, ptr %a) {
 ; CHECK-LABEL: st1d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, #5, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %a, i64 5
-  store <vscale x 2 x i64> %data, <vscale x 2 x i64>* %base
+  %base = getelementptr <vscale x 2 x i64>, ptr %a, i64 5
+  store <vscale x 2 x i64> %data, ptr %base
   ret void
 }
 
 
 ; Splat stores of unpacked FP scalable vectors
 
-define void @store_nxv2f32(<vscale x 2 x float>* %out) {
+define void @store_nxv2f32(ptr %out) {
 ; CHECK-LABEL: store_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -111,11 +111,11 @@ define void @store_nxv2f32(<vscale x 2 x float>* %out) {
 ; CHECK-NEXT:    ret
   %ins = insertelement <vscale x 2 x float> undef, float 1.0, i32 0
   %splat = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
-  store <vscale x 2 x float> %splat, <vscale x 2 x float>* %out
+  store <vscale x 2 x float> %splat, ptr %out
   ret void
 }
 
-define void @store_nxv4f16(<vscale x 4 x half>* %out) {
+define void @store_nxv4f16(ptr %out) {
 ; CHECK-LABEL: store_nxv4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -124,13 +124,13 @@ define void @store_nxv4f16(<vscale x 4 x half>* %out) {
 ; CHECK-NEXT:    ret
   %ins = insertelement <vscale x 4 x half> undef, half 1.0, i32 0
   %splat = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
-  store <vscale x 4 x half> %splat, <vscale x 4 x half>* %out
+  store <vscale x 4 x half> %splat, ptr %out
   ret void
 }
 
 ; Splat stores of unusual FP scalable vector types
 
-define void @store_nxv6f32(<vscale x 6 x float>* %out) {
+define void @store_nxv6f32(ptr %out) {
 ; CHECK-LABEL: store_nxv6f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -141,11 +141,11 @@ define void @store_nxv6f32(<vscale x 6 x float>* %out) {
 ; CHECK-NEXT:    ret
   %ins = insertelement <vscale x 6 x float> undef, float 1.0, i32 0
   %splat = shufflevector <vscale x 6 x float> %ins, <vscale x 6 x float> undef, <vscale x 6 x i32> zeroinitializer
-  store <vscale x 6 x float> %splat, <vscale x 6 x float>* %out
+  store <vscale x 6 x float> %splat, ptr %out
   ret void
 }
 
-define void @store_nxv12f16(<vscale x 12 x half>* %out) {
+define void @store_nxv12f16(ptr %out) {
 ; CHECK-LABEL: store_nxv12f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -156,6 +156,6 @@ define void @store_nxv12f16(<vscale x 12 x half>* %out) {
 ; CHECK-NEXT:    ret
   %ins = insertelement <vscale x 12 x half> undef, half 1.0, i32 0
   %splat = shufflevector <vscale x 12 x half> %ins, <vscale x 12 x half> undef, <vscale x 12 x i32> zeroinitializer
-  store <vscale x 12 x half> %splat, <vscale x 12 x half>* %out
+  store <vscale x 12 x half> %splat, ptr %out
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-reg.ll
index b659ded53a8c1..d859bbb567ebb 100644
--- a/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-reg.ll
@@ -10,7 +10,7 @@ define void @st1_nxv16i8(ptr %addr, i64 %off, <vscale x 16 x i8> %val) {
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
-  store <vscale x 16 x i8> %val, <vscale x 16 x i8>* %ptr
+  store <vscale x 16 x i8> %val, ptr %ptr
   ret void
 }
 
@@ -21,7 +21,7 @@ define void @st1_nxv16i8_bitcast_from_i16(ptr %addr, i64 %off, <vscale x 8 x i16
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
-  store <vscale x 8 x i16> %val, <vscale x 8 x i16>* %ptr
+  store <vscale x 8 x i16> %val, ptr %ptr
   ret void
 }
 
@@ -32,7 +32,7 @@ define void @st1_nxv16i8_bitcast_from_i32(ptr %addr, i64 %off, <vscale x 4 x i32
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
-  store <vscale x 4 x i32> %val, <vscale x 4 x i32>* %ptr
+  store <vscale x 4 x i32> %val, ptr %ptr
   ret void
 }
 
@@ -43,7 +43,7 @@ define void @st1_nxv16i8_bitcast_from_i64(ptr %addr, i64 %off, <vscale x 2 x i64
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
-  store <vscale x 2 x i64> %val, <vscale x 2 x i64>* %ptr
+  store <vscale x 2 x i64> %val, ptr %ptr
   ret void
 }
 
@@ -55,7 +55,7 @@ define void @st1_nxv8i16_trunc8(ptr %addr, i64 %off, <vscale x 8 x i16> %val) {
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
   %trunc = trunc <vscale x 8 x i16> %val to <vscale x 8 x i8>
-  store <vscale x 8 x i8> %trunc, <vscale x 8 x i8>* %ptr
+  store <vscale x 8 x i8> %trunc, ptr %ptr
   ret void
 }
 
@@ -67,7 +67,7 @@ define void @st1_nxv4i32_trunc8(ptr %addr, i64 %off, <vscale x 4 x i32> %val) {
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
   %trunc = trunc <vscale x 4 x i32> %val to <vscale x 4 x i8>
-  store <vscale x 4 x i8> %trunc, <vscale x 4 x i8>* %ptr
+  store <vscale x 4 x i8> %trunc, ptr %ptr
   ret void
 }
 
@@ -79,7 +79,7 @@ define void @st1_nxv2i64_trunc8(ptr %addr, i64 %off, <vscale x 2 x i64> %val) {
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i8>
-  store <vscale x 2 x i8> %trunc, <vscale x 2 x i8>* %ptr
+  store <vscale x 2 x i8> %trunc, ptr %ptr
   ret void
 }
 
@@ -92,7 +92,7 @@ define void @st1_nxv8i16(ptr %addr, i64 %off, <vscale x 8 x i16> %val) {
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
-  store <vscale x 8 x i16> %val, <vscale x 8 x i16>* %ptr
+  store <vscale x 8 x i16> %val, ptr %ptr
   ret void
 }
 
@@ -104,7 +104,7 @@ define void @st1_nxv4i32_trunc16(ptr %addr, i64 %off, <vscale x 4 x i32> %val) {
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
   %trunc = trunc <vscale x 4 x i32> %val to <vscale x 4 x i16>
-  store <vscale x 4 x i16> %trunc, <vscale x 4 x i16>* %ptr
+  store <vscale x 4 x i16> %trunc, ptr %ptr
   ret void
 }
 
@@ -116,7 +116,7 @@ define void @st1_nxv2i64_trunc16(ptr %addr, i64 %off, <vscale x 2 x i64> %val) {
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i16>
-  store <vscale x 2 x i16> %trunc, <vscale x 2 x i16>* %ptr
+  store <vscale x 2 x i16> %trunc, ptr %ptr
   ret void
 }
 
@@ -127,7 +127,7 @@ define void @st1_nxv8f16(ptr %addr, i64 %off, <vscale x 8 x half> %val) {
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds half, ptr %addr, i64 %off
-  store <vscale x 8 x half> %val, <vscale x 8 x half>* %ptr
+  store <vscale x 8 x half> %val, ptr %ptr
   ret void
 }
 
@@ -138,7 +138,7 @@ define void @st1_nxv8bf16(ptr %addr, i64 %off, <vscale x 8 x bfloat> %val) {
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
-  store <vscale x 8 x bfloat> %val, <vscale x 8 x bfloat>* %ptr
+  store <vscale x 8 x bfloat> %val, ptr %ptr
   ret void
 }
 
@@ -149,7 +149,7 @@ define void @st1_nxv4f16(ptr %addr, i64 %off, <vscale x 4 x half> %val) {
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds half, ptr %addr, i64 %off
-  store <vscale x 4 x half> %val, <vscale x 4 x half>* %ptr
+  store <vscale x 4 x half> %val, ptr %ptr
   ret void
 }
 
@@ -160,7 +160,7 @@ define void @st1_nxv4bf16(ptr %addr, i64 %off, <vscale x 4 x bfloat> %val) {
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
-  store <vscale x 4 x bfloat> %val, <vscale x 4 x bfloat>* %ptr
+  store <vscale x 4 x bfloat> %val, ptr %ptr
   ret void
 }
 
@@ -171,7 +171,7 @@ define void @st1_nxv2f16(ptr %addr, i64 %off, <vscale x 2 x half> %val) {
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds half, ptr %addr, i64 %off
-  store <vscale x 2 x half> %val, <vscale x 2 x half>* %ptr
+  store <vscale x 2 x half> %val, ptr %ptr
   ret void
 }
 
@@ -182,7 +182,7 @@ define void @st1_nxv2bf16(ptr %addr, i64 %off, <vscale x 2 x bfloat> %val) {
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
-  store <vscale x 2 x bfloat> %val, <vscale x 2 x bfloat>* %ptr
+  store <vscale x 2 x bfloat> %val, ptr %ptr
   ret void
 }
 
@@ -195,7 +195,7 @@ define void @st1_nxv4i32(ptr %addr, i64 %off, <vscale x 4 x i32> %val) {
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i32, ptr %addr, i64 %off
-  store <vscale x 4 x i32> %val, <vscale x 4 x i32>* %ptr
+  store <vscale x 4 x i32> %val, ptr %ptr
   ret void
 }
 
@@ -207,7 +207,7 @@ define void @st1_nxv2i64_trunc32(ptr %addr, i64 %off, <vscale x 2 x i64> %val) {
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i32, ptr %addr, i64 %off
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i32>
-  store <vscale x 2 x i32> %trunc, <vscale x 2 x i32>* %ptr
+  store <vscale x 2 x i32> %trunc, ptr %ptr
   ret void
 }
 
@@ -218,7 +218,7 @@ define void @st1_nxv4f32(ptr %addr, i64 %off, <vscale x 4 x float> %val) {
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds float, ptr %addr, i64 %off
-  store <vscale x 4 x float> %val, <vscale x 4 x float>* %ptr
+  store <vscale x 4 x float> %val, ptr %ptr
   ret void
 }
 
@@ -229,7 +229,7 @@ define void @st1_nxv2f32(ptr %addr, i64 %off, <vscale x 2 x float> %val) {
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds float, ptr %addr, i64 %off
-  store <vscale x 2 x float> %val, <vscale x 2 x float>* %ptr
+  store <vscale x 2 x float> %val, ptr %ptr
   ret void
 }
 
@@ -242,7 +242,7 @@ define void @st1_nxv2i64(ptr %addr, i64 %off, <vscale x 2 x i64> %val) {
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds i64, ptr %addr, i64 %off
-  store <vscale x 2 x i64> %val, <vscale x 2 x i64>* %ptr
+  store <vscale x 2 x i64> %val, ptr %ptr
   ret void
 }
 
@@ -253,6 +253,6 @@ define void @st1_nxv2f64(ptr %addr, i64 %off, <vscale x 2 x double> %val) {
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
   %ptr = getelementptr inbounds double, ptr %addr, i64 %off
-  store <vscale x 2 x double> %val, <vscale x 2 x double>* %ptr
+  store <vscale x 2 x double> %val, ptr %ptr
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll
index 3b1f19af90219..f2ba4a7cc3567 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ptest.ll
@@ -35,8 +35,8 @@ define i1 @ptest_v16i1(ptr %a, ptr %b) {
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    and w0, w8, #0x1
 ; CHECK-NEXT:    ret
-  %v0 = bitcast ptr %a to <16 x float>*
-  %v1 = load <16 x float>, <16 x float>* %v0, align 4
+  %v0 = bitcast ptr %a to ptr
+  %v1 = load <16 x float>, ptr %v0, align 4
   %v2 = fcmp une <16 x float> %v1, zeroinitializer
   %v3 = call i1 @llvm.vector.reduce.or.i1.v16i1 (<16 x i1> %v2)
   ret i1 %v3
@@ -92,11 +92,11 @@ define i1 @ptest_or_v16i1(ptr %a, ptr %b) {
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    and w0, w8, #0x1
 ; CHECK-NEXT:    ret
-  %v0 = bitcast ptr %a to <16 x float>*
-  %v1 = load <16 x float>, <16 x float>* %v0, align 4
+  %v0 = bitcast ptr %a to ptr
+  %v1 = load <16 x float>, ptr %v0, align 4
   %v2 = fcmp une <16 x float> %v1, zeroinitializer
-  %v3 = bitcast float* %b to <16 x float>*
-  %v4 = load <16 x float>, <16 x float>* %v3, align 4
+  %v3 = bitcast ptr %b to ptr
+  %v4 = load <16 x float>, ptr %v3, align 4
   %v5 = fcmp une <16 x float> %v4, zeroinitializer
   %v6 = or <16 x i1> %v2, %v5
   %v7 = call i1 @llvm.vector.reduce.or.i1.v16i1 (<16 x i1> %v6)
@@ -159,11 +159,11 @@ define i1 @ptest_and_v16i1(ptr %a, ptr %b) {
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    and w0, w8, #0x1
 ; CHECK-NEXT:    ret
-  %v0 = bitcast ptr %a to <16 x float>*
-  %v1 = load <16 x float>, <16 x float>* %v0, align 4
+  %v0 = bitcast ptr %a to ptr
+  %v1 = load <16 x float>, ptr %v0, align 4
   %v2 = fcmp une <16 x float> %v1, zeroinitializer
-  %v3 = bitcast float* %b to <16 x float>*
-  %v4 = load <16 x float>, <16 x float>* %v3, align 4
+  %v3 = bitcast ptr %b to ptr
+  %v4 = load <16 x float>, ptr %v3, align 4
   %v5 = fcmp une <16 x float> %v4, zeroinitializer
   %v6 = and <16 x i1> %v2, %v5
   %v7 = call i1 @llvm.vector.reduce.and.i1.v16i1 (<16 x i1> %v6)

diff --git a/llvm/test/CodeGen/AArch64/sve-trunc.ll b/llvm/test/CodeGen/AArch64/sve-trunc.ll
index dfa4a6148b86e..92dfc73961362 100644
--- a/llvm/test/CodeGen/AArch64/sve-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-trunc.ll
@@ -210,6 +210,6 @@ define void @trunc_promoteIntRes(<vscale x 4 x i64> %0, ptr %ptr) {
 ; CHECK-NEXT:    ret
 entry:
   %1 = trunc <vscale x 4 x i64> %0 to <vscale x 4 x i16>
-  store <vscale x 4 x i16> %1, <vscale x 4 x i16>* %ptr, align 2
+  store <vscale x 4 x i16> %1, ptr %ptr, align 2
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-uunpklo-load-uzp1-store-combine.ll b/llvm/test/CodeGen/AArch64/sve-uunpklo-load-uzp1-store-combine.ll
index 738ff533744bc..fc0dd2b163369 100644
--- a/llvm/test/CodeGen/AArch64/sve-uunpklo-load-uzp1-store-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-uunpklo-load-uzp1-store-combine.ll
@@ -205,12 +205,12 @@ declare <vscale x 16 x i8> @llvm.aarch64.sve.uzp1.nxv16i8(<vscale x 16 x i8>, <v
 declare <vscale x 8 x i16> @llvm.aarch64.sve.uzp1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
 declare <vscale x 4 x i32> @llvm.aarch64.sve.uzp1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
 
-declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>*, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
-declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>*, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>*, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
 
-declare void @llvm.masked.store.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>*, i32, <vscale x 16 x i1>)
-declare void @llvm.masked.store.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32, <vscale x 8 x i1>)
-declare void @llvm.masked.store.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.store.nxv16i8(<vscale x 16 x i8>, ptr, i32, <vscale x 16 x i1>)
+declare void @llvm.masked.store.nxv8i16(<vscale x 8 x i16>, ptr, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv4i32(<vscale x 4 x i32>, ptr, i32, <vscale x 4 x i1>)
 
 attributes #0 = { "target-features"="+sve" vscale_range(8,0) }

diff --git a/llvm/test/CodeGen/AArch64/sve-varargs-callee-broken.ll b/llvm/test/CodeGen/AArch64/sve-varargs-callee-broken.ll
index 0f42fa10caef0..59f0faff25e32 100644
--- a/llvm/test/CodeGen/AArch64/sve-varargs-callee-broken.ll
+++ b/llvm/test/CodeGen/AArch64/sve-varargs-callee-broken.ll
@@ -14,7 +14,7 @@ entry:
   %0 = va_arg ptr %args, i32
   store i32 %0, ptr %vc, align 4
   %1 = va_arg ptr %args, <vscale x 4 x i32>
-  store <vscale x 4 x i32> %1, <vscale x 4 x i32>* %vv, align 16
+  store <vscale x 4 x i32> %1, ptr %vv, align 16
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-ld1-single.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-ld1-single.ll
index 1fbbab875403b..39ee4510d51b4 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-ld1-single.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-ld1-single.ll
@@ -13,30 +13,30 @@ define <vscale x 4 x i32> @test_svld1uwq_i32_ss(<vscale x 1 x i1> %pred, ptr %ba
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @test_svld1uwq_i32_si(<vscale x 1 x i1> %pred, <vscale x 4 x i32>* %base) {
+define <vscale x 4 x i32> @test_svld1uwq_i32_si(<vscale x 1 x i1> %pred, ptr %base) {
 ; CHECK-LABEL: test_svld1uwq_i32_si:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.q }, p0/z, [x0, #-8, mul vl]
 ; CHECK-NEXT:    ld1w { z1.q }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    add z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %gep1 = getelementptr inbounds <vscale x 1 x i32>, <vscale x 1 x i32>* %base, i64 -8
+  %gep1 = getelementptr inbounds <vscale x 1 x i32>, ptr %base, i64 -8
   %res1 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1uwq.nxv4i32(<vscale x 1 x i1> %pred, ptr %gep1)
 
-  %gep2 = getelementptr inbounds <vscale x 1 x i32>, <vscale x 1 x i32>* %base, i64 7
+  %gep2 = getelementptr inbounds <vscale x 1 x i32>, ptr %base, i64 7
   %res2 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1uwq.nxv4i32(<vscale x 1 x i1> %pred, ptr %gep2)
 
   %res = add <vscale x 4 x i32> %res1, %res2
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @test_svld1uwq_i32_out_of_bound(<vscale x 1 x i1> %pred, <vscale x 4 x i32>* %base) {
+define <vscale x 4 x i32> @test_svld1uwq_i32_out_of_bound(<vscale x 1 x i1> %pred, ptr %base) {
 ; CHECK-LABEL: test_svld1uwq_i32_out_of_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    addvl x8, x0, #2
 ; CHECK-NEXT:    ld1w { z0.q }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %gep = getelementptr inbounds <vscale x 1 x i32>, <vscale x 1 x i32>* %base, i64 8
+  %gep = getelementptr inbounds <vscale x 1 x i32>, ptr %base, i64 8
   %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1uwq.nxv4i32(<vscale x 1 x i1> %pred, ptr %gep)
 
   ret <vscale x 4 x i32> %res
@@ -52,17 +52,17 @@ define <vscale x 4 x float> @test_svld1uwq_f32_ss(<vscale x 1 x i1> %pred, ptr %
   ret <vscale x 4 x float> %res
 }
 
-define <vscale x 4 x float> @test_svld1uwq_f32_si(<vscale x 1 x i1> %pred, <vscale x 1 x float>* %base) {
+define <vscale x 4 x float> @test_svld1uwq_f32_si(<vscale x 1 x i1> %pred, ptr %base) {
 ; CHECK-LABEL: test_svld1uwq_f32_si:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.q }, p0/z, [x0, #-8, mul vl]
 ; CHECK-NEXT:    ld1w { z1.q }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %gep1 = getelementptr inbounds <vscale x 1 x float>, <vscale x 1 x float>* %base, i64 -8
+  %gep1 = getelementptr inbounds <vscale x 1 x float>, ptr %base, i64 -8
   %res1 = call <vscale x 4 x float> @llvm.aarch64.sve.ld1uwq.nxv4f32(<vscale x 1 x i1> %pred, ptr %gep1)
 
-  %gep2 = getelementptr inbounds <vscale x 1 x float>, <vscale x 1 x float>* %base, i64 7
+  %gep2 = getelementptr inbounds <vscale x 1 x float>, ptr %base, i64 7
   %res2 = call <vscale x 4 x float> @llvm.aarch64.sve.ld1uwq.nxv4f32(<vscale x 1 x i1> %pred, ptr %gep2)
 
   %res = fadd <vscale x 4 x float> %res1, %res2
@@ -81,30 +81,30 @@ define <vscale x 2 x i64> @test_svld1udq_i64_ss(<vscale x 1 x i1> %pred, ptr %ba
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @test_svld1udq_i64_si(<vscale x 1 x i1> %pred, <vscale x 1 x i64>* %base) {
+define <vscale x 2 x i64> @test_svld1udq_i64_si(<vscale x 1 x i1> %pred, ptr %base) {
 ; CHECK-LABEL: test_svld1udq_i64_si:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.q }, p0/z, [x0, #-8, mul vl]
 ; CHECK-NEXT:    ld1d { z1.q }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    add z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %gep1 = getelementptr inbounds <vscale x 1 x i64>, <vscale x 1 x i64>* %base, i64 -8
+  %gep1 = getelementptr inbounds <vscale x 1 x i64>, ptr %base, i64 -8
   %res1 = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1udq.nxv2i64(<vscale x 1 x i1> %pred, ptr %gep1)
 
-  %gep2 = getelementptr inbounds <vscale x 1 x i64>, <vscale x 1 x i64>* %base, i64 7
+  %gep2 = getelementptr inbounds <vscale x 1 x i64>, ptr %base, i64 7
   %res2 = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1udq.nxv2i64(<vscale x 1 x i1> %pred, ptr %gep2)
 
   %res = add <vscale x 2 x i64> %res1, %res2
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @test_svld1udq_i64_out_of_bound(<vscale x 1 x i1> %pred, <vscale x 1 x i64>* %base) {
+define <vscale x 2 x i64> @test_svld1udq_i64_out_of_bound(<vscale x 1 x i1> %pred, ptr %base) {
 ; CHECK-LABEL: test_svld1udq_i64_out_of_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    addvl x8, x0, #-5
 ; CHECK-NEXT:    ld1d { z0.q }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %gep = getelementptr inbounds <vscale x 1 x i64>, <vscale x 1 x i64>* %base, i64 -10
+  %gep = getelementptr inbounds <vscale x 1 x i64>, ptr %base, i64 -10
   %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1udq.nxv2i64(<vscale x 1 x i1> %pred, ptr %gep)
 
   ret <vscale x 2 x i64> %res
@@ -120,17 +120,17 @@ define <vscale x 2 x double> @test_svld1udq_f64_ss(<vscale x 1 x i1> %pred, ptr
   ret <vscale x 2 x double> %res
 }
 
-define <vscale x 2 x double> @test_svld1udq_f64_si(<vscale x 1 x i1> %pred, <vscale x 1 x double>* %base) {
+define <vscale x 2 x double> @test_svld1udq_f64_si(<vscale x 1 x i1> %pred, ptr %base) {
 ; CHECK-LABEL: test_svld1udq_f64_si:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.q }, p0/z, [x0, #-8, mul vl]
 ; CHECK-NEXT:    ld1d { z1.q }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %gep1 = getelementptr inbounds <vscale x 1 x double>, <vscale x 1 x double>* %base, i64 -8
+  %gep1 = getelementptr inbounds <vscale x 1 x double>, ptr %base, i64 -8
   %res1 = call <vscale x 2 x double> @llvm.aarch64.sve.ld1udq.nxv2f64(<vscale x 1 x i1> %pred, ptr %gep1)
 
-  %gep2 = getelementptr inbounds <vscale x 1 x double>, <vscale x 1 x double>* %base, i64 7
+  %gep2 = getelementptr inbounds <vscale x 1 x double>, ptr %base, i64 7
   %res2 = call <vscale x 2 x double> @llvm.aarch64.sve.ld1udq.nxv2f64(<vscale x 1 x i1> %pred, ptr %gep2)
 
   %res = fadd <vscale x 2 x double> %res1, %res2

diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-multivec-loads.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-multivec-loads.ll
index a5d6bd4f930e9..f15aa2ba02238 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-multivec-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-multivec-loads.ll
@@ -3,24 +3,24 @@
 
 ;;LD2Q
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2q_si_i8_off16(<vscale x 16 x i1> %pg, <vscale x 16 x i8> *%addr ) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2q_si_i8_off16(<vscale x 16 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld2q_si_i8_off16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2q { z0.q, z1.q }, p0/z, [x0, #-16, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -16
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -16
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2q.sret.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2q_si_i8_off14(<vscale x 16 x i1> %pg, <vscale x 16 x i8> *%addr ) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2q_si_i8_off14(<vscale x 16 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld2q_si_i8_off14:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2q { z0.q, z1.q }, p0/z, [x0, #14, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 14
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 14
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2q.sret.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
@@ -44,13 +44,13 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2q_i8(<vscale x 16 x i1> %p
   ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ld2q_si_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> *%addr ) {
+define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ld2q_si_i16(<vscale x 8 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld2q_si_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2q { z0.q, z1.q }, p0/z, [x0, #-16, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 -16
-  %base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
+  %base = getelementptr <vscale x 8 x i16>, ptr %addr, i64 -16
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2q.sret.nxv8i16(<vscale x 8 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
 }
@@ -103,13 +103,13 @@ define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ld2q_i32(<vscale x 4 x i1> %p
   ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
 }
 
-define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld2q_si_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> *%addr ) {
+define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld2q_si_i64(<vscale x 2 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld2q_si_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2q { z0.q, z1.q }, p0/z, [x0, #-16, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 -16
-  %base_ptr = bitcast <vscale x 2 x i64>* %base to i64 *
+  %base = getelementptr <vscale x 2 x i64>, ptr %addr, i64 -16
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2q.sret.nxv2i64(<vscale x 2 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
 }
@@ -133,13 +133,13 @@ define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld2q_i64(<vscale x 2 x i1> %p
   ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
 }
 
-define { <vscale x 8 x half>, <vscale x 8 x half> } @ld2q_si_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> *%addr ) {
+define { <vscale x 8 x half>, <vscale x 8 x half> } @ld2q_si_f16(<vscale x 8 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld2q_si_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2q { z0.q, z1.q }, p0/z, [x0, #-16, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 -16
-  %base_ptr = bitcast <vscale x 8 x half>* %base to half *
+  %base = getelementptr <vscale x 8 x half>, ptr %addr, i64 -16
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2q.sret.nxv8f16(<vscale x 8 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
@@ -163,13 +163,13 @@ define { <vscale x 8 x half>, <vscale x 8 x half> } @ld2q_f16(<vscale x 8 x i1>
   ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
 
-define { <vscale x 4 x float>, <vscale x 4 x float> } @ld2q_si_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> *%addr ) {
+define { <vscale x 4 x float>, <vscale x 4 x float> } @ld2q_si_f32(<vscale x 4 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld2q_si_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2q { z0.q, z1.q }, p0/z, [x0, #-16, mul vl]
 ; CHECK-NEXT:    ret
- %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -16
-  %base_ptr = bitcast <vscale x 4 x float>* %base to float *
+ %base = getelementptr <vscale x 4 x float>, ptr %addr, i64 -16
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld2q.sret.nxv4f32(<vscale x 4 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
@@ -193,13 +193,13 @@ define { <vscale x 4 x float>, <vscale x 4 x float> } @ld2q_f32(<vscale x 4 x i1
   ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
 
-define { <vscale x 2 x double>, <vscale x 2 x double> } @ld2q_si_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> *%addr ) {
+define { <vscale x 2 x double>, <vscale x 2 x double> } @ld2q_si_f64(<vscale x 2 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld2q_si_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2q { z0.q, z1.q }, p0/z, [x0, #-16, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -16
-  %base_ptr = bitcast <vscale x 2 x double>* %base to double *
+  %base = getelementptr <vscale x 2 x double>, ptr %addr, i64 -16
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2q.sret.nxv2f64(<vscale x 2 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
 }
@@ -223,13 +223,13 @@ define { <vscale x 2 x double>, <vscale x 2 x double> } @ld2q_f64(<vscale x 2 x
   ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
 }
 
-define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld2q_si_bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> *%addr ) {
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld2q_si_bf16(<vscale x 8 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld2q_si_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2q { z0.q, z1.q }, p0/z, [x0, #-16, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 -16
-  %base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
+  %base = getelementptr <vscale x 8 x bfloat>, ptr %addr, i64 -16
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2q.sret.nxv8bf16(<vscale x 8 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
 }
@@ -254,24 +254,24 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld2q_bf16(<vscale x 8 x
 }
 
 ;; LD3Q
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3q_si_i8_off24(<vscale x 16 x i1> %pg, <vscale x 16 x i8> *%addr ) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3q_si_i8_off24(<vscale x 16 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld3q_si_i8_off24:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3q { z0.q - z2.q }, p0/z, [x0, #-24, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -24
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -24
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3q.sret.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3q_si_i8_off21(<vscale x 16 x i1> %pg, <vscale x 16 x i8> *%addr ) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3q_si_i8_off21(<vscale x 16 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld3q_si_i8_off21:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3q { z0.q - z2.q }, p0/z, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 21
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 21
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3q.sret.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
@@ -295,13 +295,13 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @ld3q_i8(<v
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %res
 }
 
-define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld3q_si_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> *%addr ) {
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld3q_si_i16(<vscale x 8 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld3q_si_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3q { z0.q - z2.q }, p0/z, [x0, #-24, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 -24
-  %base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
+  %base = getelementptr <vscale x 8 x i16>, ptr %addr, i64 -24
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld3q.sret.nxv8i16(<vscale x 8 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
 }
@@ -325,13 +325,13 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld3q_i16(
   ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
 }
 
-define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld3q_si_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> *%addr ) {
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld3q_si_i32(<vscale x 4 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld3q_si_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3q { z0.q - z2.q }, p0/z, [x0, #-24, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 -24
-  %base_ptr = bitcast <vscale x 4 x i32>* %base to i32 *
+  %base = getelementptr <vscale x 4 x i32>, ptr %addr, i64 -24
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3q.sret.nxv4i32(<vscale x 4 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
 }
@@ -384,13 +384,13 @@ define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld3q_i64(
   ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
 }
 
-define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld3q_si_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> *%addr ) {
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld3q_si_f16(<vscale x 8 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld3q_si_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3q { z0.q - z2.q }, p0/z, [x0, #-24, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 -24
-  %base_ptr = bitcast <vscale x 8 x half>* %base to half *
+  %base = getelementptr <vscale x 8 x half>, ptr %addr, i64 -24
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld3q.sret.nxv8f16(<vscale x 8 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
@@ -414,13 +414,13 @@ define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld3q_f
   ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
 
-define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld3q_si_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> *%addr ) {
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld3q_si_f32(<vscale x 4 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld3q_si_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3q { z0.q - z2.q }, p0/z, [x0, #-24, mul vl]
 ; CHECK-NEXT:    ret
- %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -24
-  %base_ptr = bitcast <vscale x 4 x float>* %base to float *
+ %base = getelementptr <vscale x 4 x float>, ptr %addr, i64 -24
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3q.sret.nxv4f32(<vscale x 4 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
@@ -444,13 +444,13 @@ define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld3
   ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
 
-define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld3q_si_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> *%addr ) {
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld3q_si_f64(<vscale x 2 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld3q_si_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3q { z0.q - z2.q }, p0/z, [x0, #-24, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -24
-  %base_ptr = bitcast <vscale x 2 x double>* %base to double *
+  %base = getelementptr <vscale x 2 x double>, ptr %addr, i64 -24
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld3q.sret.nxv2f64(<vscale x 2 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
 }
@@ -474,13 +474,13 @@ define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @
   ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
 }
 
-define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld3q_si_bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> *%addr ) {
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld3q_si_bf16(<vscale x 8 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld3q_si_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3q { z0.q - z2.q }, p0/z, [x0, #-24, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 -24
-  %base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
+  %base = getelementptr <vscale x 8 x bfloat>, ptr %addr, i64 -24
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld3q.sret.nxv8bf16(<vscale x 8 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
 }
@@ -505,24 +505,24 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @
 }
 
 ;; LD4Q
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4q_si_i8_off32(<vscale x 16 x i1> %pg, <vscale x 16 x i8> *%addr ) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4q_si_i8_off32(<vscale x 16 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld4q_si_i8_off32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4q { z0.q - z3.q }, p0/z, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -32
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -32
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4q.sret.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4q_si_i8_off28(<vscale x 16 x i1> %pg, <vscale x 16 x i8> *%addr ) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4q_si_i8_off28(<vscale x 16 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld4q_si_i8_off28:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4q { z0.q - z3.q }, p0/z, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 28
-  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 28
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4q.sret.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
@@ -546,13 +546,13 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %res
 }
 
-define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld4q_si_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> *%addr ) {
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld4q_si_i16(<vscale x 8 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld4q_si_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4q { z0.q - z3.q }, p0/z, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 -32
-  %base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
+  %base = getelementptr <vscale x 8 x i16>, ptr %addr, i64 -32
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld4q.sret.nxv8i16(<vscale x 8 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
 }
@@ -576,13 +576,13 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8
   ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
 }
 
-define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld4q_si_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> *%addr ) {
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld4q_si_i32(<vscale x 4 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld4q_si_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4q { z0.q - z3.q }, p0/z, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 -32
-  %base_ptr = bitcast <vscale x 4 x i32>* %base to i32 *
+  %base = getelementptr <vscale x 4 x i32>, ptr %addr, i64 -32
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld4q.sret.nxv4i32(<vscale x 4 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
 }
@@ -606,13 +606,13 @@ define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4
   ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
 }
 
-define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld4q_si_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> *%addr ) {
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld4q_si_i64(<vscale x 2 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld4q_si_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4q { z0.q - z3.q }, p0/z, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 -32
-  %base_ptr = bitcast <vscale x 2 x i64>* %base to i64 *
+  %base = getelementptr <vscale x 2 x i64>, ptr %addr, i64 -32
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4q.sret.nxv2i64(<vscale x 2 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
 }
@@ -636,13 +636,13 @@ define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2
   ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
 }
 
-define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld4q_si_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> *%addr ) {
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld4q_si_f16(<vscale x 8 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld4q_si_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4q { z0.q - z3.q }, p0/z, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 -32
-  %base_ptr = bitcast <vscale x 8 x half>* %base to half *
+  %base = getelementptr <vscale x 8 x half>, ptr %addr, i64 -32
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld4q.sret.nxv8f16(<vscale x 8 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
@@ -666,13 +666,13 @@ define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale
   ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
 
-define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld4q_si_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> *%addr ) {
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld4q_si_f32(<vscale x 4 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld4q_si_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4q { z0.q - z3.q }, p0/z, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
- %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -32
-  %base_ptr = bitcast <vscale x 4 x float>* %base to float *
+ %base = getelementptr <vscale x 4 x float>, ptr %addr, i64 -32
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld4q.sret.nxv4f32(<vscale x 4 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
@@ -696,13 +696,13 @@ define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vsca
   ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
 
-define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld4q_si_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> *%addr ) {
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld4q_si_f64(<vscale x 2 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld4q_si_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4q { z0.q - z3.q }, p0/z, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -32
-  %base_ptr = bitcast <vscale x 2 x double>* %base to double *
+  %base = getelementptr <vscale x 2 x double>, ptr %addr, i64 -32
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4q.sret.nxv2f64(<vscale x 2 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
 }
@@ -726,13 +726,13 @@ define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <v
   ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
 }
 
-define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld4q_si_bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> *%addr ) {
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld4q_si_bf16(<vscale x 8 x i1> %pg, ptr %addr ) {
 ; CHECK-LABEL: ld4q_si_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4q { z0.q - z3.q }, p0/z, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 -32
-  %base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
+  %base = getelementptr <vscale x 8 x bfloat>, ptr %addr, i64 -32
+  %base_ptr = bitcast ptr %base to ptr
   %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld4q.sret.nxv8bf16(<vscale x 8 x i1> %pg, ptr %base_ptr);
   ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-multivec-stores.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-multivec-stores.ll
index 48ec0161cb8b8..8fe0694808c8e 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-multivec-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-multivec-stores.ll
@@ -124,33 +124,33 @@ define void @st2q_ss_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1,
 }
 
 
-define void @st2q_si_i8_off16(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st2q_si_i8_off16(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2q_si_i8_off16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2q { z0.q, z1.q }, p0, [x0, #-16, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -16
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -16
   call void @llvm.aarch64.sve.st2q.nxv16i8(<vscale x 16 x i8> %v0,
                                            <vscale x 16 x i8> %v1,
                                            <vscale x 16 x i1> %pred,
-                                           i8* %base)
+                                           ptr %base)
   ret void
 }
 
-define void @st2q_si_i8_off14(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st2q_si_i8_off14(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2q_si_i8_off14:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2q { z0.q, z1.q }, p0, [x0, #14, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 14
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 14
   call void @llvm.aarch64.sve.st2q.nxv16i8(<vscale x 16 x i8> %v0,
                                            <vscale x 16 x i8> %v1,
                                            <vscale x 16 x i1> %pred,
-                                           i8* %base)
+                                           ptr %base)
   ret void
 }
 
@@ -165,7 +165,7 @@ define void @st2q_si_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale
   call void @llvm.aarch64.sve.st2q.nxv8i16(<vscale x 8 x i16> %v0,
                                           <vscale x 8 x i16> %v1,
                                           <vscale x 8 x i1> %pred,
-                                          i8* %gep)
+                                          ptr %gep)
   ret void
 }
 
@@ -180,7 +180,7 @@ define void @st2q_si_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale
   call void @llvm.aarch64.sve.st2q.nxv4i32(<vscale x 4 x i32> %v0,
                                           <vscale x 4 x i32> %v1,
                                           <vscale x 4 x i1> %pred,
-                                          i32* %gep)
+                                          ptr %gep)
   ret void
 }
 
@@ -195,7 +195,7 @@ define void @st2q_si_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale
   call void @llvm.aarch64.sve.st2q.nxv2i64(<vscale x 2 x i64> %v0,
                                           <vscale x 2 x i64> %v1,
                                           <vscale x 2 x i1> %pred,
-                                          i64* %gep)
+                                          ptr %gep)
   ret void
 }
 
@@ -210,7 +210,7 @@ define void @st2q_si_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vsca
   call void @llvm.aarch64.sve.st2q.nxv8f16(<vscale x 8 x half> %v0,
                                           <vscale x 8 x half> %v1,
                                           <vscale x 8 x i1> %pred,
-                                          half* %gep)
+                                          ptr %gep)
   ret void
 }
 
@@ -225,7 +225,7 @@ define void @st2q_si_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vs
   call void @llvm.aarch64.sve.st2q.nxv4f32(<vscale x 4 x float> %v0,
                                           <vscale x 4 x float> %v1,
                                           <vscale x 4 x i1> %pred,
-                                          float* %gep)
+                                          ptr %gep)
   ret void
 }
 
@@ -240,7 +240,7 @@ define void @st2q_si_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <
   call void @llvm.aarch64.sve.st2q.nxv2f64(<vscale x 2 x double> %v0,
                                           <vscale x 2 x double> %v1,
                                           <vscale x 2 x i1> %pred,
-                                          double* %gep)
+                                          ptr %gep)
   ret void
 }
 
@@ -255,7 +255,7 @@ define void @st2q_si_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1,
   call void @llvm.aarch64.sve.st2q.nxv8bf16(<vscale x 8 x bfloat> %v0,
                                           <vscale x 8 x bfloat> %v1,
                                           <vscale x 8 x i1> %pred,
-                                          bfloat* %gep)
+                                          ptr %gep)
   ret void
 }
 
@@ -399,7 +399,7 @@ define void @st3q_ss_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1,
   ret void
 }
 
-define void @st3q_si_i8_off24(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st3q_si_i8_off24(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3q_si_i8_off24:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -407,16 +407,16 @@ define void @st3q_si_i8_off24(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <v
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3q { z0.q - z2.q }, p0, [x0, #-24, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -24
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -24
   call void @llvm.aarch64.sve.st3q.nxv16i8(<vscale x 16 x i8> %v0,
                                            <vscale x 16 x i8> %v1,
                                            <vscale x 16 x i8> %v2,
                                            <vscale x 16 x i1> %pred,
-                                           i8* %base)
+                                           ptr %base)
   ret void
 }
 
-define void @st3q_si_i8_off21(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st3q_si_i8_off21(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3q_si_i8_off21:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -424,16 +424,16 @@ define void @st3q_si_i8_off21(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <v
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3q { z0.q - z2.q }, p0, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 21
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 21
   call void @llvm.aarch64.sve.st3q.nxv16i8(<vscale x 16 x i8> %v0,
                                            <vscale x 16 x i8> %v1,
                                            <vscale x 16 x i8> %v2,
                                            <vscale x 16 x i1> %pred,
-                                           i8* %base)
+                                           ptr %base)
   ret void
 }
 
-define void @st3q_si_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2,  <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %addr) {
+define void @st3q_si_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2,  <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3q_si_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -441,16 +441,16 @@ define void @st3q_si_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3q { z0.q - z2.q }, p0, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 21
+  %base = getelementptr <vscale x 8 x i16>, ptr %addr, i64 21
   call void @llvm.aarch64.sve.st3q.nxv8i16(<vscale x 8 x i16> %v0,
                                            <vscale x 8 x i16> %v1,
                                            <vscale x 8 x i16> %v2,
                                            <vscale x 8 x i1> %pred,
-                                           i8* %base)
+                                           ptr %base)
   ret void
 }
 
-define void @st3q_si_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %addr) {
+define void @st3q_si_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3q_si_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -458,16 +458,16 @@ define void @st3q_si_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3q { z0.q - z2.q }, p0, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 21
+  %base = getelementptr <vscale x 4 x i32>, ptr %addr, i64 21
   call void @llvm.aarch64.sve.st3q.nxv4i32(<vscale x 4 x i32> %v0,
                                            <vscale x 4 x i32> %v1,
                                            <vscale x 4 x i32> %v2,
                                            <vscale x 4 x i1> %pred,
-                                           i32* %base)
+                                           ptr %base)
   ret void
 }
 
-define void @st3q_si_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1,<vscale x 2 x i64> %v2, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %addr) {
+define void @st3q_si_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1,<vscale x 2 x i64> %v2, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3q_si_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -475,16 +475,16 @@ define void @st3q_si_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1,<vscale
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3q { z0.q - z2.q }, p0, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 21
+  %base = getelementptr <vscale x 2 x i64>, ptr %addr, i64 21
   call void @llvm.aarch64.sve.st3q.nxv2i64(<vscale x 2 x i64> %v0,
                                            <vscale x 2 x i64> %v1,
                                            <vscale x 2 x i64> %v2,
                                            <vscale x 2 x i1> %pred,
-                                           i64* %base)
+                                           ptr %base)
   ret void
 }
 
-define void @st3q_si_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %addr) {
+define void @st3q_si_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3q_si_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -492,16 +492,16 @@ define void @st3q_si_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vsca
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3q { z0.q - z2.q }, p0, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 21
+  %base = getelementptr <vscale x 8 x half>, ptr %addr, i64 21
   call void @llvm.aarch64.sve.st3q.nxv8f16(<vscale x 8 x half> %v0,
                                            <vscale x 8 x half> %v1,
                                            <vscale x 8 x half> %v2,
                                            <vscale x 8 x i1> %pred,
-                                           half* %base)
+                                           ptr %base)
   ret void
 }
 
-define void @st3q_si_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x i1> %pred, <vscale x 4 x float>* %addr) {
+define void @st3q_si_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3q_si_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -509,16 +509,16 @@ define void @st3q_si_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vs
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3q { z0.q - z2.q }, p0, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 21
+  %base = getelementptr <vscale x 4 x float>, ptr %addr, i64 21
   call void @llvm.aarch64.sve.st3q.nxv4f32(<vscale x 4 x float> %v0,
                                            <vscale x 4 x float> %v1,
                                            <vscale x 4 x float> %v2,
                                            <vscale x 4 x i1> %pred,
-                                           float* %base)
+                                           ptr %base)
   ret void
 }
 
-define void @st3q_si_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %addr) {
+define void @st3q_si_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3q_si_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -526,16 +526,16 @@ define void @st3q_si_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3q { z0.q - z2.q }, p0, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 21
+  %base = getelementptr <vscale x 2 x double>, ptr %addr, i64 21
   call void @llvm.aarch64.sve.st3q.nxv2f64(<vscale x 2 x double> %v0,
                                            <vscale x 2 x double> %v1,
                                            <vscale x 2 x double> %v2,
                                            <vscale x 2 x i1> %pred,
-                                           double* %base)
+                                           ptr %base)
   ret void
 }
 
-define void @st3q_si_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x i1> %pred, <vscale x 8 x bfloat>* %addr) {
+define void @st3q_si_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3q_si_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -543,12 +543,12 @@ define void @st3q_si_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1,
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3q { z0.q - z2.q }, p0, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 21
+  %base = getelementptr <vscale x 8 x bfloat>, ptr %addr, i64 21
   call void @llvm.aarch64.sve.st3q.nxv8bf16(<vscale x 8 x bfloat> %v0,
                                             <vscale x 8 x bfloat> %v1,
                                             <vscale x 8 x bfloat> %v2,
                                             <vscale x 8 x i1> %pred,
-                                            bfloat* %base)
+                                            ptr %base)
   ret void
 }
 
@@ -707,7 +707,7 @@ define void @st4q_ss_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1,
   ret void
 }
 
-define void @st4q_si_i8_off32(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2,<vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st4q_si_i8_off32(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2,<vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4q_si_i8_off32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -716,17 +716,17 @@ define void @st4q_si_i8_off32(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <v
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4q { z0.q - z3.q }, p0, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -32
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 -32
   call void @llvm.aarch64.sve.st4q.nxv16i8(<vscale x 16 x i8> %v0,
                                            <vscale x 16 x i8> %v1,
                                            <vscale x 16 x i8> %v2,
                                            <vscale x 16 x i8> %v3,
                                            <vscale x 16 x i1> %pred,
-                                           i8* %base)
+                                           ptr %base)
   ret void
 }
 
-define void @st4q_si_i8_off28(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2,<vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
+define void @st4q_si_i8_off28(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2,<vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4q_si_i8_off28:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -735,17 +735,17 @@ define void @st4q_si_i8_off28(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <v
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4q { z0.q - z3.q }, p0, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 28
+  %base = getelementptr <vscale x 16 x i8>, ptr %addr, i64 28
   call void @llvm.aarch64.sve.st4q.nxv16i8(<vscale x 16 x i8> %v0,
                                            <vscale x 16 x i8> %v1,
                                            <vscale x 16 x i8> %v2,
                                            <vscale x 16 x i8> %v3,
                                            <vscale x 16 x i1> %pred,
-                                           i8* %base)
+                                           ptr %base)
   ret void
 }
 
-define void @st4q_si_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i16> %v3,  <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %addr) {
+define void @st4q_si_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i16> %v3,  <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4q_si_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -754,17 +754,17 @@ define void @st4q_si_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4q { z0.q - z3.q }, p0, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 28
+  %base = getelementptr <vscale x 8 x i16>, ptr %addr, i64 28
   call void @llvm.aarch64.sve.st4q.nxv8i16(<vscale x 8 x i16> %v0,
                                            <vscale x 8 x i16> %v1,
                                            <vscale x 8 x i16> %v2,
                                            <vscale x 8 x i16> %v3,
                                            <vscale x 8 x i1> %pred,
-                                           i8* %base)
+                                           ptr %base)
   ret void
 }
 
-define void @st4q_si_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i32> %v3, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %addr) {
+define void @st4q_si_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i32> %v3, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4q_si_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -773,17 +773,17 @@ define void @st4q_si_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4q { z0.q - z3.q }, p0, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
-  %base1 = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 28
+  %base1 = getelementptr <vscale x 4 x i32>, ptr %addr, i64 28
   call void @llvm.aarch64.sve.st4q.nxv4i32(<vscale x 4 x i32> %v0,
                                            <vscale x 4 x i32> %v1,
                                            <vscale x 4 x i32> %v2,
                                            <vscale x 4 x i32> %v3,
                                            <vscale x 4 x i1> %pred,
-                                           i32* %base1)
+                                           ptr %base1)
   ret void
 }
 
-define void @st4q_si_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i64> %v3, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %addr) {
+define void @st4q_si_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i64> %v3, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4q_si_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -792,17 +792,17 @@ define void @st4q_si_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4q { z0.q - z3.q }, p0, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 28
+  %base = getelementptr <vscale x 2 x i64>, ptr %addr, i64 28
   call void @llvm.aarch64.sve.st4q.nxv2i64(<vscale x 2 x i64> %v0,
                                            <vscale x 2 x i64> %v1,
                                            <vscale x 2 x i64> %v2,
                                            <vscale x 2 x i64> %v3,
                                            <vscale x 2 x i1> %pred,
-                                           i64* %base)
+                                           ptr %base)
   ret void
 }
 
-define void @st4q_si_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %addr) {
+define void @st4q_si_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4q_si_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -811,17 +811,17 @@ define void @st4q_si_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vsca
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4q { z0.q - z3.q }, p0, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 28
+  %base = getelementptr <vscale x 8 x half>, ptr %addr, i64 28
   call void @llvm.aarch64.sve.st4q.nxv8f16(<vscale x 8 x half> %v0,
                                            <vscale x 8 x half> %v1,
                                            <vscale x 8 x half> %v2,
                                            <vscale x 8 x half> %v3,
                                            <vscale x 8 x i1> %pred,
-                                           half* %base)
+                                           ptr %base)
   ret void
 }
 
-define void @st4q_si_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2,<vscale x 4 x float> %v3,  <vscale x 4 x i1> %pred, <vscale x 4 x float>* %addr) {
+define void @st4q_si_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2,<vscale x 4 x float> %v3,  <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4q_si_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -830,17 +830,17 @@ define void @st4q_si_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vs
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4q { z0.q - z3.q }, p0, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 28
+  %base = getelementptr <vscale x 4 x float>, ptr %addr, i64 28
   call void @llvm.aarch64.sve.st4q.nxv4f32(<vscale x 4 x float> %v0,
                                            <vscale x 4 x float> %v1,
                                            <vscale x 4 x float> %v2,
                                            <vscale x 4 x float> %v3,
                                            <vscale x 4 x i1> %pred,
-                                           float* %base)
+                                           ptr %base)
   ret void
 }
 
-define void @st4q_si_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %addr) {
+define void @st4q_si_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4q_si_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -849,17 +849,17 @@ define void @st4q_si_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4q { z0.q - z3.q }, p0, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 28
+  %base = getelementptr <vscale x 2 x double>, ptr %addr, i64 28
   call void @llvm.aarch64.sve.st4q.nxv2f64(<vscale x 2 x double> %v0,
                                            <vscale x 2 x double> %v1,
                                            <vscale x 2 x double> %v2,
                                            <vscale x 2 x double> %v3,
                                            <vscale x 2 x i1> %pred,
-                                           double* %base)
+                                           ptr %base)
   ret void
 }
 
-define void @st4q_si_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x i1> %pred, <vscale x 8 x bfloat>* %addr) {
+define void @st4q_si_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4q_si_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -868,13 +868,13 @@ define void @st4q_si_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1,
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4q { z0.q - z3.q }, p0, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
-  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 28
+  %base = getelementptr <vscale x 8 x bfloat>, ptr %addr, i64 28
   call void @llvm.aarch64.sve.st4q.nxv8bf16(<vscale x 8 x bfloat> %v0,
                                             <vscale x 8 x bfloat> %v1,
                                             <vscale x 8 x bfloat> %v2,
                                             <vscale x 8 x bfloat> %v3,
                                             <vscale x 8 x i1> %pred,
-                                            bfloat* %base)
+                                            ptr %base)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-st1-single.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-st1-single.ll
index 894c647453f54..4ffc0b42d0711 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-st1-single.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-st1-single.ll
@@ -13,27 +13,27 @@ define void @test_svst1wq_i32_ss(<vscale x 4 x i32> %zt, <vscale x 1 x i1> %pred
   ret void
 }
 
-define void @test_svst1wq_i32_si(<vscale x 4 x i32> %zt, <vscale x 1 x i1> %pred, <vscale x 1 x i32>* %base) {
+define void @test_svst1wq_i32_si(<vscale x 4 x i32> %zt, <vscale x 1 x i1> %pred, ptr %base) {
 ; CHECK-LABEL: test_svst1wq_i32_si:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.q }, p0, [x0, #-8, mul vl]
 ; CHECK-NEXT:    st1w { z0.q }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %gep1 = getelementptr inbounds <vscale x 1 x i32>, <vscale x 1 x i32>* %base, i64 -8
+  %gep1 = getelementptr inbounds <vscale x 1 x i32>, ptr %base, i64 -8
   call void @llvm.aarch64.sve.st1wq.nxv4i32(<vscale x 4 x i32> %zt, <vscale x 1 x i1> %pred, ptr %gep1)
 
-  %gep2 = getelementptr inbounds <vscale x 1 x i32>, <vscale x 1 x i32>* %base, i64 7
+  %gep2 = getelementptr inbounds <vscale x 1 x i32>, ptr %base, i64 7
   call void @llvm.aarch64.sve.st1wq.nxv4i32(<vscale x 4 x i32> %zt, <vscale x 1 x i1> %pred, ptr %gep2)
   ret void
 }
 
-define void @test_svst1wq_i32_out_of_bound(<vscale x 4 x i32> %zt, <vscale x 1 x i1> %pred, <vscale x 1 x i32>* %base) {
+define void @test_svst1wq_i32_out_of_bound(<vscale x 4 x i32> %zt, <vscale x 1 x i1> %pred, ptr %base) {
 ; CHECK-LABEL: test_svst1wq_i32_out_of_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    addvl x8, x0, #2
 ; CHECK-NEXT:    st1w { z0.q }, p0, [x8]
 ; CHECK-NEXT:    ret
-  %gep = getelementptr inbounds <vscale x 1 x i32>, <vscale x 1 x i32>* %base, i64 8
+  %gep = getelementptr inbounds <vscale x 1 x i32>, ptr %base, i64 8
   call void @llvm.aarch64.sve.st1wq.nxv4i32(<vscale x 4 x i32> %zt, <vscale x 1 x i1> %pred, ptr %gep)
   ret void
 }
@@ -48,16 +48,16 @@ define void @test_svst1wq_f32_ss(<vscale x 4 x float> %zt, <vscale x 1 x i1> %pr
   ret void
 }
 
-define void @test_svst1wq_f32_si(<vscale x 4 x float> %zt, <vscale x 1 x i1> %pred, <vscale x 1 x float>* %base) {
+define void @test_svst1wq_f32_si(<vscale x 4 x float> %zt, <vscale x 1 x i1> %pred, ptr %base) {
 ; CHECK-LABEL: test_svst1wq_f32_si:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.q }, p0, [x0, #-8, mul vl]
 ; CHECK-NEXT:    st1w { z0.q }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %gep1 = getelementptr inbounds <vscale x 1 x float>, <vscale x 1 x float>* %base, i64 -8
+  %gep1 = getelementptr inbounds <vscale x 1 x float>, ptr %base, i64 -8
   call void @llvm.aarch64.sve.st1wq.nxv4f32(<vscale x 4 x float> %zt, <vscale x 1 x i1> %pred, ptr %gep1)
 
-  %gep2 = getelementptr inbounds <vscale x 1 x float>, <vscale x 1 x float>* %base, i64 7
+  %gep2 = getelementptr inbounds <vscale x 1 x float>, ptr %base, i64 7
   call void @llvm.aarch64.sve.st1wq.nxv4f32(<vscale x 4 x float> %zt, <vscale x 1 x i1> %pred, ptr %gep2)
   ret void
 }
@@ -74,27 +74,27 @@ define void @test_svst1dq_i64_ss(<vscale x 2 x i64> %zt, <vscale x 1 x i1> %pred
   ret void
 }
 
-define void @test_svst1dq_i64_si(<vscale x 2 x i64> %zt, <vscale x 1 x i1> %pred, <vscale x 1 x i64>* %base) {
+define void @test_svst1dq_i64_si(<vscale x 2 x i64> %zt, <vscale x 1 x i1> %pred, ptr %base) {
 ; CHECK-LABEL: test_svst1dq_i64_si:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.q }, p0, [x0, #-8, mul vl]
 ; CHECK-NEXT:    st1d { z0.q }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %gep1 = getelementptr inbounds <vscale x 1 x i64>, <vscale x 1 x i64>* %base, i64 -8
+  %gep1 = getelementptr inbounds <vscale x 1 x i64>, ptr %base, i64 -8
   call void @llvm.aarch64.sve.st1dq.nxv2i64(<vscale x 2 x i64> %zt, <vscale x 1 x i1> %pred, ptr %gep1)
 
-  %gep2 = getelementptr inbounds <vscale x 1 x i64>, <vscale x 1 x i64>* %base, i64 7
+  %gep2 = getelementptr inbounds <vscale x 1 x i64>, ptr %base, i64 7
   call void @llvm.aarch64.sve.st1dq.nxv2i64(<vscale x 2 x i64> %zt, <vscale x 1 x i1> %pred, ptr %gep2)
   ret void
 }
 
-define void @test_svst1dq_i64_out_of_bound(<vscale x 2 x i64> %zt, <vscale x 1 x i1> %pred, <vscale x 1 x i64>* %base) {
+define void @test_svst1dq_i64_out_of_bound(<vscale x 2 x i64> %zt, <vscale x 1 x i1> %pred, ptr %base) {
 ; CHECK-LABEL: test_svst1dq_i64_out_of_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    addvl x8, x0, #-5
 ; CHECK-NEXT:    st1d { z0.q }, p0, [x8]
 ; CHECK-NEXT:    ret
-  %gep = getelementptr inbounds <vscale x 1 x i64>, <vscale x 1 x i64>* %base, i64 -10
+  %gep = getelementptr inbounds <vscale x 1 x i64>, ptr %base, i64 -10
   call void @llvm.aarch64.sve.st1dq.nxv2i64(<vscale x 2 x i64> %zt, <vscale x 1 x i1> %pred, ptr %gep)
   ret void
 }
@@ -109,16 +109,16 @@ define void @test_svst1dq_f64_ss(<vscale x 2 x double> %zt, <vscale x 1 x i1> %p
   ret void
 }
 
-define void @test_svst1dq_f64_si(<vscale x 2 x double> %zt, <vscale x 1 x i1> %pred, <vscale x 1 x double>* %base) {
+define void @test_svst1dq_f64_si(<vscale x 2 x double> %zt, <vscale x 1 x i1> %pred, ptr %base) {
 ; CHECK-LABEL: test_svst1dq_f64_si:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.q }, p0, [x0, #-8, mul vl]
 ; CHECK-NEXT:    st1d { z0.q }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %gep1 = getelementptr inbounds <vscale x 1 x double>, <vscale x 1 x double>* %base, i64 -8
+  %gep1 = getelementptr inbounds <vscale x 1 x double>, ptr %base, i64 -8
   call void @llvm.aarch64.sve.st1dq.nxv2f64(<vscale x 2 x double> %zt, <vscale x 1 x i1> %pred, ptr %gep1)
 
-  %gep2 = getelementptr inbounds <vscale x 1 x double>, <vscale x 1 x double>* %base, i64 7
+  %gep2 = getelementptr inbounds <vscale x 1 x double>, ptr %base, i64 7
   call void @llvm.aarch64.sve.st1dq.nxv2f64(<vscale x 2 x double> %zt, <vscale x 1 x i1> %pred, ptr %gep2)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/swift-error-unreachable-use.ll b/llvm/test/CodeGen/AArch64/swift-error-unreachable-use.ll
index 81fabc3f175c3..d3abc27a53dad 100644
--- a/llvm/test/CodeGen/AArch64/swift-error-unreachable-use.ll
+++ b/llvm/test/CodeGen/AArch64/swift-error-unreachable-use.ll
@@ -3,7 +3,7 @@
 ; RUN: llc -mtriple aarch64-apple-macosx %s -filetype asm -o - | FileCheck %s
 ; Regression test for https://github.com/llvm/llvm-project/issues/59751
 
-define void @"func"(i32** swifterror %0) #0 {
+define void @"func"(ptr swifterror %0) #0 {
 ; CHECK-LABEL: func:
 ; CHECK:       {{.*}}%bb.0:
 ; CHECK-NEXT:    b {{\.?}}LBB0_2
@@ -23,9 +23,9 @@ common.ret:
   ret void
 
 UelOc2l.exit:
-  %a = getelementptr inbounds [754 x i8*], [754 x i8*]* undef, i32 undef, i32 undef
-  %b = load i8*, i8** %a, align 8
-  %c = bitcast i8* %b to void ()*
+  %a = getelementptr inbounds [754 x ptr], ptr undef, i32 undef, i32 undef
+  %b = load ptr, ptr %a, align 8
+  %c = bitcast ptr %b to ptr
   call void %c()
   br label %common.ret
 

diff  --git a/llvm/test/CodeGen/AArch64/taildup-addrtaken.mir b/llvm/test/CodeGen/AArch64/taildup-addrtaken.mir
index f201db74e9745..5acebceff291d 100644
--- a/llvm/test/CodeGen/AArch64/taildup-addrtaken.mir
+++ b/llvm/test/CodeGen/AArch64/taildup-addrtaken.mir
@@ -6,10 +6,10 @@
   target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
   target triple = "aarch64-apple-ios"
   
-  @bb1 = global i8* blockaddress(@foo, %final)
-  @bb2 = global i8* inttoptr (i32 1 to i8*)
+  @bb1 = global ptr blockaddress(@foo, %final)
+  @bb2 = global ptr inttoptr (i32 1 to ptr)
   
-  define void @foo(i1 %tst, i32* %ptr) {
+  define void @foo(i1 %tst, ptr %ptr) {
     br i1 %tst, label %left, label %right
   
   left:                                             ; preds = %0
@@ -22,7 +22,7 @@
   
   next:                                             ; preds = %right, %left
     %val = phi i32 [ %val.left, %left ], [ %val.right, %right ]
-    store i32 %val, i32* %ptr, align 4
+    store i32 %val, ptr %ptr, align 4
     br label %final
   
   final:                                            ; preds = %next

diff  --git a/llvm/test/CodeGen/AArch64/tailmerging_in_mbp.ll b/llvm/test/CodeGen/AArch64/tailmerging_in_mbp.ll
index 52540bc9c0274..54c200ec4c3d5 100644
--- a/llvm/test/CodeGen/AArch64/tailmerging_in_mbp.ll
+++ b/llvm/test/CodeGen/AArch64/tailmerging_in_mbp.ll
@@ -6,7 +6,7 @@
 ; CHECK-NEXT:  LBB0_8:
 ; CHECK-NEXT:    mov	 x8, x9
 ; CHECK-NEXT:  LBB0_9:
-define i64 @test(i64 %n, i64* %a, i64* %b, i64* %c, i64* %d, i64* %e, i64* %f) {
+define i64 @test(i64 %n, ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f) {
 entry:
   %cmp28 = icmp sgt i64 %n, 1
   br i1 %cmp28, label %for.body, label %for.end
@@ -14,10 +14,10 @@ entry:
 for.body:                                         ; preds = %for.body.lr.ph, %if.end
   %j = phi i64 [ %n, %entry ], [ %div, %if.end ]
   %div = lshr i64 %j, 1
-  %a.arrayidx = getelementptr inbounds i64, i64* %a, i64 %div
-  %a.j = load i64, i64* %a.arrayidx
-  %b.arrayidx = getelementptr inbounds i64, i64* %b, i64 %div
-  %b.j = load i64, i64* %b.arrayidx
+  %a.arrayidx = getelementptr inbounds i64, ptr %a, i64 %div
+  %a.j = load i64, ptr %a.arrayidx
+  %b.arrayidx = getelementptr inbounds i64, ptr %b, i64 %div
+  %b.j = load i64, ptr %b.arrayidx
   %cmp.i = icmp slt i64 %a.j, %b.j
   br i1 %cmp.i, label %for.end.loopexit, label %cond.false.i
 
@@ -26,10 +26,10 @@ cond.false.i:                                     ; preds = %for.body
   br i1 %cmp4.i, label %if.end, label %cond.false6.i
 
 cond.false6.i:                                    ; preds = %cond.false.i
-  %c.arrayidx = getelementptr inbounds i64, i64* %c, i64 %div
-  %c.j = load i64, i64* %c.arrayidx
-  %d.arrayidx = getelementptr inbounds i64, i64* %d, i64 %div
-  %d.j = load i64, i64* %d.arrayidx
+  %c.arrayidx = getelementptr inbounds i64, ptr %c, i64 %div
+  %c.j = load i64, ptr %c.arrayidx
+  %d.arrayidx = getelementptr inbounds i64, ptr %d, i64 %div
+  %d.j = load i64, ptr %d.arrayidx
   %cmp9.i = icmp slt i64 %c.j, %d.j
   br i1 %cmp9.i, label %for.end.loopexit, label %cond.false11.i
 
@@ -38,10 +38,10 @@ cond.false11.i:                                   ; preds = %cond.false6.i
   br i1 %cmp14.i, label %if.end, label %cond.false12.i
 
 cond.false12.i:                           ; preds = %cond.false11.i
-  %e.arrayidx = getelementptr inbounds i64, i64* %e, i64 %div
-  %e.j = load i64, i64* %e.arrayidx
-  %f.arrayidx = getelementptr inbounds i64, i64* %f, i64 %div
-  %f.j = load i64, i64* %f.arrayidx
+  %e.arrayidx = getelementptr inbounds i64, ptr %e, i64 %div
+  %e.j = load i64, ptr %e.arrayidx
+  %f.arrayidx = getelementptr inbounds i64, ptr %f, i64 %div
+  %f.j = load i64, ptr %f.arrayidx
   %cmp19.i = icmp sgt i64 %e.j, %f.j
   br i1 %cmp19.i, label %if.end, label %for.end.loopexit
 

diff  --git a/llvm/test/CodeGen/AArch64/tiny-model-pic.ll b/llvm/test/CodeGen/AArch64/tiny-model-pic.ll
index 3b878aba042b6..29f813ac974f2 100644
--- a/llvm/test/CodeGen/AArch64/tiny-model-pic.ll
+++ b/llvm/test/CodeGen/AArch64/tiny-model-pic.ll
@@ -7,7 +7,7 @@
 
 @src = external local_unnamed_addr global [65536 x i8], align 1
 @dst = external global [65536 x i8], align 1
-@ptr = external local_unnamed_addr global i8*, align 8
+@ptr = external local_unnamed_addr global ptr, align 8
 
 define dso_preemptable void @foo1() {
 ; CHECK-LABEL: foo1:
@@ -42,8 +42,8 @@ define dso_preemptable void @foo1() {
 ; CHECK-PIC-GLOBISEL-NEXT:    strb w8, [x9]
 ; CHECK-PIC-GLOBISEL-NEXT:    ret
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @src, i64 0, i64 0), align 1
-  store i8 %0, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @dst, i64 0, i64 0), align 1
+  %0 = load i8, ptr @src, align 1
+  store i8 %0, ptr @dst, align 1
   ret void
 }
 
@@ -76,7 +76,7 @@ define dso_preemptable void @foo2() {
 ; CHECK-PIC-GLOBISEL-NEXT:    str x9, [x8]
 ; CHECK-PIC-GLOBISEL-NEXT:    ret
 entry:
-  store i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @dst, i64 0, i64 0), i8** @ptr, align 8
+  store ptr @dst, ptr @ptr, align 8
   ret void
 }
 
@@ -119,15 +119,15 @@ define dso_preemptable void @foo3() {
 ; CHECK-PIC-GLOBISEL-NEXT:    strb w8, [x9]
 ; CHECK-PIC-GLOBISEL-NEXT:    ret
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @src, i64 0, i64 0), align 1
-  %1 = load i8*, i8** @ptr, align 8
-  store i8 %0, i8* %1, align 1
+  %0 = load i8, ptr @src, align 1
+  %1 = load ptr, ptr @ptr, align 8
+  store i8 %0, ptr %1, align 1
   ret void
 }
 
 @lsrc = internal global i8 0, align 4
 @ldst = internal global i8 0, align 4
-@lptr = internal global i8* null, align 8
+@lptr = internal global ptr null, align 8
 
 define dso_preemptable void @bar1() {
 ; CHECK-LABEL: bar1:
@@ -162,8 +162,8 @@ define dso_preemptable void @bar1() {
 ; CHECK-PIC-GLOBISEL-NEXT:    strb w8, [x9]
 ; CHECK-PIC-GLOBISEL-NEXT:    ret
 entry:
-  %0 = load i8, i8* @lsrc, align 4
-  store i8 %0, i8* @ldst, align 4
+  %0 = load i8, ptr @lsrc, align 4
+  store i8 %0, ptr @ldst, align 4
   ret void
 }
 
@@ -196,7 +196,7 @@ define dso_preemptable void @bar2() {
 ; CHECK-PIC-GLOBISEL-NEXT:    str x9, [x8]
 ; CHECK-PIC-GLOBISEL-NEXT:    ret
 entry:
-  store i8* @ldst, i8** @lptr, align 8
+  store ptr @ldst, ptr @lptr, align 8
   ret void
 }
 
@@ -237,9 +237,9 @@ define dso_preemptable void @bar3() {
 ; CHECK-PIC-GLOBISEL-NEXT:    strb w8, [x9]
 ; CHECK-PIC-GLOBISEL-NEXT:    ret
 entry:
-  %0 = load i8, i8* @lsrc, align 4
-  %1 = load i8*, i8** @lptr, align 8
-  store i8 %0, i8* %1, align 1
+  %0 = load i8, ptr @lsrc, align 4
+  %1 = load ptr, ptr @lptr, align 8
+  store i8 %0, ptr %1, align 1
   ret void
 }
 
@@ -280,8 +280,8 @@ define dso_preemptable void @baz1() {
 ; CHECK-PIC-GLOBISEL-NEXT:    strb w8, [x9]
 ; CHECK-PIC-GLOBISEL-NEXT:    ret
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @lbsrc, i64 0, i64 0), align 4
-  store i8 %0, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @lbdst, i64 0, i64 0), align 4
+  %0 = load i8, ptr @lbsrc, align 4
+  store i8 %0, ptr @lbdst, align 4
   ret void
 }
 
@@ -314,7 +314,7 @@ define dso_preemptable void @baz2() {
 ; CHECK-PIC-GLOBISEL-NEXT:    str x9, [x8]
 ; CHECK-PIC-GLOBISEL-NEXT:    ret
 entry:
-  store i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @lbdst, i64 0, i64 0), i8** @lptr, align 8
+  store ptr @lbdst, ptr @lptr, align 8
   ret void
 }
 
@@ -355,16 +355,16 @@ define dso_preemptable void @baz3() {
 ; CHECK-PIC-GLOBISEL-NEXT:    strb w8, [x9]
 ; CHECK-PIC-GLOBISEL-NEXT:    ret
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @lbsrc, i64 0, i64 0), align 4
-  %1 = load i8*, i8** @lptr, align 8
-  store i8 %0, i8* %1, align 1
+  %0 = load i8, ptr @lbsrc, align 4
+  %1 = load ptr, ptr @lptr, align 8
+  store i8 %0, ptr %1, align 1
   ret void
 }
 
 
 declare void @func(...)
 
-define dso_preemptable i8* @externfuncaddr() {
+define dso_preemptable ptr @externfuncaddr() {
 ; CHECK-LABEL: externfuncaddr:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x0, :got:func
@@ -385,10 +385,10 @@ define dso_preemptable i8* @externfuncaddr() {
 ; CHECK-PIC-GLOBISEL-NEXT:    ldr x0, :got:func
 ; CHECK-PIC-GLOBISEL-NEXT:    ret
 entry:
-      ret i8* bitcast (void (...)* @func to i8*)
+      ret ptr @func
 }
 
-define dso_preemptable i8* @localfuncaddr() {
+define dso_preemptable ptr @localfuncaddr() {
 ; CHECK-LABEL: localfuncaddr:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adr x0, externfuncaddr
@@ -409,6 +409,6 @@ define dso_preemptable i8* @localfuncaddr() {
 ; CHECK-PIC-GLOBISEL-NEXT:    ldr x0, :got:externfuncaddr
 ; CHECK-PIC-GLOBISEL-NEXT:    ret
 entry:
-      ret i8* bitcast (i8* ()* @externfuncaddr to i8*)
+      ret ptr @externfuncaddr
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/tiny-model-static.ll b/llvm/test/CodeGen/AArch64/tiny-model-static.ll
index eb27cf8a32db4..a62d4a07cc535 100644
--- a/llvm/test/CodeGen/AArch64/tiny-model-static.ll
+++ b/llvm/test/CodeGen/AArch64/tiny-model-static.ll
@@ -7,7 +7,7 @@
 
 @src = external local_unnamed_addr global [65536 x i8], align 1
 @dst = external global [65536 x i8], align 1
-@ptr = external local_unnamed_addr global i8*, align 8
+@ptr = external local_unnamed_addr global ptr, align 8
 
 define dso_local void @foo1() {
 ; CHECK-LABEL: foo1:
@@ -26,8 +26,8 @@ define dso_local void @foo1() {
 ; CHECK-GLOBISEL-NEXT:    strb w8, [x9]
 ; CHECK-GLOBISEL-NEXT:    ret
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @src, i64 0, i64 0), align 1
-  store i8 %0, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @dst, i64 0, i64 0), align 1
+  %0 = load i8, ptr @src, align 1
+  store i8 %0, ptr @dst, align 1
   ret void
 }
 
@@ -46,7 +46,7 @@ define dso_local void @foo2() {
 ; CHECK-GLOBISEL-NEXT:    str x9, [x8]
 ; CHECK-GLOBISEL-NEXT:    ret
 entry:
-  store i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @dst, i64 0, i64 0), i8** @ptr, align 8
+  store ptr @dst, ptr @ptr, align 8
   ret void
 }
 
@@ -71,15 +71,15 @@ define dso_local void @foo3() {
 ; CHECK-GLOBISEL-NEXT:    strb w8, [x9]
 ; CHECK-GLOBISEL-NEXT:    ret
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @src, i64 0, i64 0), align 1
-  %1 = load i8*, i8** @ptr, align 8
-  store i8 %0, i8* %1, align 1
+  %0 = load i8, ptr @src, align 1
+  %1 = load ptr, ptr @ptr, align 8
+  store i8 %0, ptr %1, align 1
   ret void
 }
 
 @lsrc = internal global i8 0, align 4
 @ldst = internal global i8 0, align 4
-@lptr = internal global i8* null, align 8
+@lptr = internal global ptr null, align 8
 
 define dso_local void @bar1() {
 ; CHECK-LABEL: bar1:
@@ -98,8 +98,8 @@ define dso_local void @bar1() {
 ; CHECK-GLOBISEL-NEXT:    strb w8, [x9]
 ; CHECK-GLOBISEL-NEXT:    ret
 entry:
-  %0 = load i8, i8* @lsrc, align 4
-  store i8 %0, i8* @ldst, align 4
+  %0 = load i8, ptr @lsrc, align 4
+  store i8 %0, ptr @ldst, align 4
   ret void
 }
 
@@ -118,7 +118,7 @@ define dso_local void @bar2() {
 ; CHECK-GLOBISEL-NEXT:    str x9, [x8]
 ; CHECK-GLOBISEL-NEXT:    ret
 entry:
-  store i8* @ldst, i8** @lptr, align 8
+  store ptr @ldst, ptr @lptr, align 8
   ret void
 }
 
@@ -142,9 +142,9 @@ define dso_local void @bar3() {
 ; CHECK-GLOBISEL-NEXT:    strb w8, [x9]
 ; CHECK-GLOBISEL-NEXT:    ret
 entry:
-  %0 = load i8, i8* @lsrc, align 4
-  %1 = load i8*, i8** @lptr, align 8
-  store i8 %0, i8* %1, align 1
+  %0 = load i8, ptr @lsrc, align 4
+  %1 = load ptr, ptr @lptr, align 8
+  store i8 %0, ptr %1, align 1
   ret void
 }
 
@@ -169,8 +169,8 @@ define dso_local void @baz1() {
 ; CHECK-GLOBISEL-NEXT:    strb w8, [x9]
 ; CHECK-GLOBISEL-NEXT:    ret
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @lbsrc, i64 0, i64 0), align 4
-  store i8 %0, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @lbdst, i64 0, i64 0), align 4
+  %0 = load i8, ptr @lbsrc, align 4
+  store i8 %0, ptr @lbdst, align 4
   ret void
 }
 
@@ -189,7 +189,7 @@ define dso_local void @baz2() {
 ; CHECK-GLOBISEL-NEXT:    str x9, [x8]
 ; CHECK-GLOBISEL-NEXT:    ret
 entry:
-  store i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @lbdst, i64 0, i64 0), i8** @lptr, align 8
+  store ptr @lbdst, ptr @lptr, align 8
   ret void
 }
 
@@ -213,16 +213,16 @@ define dso_local void @baz3() {
 ; CHECK-GLOBISEL-NEXT:    strb w8, [x9]
 ; CHECK-GLOBISEL-NEXT:    ret
 entry:
-  %0 = load i8, i8* getelementptr inbounds ([65536 x i8], [65536 x i8]* @lbsrc, i64 0, i64 0), align 4
-  %1 = load i8*, i8** @lptr, align 8
-  store i8 %0, i8* %1, align 1
+  %0 = load i8, ptr @lbsrc, align 4
+  %1 = load ptr, ptr @lptr, align 8
+  store i8 %0, ptr %1, align 1
   ret void
 }
 
 
 declare void @func(...)
 
-define dso_local i8* @externfuncaddr() {
+define dso_local ptr @externfuncaddr() {
 ; CHECK-LABEL: externfuncaddr:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x0, :got:func
@@ -233,10 +233,10 @@ define dso_local i8* @externfuncaddr() {
 ; CHECK-GLOBISEL-NEXT:    ldr x0, :got:func
 ; CHECK-GLOBISEL-NEXT:    ret
 entry:
-      ret i8* bitcast (void (...)* @func to i8*)
+      ret ptr @func
 }
 
-define dso_local i8* @localfuncaddr() {
+define dso_local ptr @localfuncaddr() {
 ; CHECK-LABEL: localfuncaddr:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adr x0, externfuncaddr
@@ -247,5 +247,5 @@ define dso_local i8* @localfuncaddr() {
 ; CHECK-GLOBISEL-NEXT:    adr x0, externfuncaddr
 ; CHECK-GLOBISEL-NEXT:    ret
 entry:
-      ret i8* bitcast (i8* ()* @externfuncaddr to i8*)
+      ret ptr @externfuncaddr
 }

diff  --git a/llvm/test/CodeGen/AArch64/unwind-preserved-from-mir.mir b/llvm/test/CodeGen/AArch64/unwind-preserved-from-mir.mir
index 276ba08834a0a..7faab8c45aa7a 100644
--- a/llvm/test/CodeGen/AArch64/unwind-preserved-from-mir.mir
+++ b/llvm/test/CodeGen/AArch64/unwind-preserved-from-mir.mir
@@ -17,7 +17,7 @@
     ret <4 x i32> %result
 
   .Lunwind:                                         ; preds = %0
-    %lp = landingpad { i8*, i32 }
+    %lp = landingpad { ptr, i32 }
             cleanup
     ret <4 x i32> %v
   }

diff  --git a/llvm/test/CodeGen/AArch64/v3f-to-int.ll b/llvm/test/CodeGen/AArch64/v3f-to-int.ll
index 9c9dd5ed7e98e..a3c9c8fbe0a3b 100644
--- a/llvm/test/CodeGen/AArch64/v3f-to-int.ll
+++ b/llvm/test/CodeGen/AArch64/v3f-to-int.ll
@@ -11,7 +11,7 @@ bb:
   %0 = shufflevector <4 x float> zeroinitializer, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
   %1 = fmul reassoc nnan ninf nsz contract afn <3 x float> %0, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
   %2 = fptoui <3 x float> %1 to <3 x i8>
-  %3 = bitcast i8* undef to <3 x i8>*
-  store <3 x i8> %2, <3 x i8>* %3, align 1
+  %3 = bitcast ptr undef to ptr
+  store <3 x i8> %2, ptr %3, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/win-catchpad-nested-cxx.ll b/llvm/test/CodeGen/AArch64/win-catchpad-nested-cxx.ll
index e3e439101b5eb..6d0e9d6929709 100644
--- a/llvm/test/CodeGen/AArch64/win-catchpad-nested-cxx.ll
+++ b/llvm/test/CodeGen/AArch64/win-catchpad-nested-cxx.ll
@@ -18,7 +18,7 @@
 declare void @f(i32 %p)
 declare i32 @__CxxFrameHandler3(...)
 
-define i32 @try_in_catch() personality i32 (...)* @__CxxFrameHandler3 {
+define i32 @try_in_catch() personality ptr @__CxxFrameHandler3 {
 entry:
   invoke void @f(i32 1)
           to label %try.cont unwind label %catch.dispatch.1
@@ -28,7 +28,7 @@ try.cont:
 catch.dispatch.1:
   %cs1 = catchswitch within none [label %handler1] unwind to caller
 handler1:
-  %h1 = catchpad within %cs1 [i8* null, i32 64, i8* null]
+  %h1 = catchpad within %cs1 [ptr null, i32 64, ptr null]
   invoke void @f(i32 2) [ "funclet"(token %h1) ]
           to label %catchret1 unwind label %catch.dispatch.2
 catchret1:
@@ -37,7 +37,7 @@ catchret1:
 catch.dispatch.2:
   %cs2 = catchswitch within %h1 [label %handler2] unwind to caller
 handler2:
-  %h2 = catchpad within %cs2 [i8* null, i32 64, i8* null]
+  %h2 = catchpad within %cs2 [ptr null, i32 64, ptr null]
   call void @f(i32 3)
   catchret from %h2 to label %catchret1
 }

diff  --git a/llvm/test/CodeGen/AArch64/wineh-frame5.mir b/llvm/test/CodeGen/AArch64/wineh-frame5.mir
index b1708db24aa12..180c20f0148f5 100644
--- a/llvm/test/CodeGen/AArch64/wineh-frame5.mir
+++ b/llvm/test/CodeGen/AArch64/wineh-frame5.mir
@@ -39,26 +39,26 @@
     ret i32 %call1
 
   if.else:                                          ; preds = %entry
-    %0 = bitcast [123 x i32]* %B to i8*
-    call void @llvm.lifetime.start.p0i8(i64 492, i8* nonnull %0) #3
-    %arraydecay7 = bitcast [123 x i32]* %B to i32*
-    %call2 = call i32 @"?func3@@YAHPEAH@Z"(i32* nonnull %arraydecay7)
-    call void @llvm.lifetime.end.p0i8(i64 492, i8* nonnull %0) #3
+    %0 = bitcast ptr %B to ptr
+    call void @llvm.lifetime.start.p0(i64 492, ptr nonnull %0) #3
+    %arraydecay7 = bitcast ptr %B to ptr
+    %call2 = call i32 @"?func3@@YAHPEAH@Z"(ptr nonnull %arraydecay7)
+    call void @llvm.lifetime.end.p0(i64 492, ptr nonnull %0) #3
     ret i32 %call2
   }
 
   ; Function Attrs: argmemonly nounwind
-  declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+  declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
 
   declare dso_local i32 @"?func2@@YAHXZ"() local_unnamed_addr #2
 
-  declare dso_local i32 @"?func3@@YAHPEAH@Z"(i32*) local_unnamed_addr #2
+  declare dso_local i32 @"?func3@@YAHPEAH@Z"(ptr) local_unnamed_addr #2
 
   ; Function Attrs: argmemonly nounwind
-  declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+  declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #3
+  declare void @llvm.stackprotector(ptr, ptr) #3
 
   attributes #0 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/AArch64/wineh-frame6.mir b/llvm/test/CodeGen/AArch64/wineh-frame6.mir
index 913f12202c2b0..188a905a0ab75 100644
--- a/llvm/test/CodeGen/AArch64/wineh-frame6.mir
+++ b/llvm/test/CodeGen/AArch64/wineh-frame6.mir
@@ -27,25 +27,25 @@
     %b.addr = alloca i32, align 4
     %idx.addr = alloca i32, align 4
     %n.addr = alloca i32, align 4
-    %a = alloca i32*, align 8
-    store i32 %c, i32* %c.addr, align 4
-    store i32 %b, i32* %b.addr, align 4
-    store i32 %idx, i32* %idx.addr, align 4
-    store i32 %n, i32* %n.addr, align 4
-    %0 = load i32, i32* %n.addr, align 4
+    %a = alloca ptr, align 8
+    store i32 %c, ptr %c.addr, align 4
+    store i32 %b, ptr %b.addr, align 4
+    store i32 %idx, ptr %idx.addr, align 4
+    store i32 %n, ptr %n.addr, align 4
+    %0 = load i32, ptr %n.addr, align 4
     %conv = sext i32 %0 to i64
     %1 = alloca i8, i64 %conv, align 16
-    %2 = bitcast i8* %1 to i32*
-    store i32* %2, i32** %a, align 8
-    %3 = load i32*, i32** %a, align 8
-    call void @"?init@@YAXPEAH@Z"(i32* %3)
+    %2 = bitcast ptr %1 to ptr
+    store ptr %2, ptr %a, align 8
+    %3 = load ptr, ptr %a, align 8
+    call void @"?init@@YAXPEAH@Z"(ptr %3)
     ret i32 0
   }
 
-  declare dso_local void @"?init@@YAXPEAH@Z"(i32*) #1
+  declare dso_local void @"?init@@YAXPEAH@Z"(ptr) #1
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
 
   attributes #0 = { noinline optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff  --git a/llvm/test/CodeGen/AArch64/wineh-frame7.mir b/llvm/test/CodeGen/AArch64/wineh-frame7.mir
index e0c6759f5851c..6d44ad3716111 100644
--- a/llvm/test/CodeGen/AArch64/wineh-frame7.mir
+++ b/llvm/test/CodeGen/AArch64/wineh-frame7.mir
@@ -35,37 +35,37 @@
     %A = alloca [748193 x i32], align 4
     %a = alloca i32, align 4
     %B = alloca [123 x i32], align 4
-    store i32 %i, i32* %i.addr, align 4
-    %0 = load i32, i32* %i.addr, align 4
+    store i32 %i, ptr %i.addr, align 4
+    %0 = load i32, ptr %i.addr, align 4
     %add = add nsw i32 %0, 2
-    store i32 %add, i32* %a, align 4
+    store i32 %add, ptr %a, align 4
     %call = call i32 @"?func2@@YAHXZ"()
-    %1 = load i32, i32* %i.addr, align 4
+    %1 = load i32, ptr %i.addr, align 4
     %cmp = icmp sgt i32 %1, 2
     br i1 %cmp, label %if.then, label %if.else
 
   if.then:                                          ; preds = %entry
     %call1 = call i32 @"?func2@@YAHXZ"()
-    store i32 %call1, i32* %retval, align 4
+    store i32 %call1, ptr %retval, align 4
     br label %return
 
   if.else:                                          ; preds = %entry
-    %arraydecay = getelementptr inbounds [123 x i32], [123 x i32]* %B, i32 0, i32 0
-    %call2 = call i32 @"?func3@@YAHPEAH@Z"(i32* %arraydecay)
-    store i32 %call2, i32* %retval, align 4
+    %arraydecay = getelementptr inbounds [123 x i32], ptr %B, i32 0, i32 0
+    %call2 = call i32 @"?func3@@YAHPEAH@Z"(ptr %arraydecay)
+    store i32 %call2, ptr %retval, align 4
     br label %return
 
   return:                                           ; preds = %if.else, %if.then
-    %2 = load i32, i32* %retval, align 4
+    %2 = load i32, ptr %retval, align 4
     ret i32 %2
   }
 
   declare dso_local i32 @"?func2@@YAHXZ"() #1
 
-  declare dso_local i32 @"?func3@@YAHPEAH@Z"(i32*) #1
+  declare dso_local i32 @"?func3@@YAHPEAH@Z"(ptr) #1
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
 
   attributes #0 = { noinline optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff  --git a/llvm/test/CodeGen/AArch64/wineh-frame8.mir b/llvm/test/CodeGen/AArch64/wineh-frame8.mir
index 86a4ceff683b2..be14dcd29c66d 100644
--- a/llvm/test/CodeGen/AArch64/wineh-frame8.mir
+++ b/llvm/test/CodeGen/AArch64/wineh-frame8.mir
@@ -21,10 +21,10 @@
   entry:
     %a.addr = alloca i32, align 4
     %b = alloca i32, align 4
-    store i32 %a, i32* %a.addr, align 4
-    store i32 2, i32* %b, align 4
-    %0 = load i32, i32* %b, align 4
-    %1 = load i32, i32* %a.addr, align 4
+    store i32 %a, ptr %a.addr, align 4
+    store i32 2, ptr %b, align 4
+    %0 = load i32, ptr %b, align 4
+    %1 = load i32, ptr %a.addr, align 4
     %add = add nsw i32 %0, %1
     ret i32 %add
   }

diff  --git a/llvm/test/CodeGen/AArch64/wineh5.mir b/llvm/test/CodeGen/AArch64/wineh5.mir
index 04678cb0a30c8..053db98e6d39b 100644
--- a/llvm/test/CodeGen/AArch64/wineh5.mir
+++ b/llvm/test/CodeGen/AArch64/wineh5.mir
@@ -41,37 +41,37 @@
     %A = alloca [748193 x i32], align 4
     %a = alloca i32, align 4
     %B = alloca [123 x i32], align 4
-    store i32 %i, i32* %i.addr, align 4
-    %0 = load i32, i32* %i.addr, align 4
+    store i32 %i, ptr %i.addr, align 4
+    %0 = load i32, ptr %i.addr, align 4
     %add = add nsw i32 %0, 2
-    store i32 %add, i32* %a, align 4
+    store i32 %add, ptr %a, align 4
     %call = call i32 @"?func2@@YAHXZ"()
-    %1 = load i32, i32* %i.addr, align 4
+    %1 = load i32, ptr %i.addr, align 4
     %cmp = icmp sgt i32 %1, 2
     br i1 %cmp, label %if.then, label %if.else
 
   if.then:                                          ; preds = %entry
     %call1 = call i32 @"?func2@@YAHXZ"()
-    store i32 %call1, i32* %retval, align 4
+    store i32 %call1, ptr %retval, align 4
     br label %return
 
   if.else:                                          ; preds = %entry
-    %arraydecay = getelementptr inbounds [123 x i32], [123 x i32]* %B, i32 0, i32 0
-    %call2 = call i32 @"?func3@@YAHPEAH@Z"(i32* %arraydecay)
-    store i32 %call2, i32* %retval, align 4
+    %arraydecay = getelementptr inbounds [123 x i32], ptr %B, i32 0, i32 0
+    %call2 = call i32 @"?func3@@YAHPEAH@Z"(ptr %arraydecay)
+    store i32 %call2, ptr %retval, align 4
     br label %return
 
   return:                                           ; preds = %if.else, %if.then
-    %2 = load i32, i32* %retval, align 4
+    %2 = load i32, ptr %retval, align 4
     ret i32 %2
   }
 
   declare dso_local i32 @"?func2@@YAHXZ"() #1
 
-  declare dso_local i32 @"?func3@@YAHPEAH@Z"(i32*) #1
+  declare dso_local i32 @"?func3@@YAHPEAH@Z"(ptr) #1
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
 
   attributes #0 = { noinline optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff  --git a/llvm/test/CodeGen/AArch64/wineh_shrinkwrap.mir b/llvm/test/CodeGen/AArch64/wineh_shrinkwrap.mir
index aacadd38e90c5..aad271313c850 100644
--- a/llvm/test/CodeGen/AArch64/wineh_shrinkwrap.mir
+++ b/llvm/test/CodeGen/AArch64/wineh_shrinkwrap.mir
@@ -29,15 +29,15 @@
     br i1 %cmp, label %if.then, label %return
 
   if.then:                                          ; preds = %entry
-    %0 = bitcast [1000 x i32]* %A to i8*
-    call void @llvm.lifetime.start.p0i8(i64 4000, i8* nonnull %0) #3
-    %arraydecay2 = bitcast [1000 x i32]* %A to i32*
-    call void @"?init@@YAXPEAH@Z"(i32* nonnull %arraydecay2)
-    %arrayidx = getelementptr inbounds [1000 x i32], [1000 x i32]* %A, i64 0, i64 100
-    %1 = load i32, i32* %arrayidx, align 4, !tbaa !2
+    %0 = bitcast ptr %A to ptr
+    call void @llvm.lifetime.start.p0(i64 4000, ptr nonnull %0) #3
+    %arraydecay2 = bitcast ptr %A to ptr
+    call void @"?init@@YAXPEAH@Z"(ptr nonnull %arraydecay2)
+    %arrayidx = getelementptr inbounds [1000 x i32], ptr %A, i64 0, i64 100
+    %1 = load i32, ptr %arrayidx, align 4, !tbaa !2
     %add = add i32 %b, 1
     %add1 = add i32 %add, %1
-    call void @llvm.lifetime.end.p0i8(i64 4000, i8* nonnull %0) #3
+    call void @llvm.lifetime.end.p0(i64 4000, ptr nonnull %0) #3
     br label %return
 
   return:                                           ; preds = %entry, %if.then
@@ -46,15 +46,15 @@
   }
 
   ; Function Attrs: argmemonly nounwind
-  declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+  declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
 
-  declare dso_local void @"?init@@YAXPEAH@Z"(i32*) local_unnamed_addr #2
+  declare dso_local void @"?init@@YAXPEAH@Z"(ptr) local_unnamed_addr #2
 
   ; Function Attrs: argmemonly nounwind
-  declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+  declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #3
+  declare void @llvm.stackprotector(ptr, ptr) #3
 
   attributes #0 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/AArch64/wrong-callee-save-size-after-livedebugvariables.mir b/llvm/test/CodeGen/AArch64/wrong-callee-save-size-after-livedebugvariables.mir
index 63bf69ce82cb6..53a8612a7fae7 100644
--- a/llvm/test/CodeGen/AArch64/wrong-callee-save-size-after-livedebugvariables.mir
+++ b/llvm/test/CodeGen/AArch64/wrong-callee-save-size-after-livedebugvariables.mir
@@ -25,7 +25,7 @@
     ret i8 undef, !dbg !24
   }
 
-  declare dso_local i8 @bar(i8, i8, i8*) #0
+  declare dso_local i8 @bar(i8, i8, ptr) #0
 
   attributes #0 = { noinline nounwind optnone "frame-pointer"="all" }
 

diff  --git a/llvm/test/CodeGen/AArch64/zero-reg.ll b/llvm/test/CodeGen/AArch64/zero-reg.ll
index eca6a662c2d59..ae2d6b47048f9 100644
--- a/llvm/test/CodeGen/AArch64/zero-reg.ll
+++ b/llvm/test/CodeGen/AArch64/zero-reg.ll
@@ -6,9 +6,9 @@
 define dso_local void @test_zr() {
 ; CHECK-LABEL: test_zr:
 
-  store i32 0, i32* @var32
+  store i32 0, ptr @var32
 ; CHECK: str wzr, [{{x[0-9]+}}, {{#?}}:lo12:var32]
-  store i64 0, i64* @var64
+  store i64 0, ptr @var64
 ; CHECK: str xzr, [{{x[0-9]+}}, {{#?}}:lo12:var64]
 
   ret void
@@ -21,8 +21,8 @@ define dso_local void @test_sp(i32 %val) {
 ; Important correctness point here is that LLVM doesn't try to use xzr
 ; as an addressing register: "str w0, [xzr]" is not a valid A64
 ; instruction (0b11111 in the Rn field would mean "sp").
-  %addr = getelementptr i32, i32* null, i64 0
-  store i32 %val, i32* %addr
+  %addr = getelementptr i32, ptr null, i64 0
+  store i32 %val, ptr %addr
 ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+|sp}}]
 
   ret void


        

