[llvm] 5ddce70 - [AArch64] Convert some tests to opaque pointers (NFC)

Nikita Popov via llvm-commits <llvm-commits@lists.llvm.org>
Mon Dec 19 03:36:44 PST 2022


Author: Nikita Popov
Date: 2022-12-19T12:36:19+01:00
New Revision: 5ddce70ef0e5a641d7fea95e31fc5e2439cb98cb

URL: https://github.com/llvm/llvm-project/commit/5ddce70ef0e5a641d7fea95e31fc5e2439cb98cb
DIFF: https://github.com/llvm/llvm-project/commit/5ddce70ef0e5a641d7fea95e31fc5e2439cb98cb.diff

LOG: [AArch64] Convert some tests to opaque pointers (NFC)
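
The conversion replaces the old typed pointer types (i8*, i32*, fp128*, and so on) with the single opaque ptr type, which also makes pointer bitcasts that existed only to change the pointee type redundant, so they are dropped. As a minimal, hypothetical sketch of the pattern (the function name @load_first below is illustrative only, not taken from this patch):

    ; before: typed pointers
    define i32 @load_first(i32* %p) {
      %v = load i32, i32* %p
      ret i32 %v
    }

    ; after: opaque pointers, same semantics
    define i32 @load_first(ptr %p) {
      %v = load i32, ptr %p
      ret i32 %v
    }

The generated code is expected to be identical, which is why the change is tagged NFC (no functional change).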

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/128bit_load_store.ll
    llvm/test/CodeGen/AArch64/2s-complement-asm.ll
    llvm/test/CodeGen/AArch64/GlobalISel/invoke-region.ll
    llvm/test/CodeGen/AArch64/PBQP-chain.ll
    llvm/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll
    llvm/test/CodeGen/AArch64/Redundantstore.ll
    llvm/test/CodeGen/AArch64/a57-csel.ll
    llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll
    llvm/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll
    llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll
    llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
    llvm/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll
    llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll
    llvm/test/CodeGen/AArch64/aarch64-addv.ll
    llvm/test/CodeGen/AArch64/aarch64-be-bv.ll
    llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll
    llvm/test/CodeGen/AArch64/aarch64-checkMergeStoreCandidatesForDependencies.ll
    llvm/test/CodeGen/AArch64/aarch64-codegen-prepare-atp.ll
    llvm/test/CodeGen/AArch64/aarch64-dup-ext.ll
    llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
    llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
    llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
    llvm/test/CodeGen/AArch64/aarch64-insert-subvector-undef.ll
    llvm/test/CodeGen/AArch64/aarch64-load-ext.ll
    llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll
    llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll
    llvm/test/CodeGen/AArch64/aarch64-mops-mte.ll
    llvm/test/CodeGen/AArch64/aarch64-mops.ll
    llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll
    llvm/test/CodeGen/AArch64/aarch64-sched-store.ll
    llvm/test/CodeGen/AArch64/aarch64-signedreturnaddress.ll
    llvm/test/CodeGen/AArch64/aarch64-smull.ll
    llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
    llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll
    llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll
    llvm/test/CodeGen/AArch64/aarch64-tbz.ll
    llvm/test/CodeGen/AArch64/aarch64-tryBitfieldInsertOpFromOr-crash.ll
    llvm/test/CodeGen/AArch64/aarch64-unroll-and-jam.ll
    llvm/test/CodeGen/AArch64/aarch64-vcvtfp2fxs-combine.ll
    llvm/test/CodeGen/AArch64/aarch64-vectorcombine-invalid-extract-index-crash.ll
    llvm/test/CodeGen/AArch64/aarch64-vuzp.ll
    llvm/test/CodeGen/AArch64/aarch64_f16_be.ll
    llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
    llvm/test/CodeGen/AArch64/addcarry-crash.ll
    llvm/test/CodeGen/AArch64/addr-of-ret-addr.ll
    llvm/test/CodeGen/AArch64/addsub-shifted.ll
    llvm/test/CodeGen/AArch64/addsub.ll
    llvm/test/CodeGen/AArch64/alloca.ll
    llvm/test/CodeGen/AArch64/analyzecmp.ll
    llvm/test/CodeGen/AArch64/and-mask-removal.ll
    llvm/test/CodeGen/AArch64/and-sink.ll
    llvm/test/CodeGen/AArch64/andorbrcompare.ll
    llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll
    llvm/test/CodeGen/AArch64/arm64-2011-03-09-CPSRSpill.ll
    llvm/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll
    llvm/test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll
    llvm/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll
    llvm/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll
    llvm/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll
    llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
    llvm/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll
    llvm/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll
    llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
    llvm/test/CodeGen/AArch64/arm64-2012-07-11-InstrEmitterBug.ll
    llvm/test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll
    llvm/test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll
    llvm/test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll
    llvm/test/CodeGen/AArch64/arm64-aapcs.ll
    llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll
    llvm/test/CodeGen/AArch64/arm64-abi.ll
    llvm/test/CodeGen/AArch64/arm64-abi_align.ll
    llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
    llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
    llvm/test/CodeGen/AArch64/arm64-addrmode.ll
    llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll
    llvm/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll
    llvm/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll
    llvm/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll
    llvm/test/CodeGen/AArch64/arm64-assert-zext-sext.ll
    llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
    llvm/test/CodeGen/AArch64/arm64-atomic.ll
    llvm/test/CodeGen/AArch64/arm64-bcc.ll
    llvm/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll
    llvm/test/CodeGen/AArch64/arm64-big-endian-eh.ll
    llvm/test/CodeGen/AArch64/arm64-big-endian-varargs.ll
    llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll
    llvm/test/CodeGen/AArch64/arm64-big-imm-offsets.ll
    llvm/test/CodeGen/AArch64/arm64-big-stack.ll
    llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
    llvm/test/CodeGen/AArch64/arm64-blockaddress.ll
    llvm/test/CodeGen/AArch64/arm64-build-vector.ll
    llvm/test/CodeGen/AArch64/arm64-builtins-linux.ll
    llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll
    llvm/test/CodeGen/AArch64/arm64-cast-opt.ll
    llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll
    llvm/test/CodeGen/AArch64/arm64-ccmp.ll
    llvm/test/CodeGen/AArch64/arm64-coalesce-ext.ll
    llvm/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll
    llvm/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll
    llvm/test/CodeGen/AArch64/arm64-collect-loh-str.ll
    llvm/test/CodeGen/AArch64/arm64-const-addr.ll
    llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll
    llvm/test/CodeGen/AArch64/arm64-copy-tuple.ll
    llvm/test/CodeGen/AArch64/arm64-cse.ll
    llvm/test/CodeGen/AArch64/arm64-csel.ll
    llvm/test/CodeGen/AArch64/arm64-csldst-mmo.ll
    llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll
    llvm/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll
    llvm/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll
    llvm/test/CodeGen/AArch64/arm64-dead-def-frame-index.ll
    llvm/test/CodeGen/AArch64/arm64-dup.ll
    llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll
    llvm/test/CodeGen/AArch64/arm64-ext.ll
    llvm/test/CodeGen/AArch64/arm64-extend.ll
    llvm/test/CodeGen/AArch64/arm64-extload-knownzero.ll
    llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll
    llvm/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll
    llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll
    llvm/test/CodeGen/AArch64/arm64-fast-isel-call.ll
    llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll
    llvm/test/CodeGen/AArch64/arm64-fast-isel-gv.ll
    llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll
    llvm/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll
    llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
    llvm/test/CodeGen/AArch64/arm64-fast-isel-ret.ll
    llvm/test/CodeGen/AArch64/arm64-fast-isel-store.ll
    llvm/test/CodeGen/AArch64/arm64-fast-isel.ll
    llvm/test/CodeGen/AArch64/arm64-fastcc-tailcall.ll
    llvm/test/CodeGen/AArch64/arm64-fastisel-gep-promote-before-add.ll
    llvm/test/CodeGen/AArch64/arm64-fma-combines.ll
    llvm/test/CodeGen/AArch64/arm64-fml-combines.ll
    llvm/test/CodeGen/AArch64/arm64-fmuladd.ll
    llvm/test/CodeGen/AArch64/arm64-fold-address.ll
    llvm/test/CodeGen/AArch64/arm64-fold-lsl.ll
    llvm/test/CodeGen/AArch64/arm64-fp.ll
    llvm/test/CodeGen/AArch64/arm64-fp128-folding.ll
    llvm/test/CodeGen/AArch64/arm64-fp128.ll
    llvm/test/CodeGen/AArch64/arm64-global-address.ll
    llvm/test/CodeGen/AArch64/arm64-hello.ll
    llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog.ll
    llvm/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll
    llvm/test/CodeGen/AArch64/arm64-indexed-memory.ll
    llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
    llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
    llvm/test/CodeGen/AArch64/arm64-inline-asm.ll
    llvm/test/CodeGen/AArch64/arm64-instruction-mix-remarks.ll
    llvm/test/CodeGen/AArch64/arm64-jumptable.ll
    llvm/test/CodeGen/AArch64/arm64-large-frame.ll
    llvm/test/CodeGen/AArch64/arm64-ld-from-st.ll
    llvm/test/CodeGen/AArch64/arm64-ld1.ll
    llvm/test/CodeGen/AArch64/arm64-ldp-aa.ll
    llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll
    llvm/test/CodeGen/AArch64/arm64-ldur.ll
    llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll
    llvm/test/CodeGen/AArch64/arm64-memcpy-inline.ll
    llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
    llvm/test/CodeGen/AArch64/arm64-memset-to-bzero-pgso.ll
    llvm/test/CodeGen/AArch64/arm64-memset-to-bzero.ll
    llvm/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll
    llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
    llvm/test/CodeGen/AArch64/arm64-misched-multimmo.ll
    llvm/test/CodeGen/AArch64/arm64-mte.ll
    llvm/test/CodeGen/AArch64/arm64-narrow-st-merge.ll
    llvm/test/CodeGen/AArch64/arm64-neon-copyPhysReg-tuple.ll
    llvm/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll
    llvm/test/CodeGen/AArch64/arm64-neon-vector-list-spill.ll
    llvm/test/CodeGen/AArch64/arm64-neon-vector-shuffle-extract.ll
    llvm/test/CodeGen/AArch64/arm64-nvcast.ll
    llvm/test/CodeGen/AArch64/arm64-pic-local-symbol.ll
    llvm/test/CodeGen/AArch64/arm64-platform-reg.ll
    llvm/test/CodeGen/AArch64/arm64-prefetch.ll
    llvm/test/CodeGen/AArch64/arm64-preserve-most.ll
    llvm/test/CodeGen/AArch64/arm64-promote-const.ll
    llvm/test/CodeGen/AArch64/arm64-redzone.ll
    llvm/test/CodeGen/AArch64/arm64-register-offset-addressing.ll
    llvm/test/CodeGen/AArch64/arm64-regress-interphase-shift.ll
    llvm/test/CodeGen/AArch64/arm64-reserve-call-saved-reg.ll
    llvm/test/CodeGen/AArch64/arm64-reserved-arg-reg-call-error.ll
    llvm/test/CodeGen/AArch64/arm64-return-vector.ll
    llvm/test/CodeGen/AArch64/arm64-returnaddr.ll
    llvm/test/CodeGen/AArch64/arm64-rev.ll
    llvm/test/CodeGen/AArch64/arm64-scaled_iv.ll
    llvm/test/CodeGen/AArch64/arm64-scvt.ll
    llvm/test/CodeGen/AArch64/arm64-setcc-int-to-fp-combine.ll
    llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
    llvm/test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll
    llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
    llvm/test/CodeGen/AArch64/arm64-spill-lr.ll
    llvm/test/CodeGen/AArch64/arm64-spill-remarks-treshold-hotness.ll
    llvm/test/CodeGen/AArch64/arm64-spill-remarks.ll
    llvm/test/CodeGen/AArch64/arm64-spill.ll
    llvm/test/CodeGen/AArch64/arm64-srl-and.ll
    llvm/test/CodeGen/AArch64/arm64-st1.ll
    llvm/test/CodeGen/AArch64/arm64-stack-no-frame.ll
    llvm/test/CodeGen/AArch64/arm64-stacksave.ll
    llvm/test/CodeGen/AArch64/arm64-storebytesmerge.ll
    llvm/test/CodeGen/AArch64/arm64-stp-aa.ll
    llvm/test/CodeGen/AArch64/arm64-stp.ll
    llvm/test/CodeGen/AArch64/arm64-strict-align.ll
    llvm/test/CodeGen/AArch64/arm64-stur.ll
    llvm/test/CodeGen/AArch64/arm64-this-return.ll
    llvm/test/CodeGen/AArch64/arm64-tls-darwin.ll
    llvm/test/CodeGen/AArch64/arm64-tls-dynamic-together.ll
    llvm/test/CodeGen/AArch64/arm64-tls-dynamics.ll
    llvm/test/CodeGen/AArch64/arm64-tls-initial-exec.ll
    llvm/test/CodeGen/AArch64/arm64-tls-local-exec.ll
    llvm/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll
    llvm/test/CodeGen/AArch64/arm64-trn.ll
    llvm/test/CodeGen/AArch64/arm64-trunc-store.ll
    llvm/test/CodeGen/AArch64/arm64-umaxv.ll
    llvm/test/CodeGen/AArch64/arm64-uminv.ll
    llvm/test/CodeGen/AArch64/arm64-unaligned_ldst.ll
    llvm/test/CodeGen/AArch64/arm64-uzp.ll
    llvm/test/CodeGen/AArch64/arm64-vaargs.ll
    llvm/test/CodeGen/AArch64/arm64-vabs.ll
    llvm/test/CodeGen/AArch64/arm64-vadd.ll
    llvm/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
    llvm/test/CodeGen/AArch64/arm64-vbitwise.ll
    llvm/test/CodeGen/AArch64/arm64-vcmp.ll
    llvm/test/CodeGen/AArch64/arm64-vcnt.ll
    llvm/test/CodeGen/AArch64/arm64-vcombine.ll
    llvm/test/CodeGen/AArch64/arm64-vcvt.ll
    llvm/test/CodeGen/AArch64/arm64-vecCmpBr.ll
    llvm/test/CodeGen/AArch64/arm64-vector-ext.ll
    llvm/test/CodeGen/AArch64/arm64-vector-imm.ll
    llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll
    llvm/test/CodeGen/AArch64/arm64-vector-ldst.ll
    llvm/test/CodeGen/AArch64/arm64-vext.ll
    llvm/test/CodeGen/AArch64/arm64-vhadd.ll
    llvm/test/CodeGen/AArch64/arm64-vhsub.ll
    llvm/test/CodeGen/AArch64/arm64-virtual_base.ll
    llvm/test/CodeGen/AArch64/arm64-vmax.ll
    llvm/test/CodeGen/AArch64/arm64-vmul.ll
    llvm/test/CodeGen/AArch64/arm64-volatile.ll
    llvm/test/CodeGen/AArch64/arm64-vqadd.ll
    llvm/test/CodeGen/AArch64/arm64-vqsub.ll
    llvm/test/CodeGen/AArch64/arm64-vselect.ll
    llvm/test/CodeGen/AArch64/arm64-vshift.ll
    llvm/test/CodeGen/AArch64/arm64-vshr.ll
    llvm/test/CodeGen/AArch64/arm64-vshuffle.ll
    llvm/test/CodeGen/AArch64/arm64-vsqrt.ll
    llvm/test/CodeGen/AArch64/arm64-vsra.ll
    llvm/test/CodeGen/AArch64/arm64-vsub.ll
    llvm/test/CodeGen/AArch64/arm64-weak-reference.ll
    llvm/test/CodeGen/AArch64/arm64-windows-calls.ll
    llvm/test/CodeGen/AArch64/arm64-windows-tailcall.ll
    llvm/test/CodeGen/AArch64/arm64-xaluo.ll
    llvm/test/CodeGen/AArch64/arm64-zeroreg.ll
    llvm/test/CodeGen/AArch64/arm64-zextload-unscaled.ll
    llvm/test/CodeGen/AArch64/arm64-zip.ll
    llvm/test/CodeGen/AArch64/arm64_32-addrs.ll
    llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
    llvm/test/CodeGen/AArch64/arm64_32-fastisel.ll
    llvm/test/CodeGen/AArch64/arm64_32-frame-pointers.ll
    llvm/test/CodeGen/AArch64/arm64_32-gep-sink.ll
    llvm/test/CodeGen/AArch64/arm64_32-memcpy.ll
    llvm/test/CodeGen/AArch64/arm64_32-neon.ll
    llvm/test/CodeGen/AArch64/arm64_32-null.ll
    llvm/test/CodeGen/AArch64/arm64_32-pointer-extend.ll
    llvm/test/CodeGen/AArch64/arm64_32-stack-pointers.ll
    llvm/test/CodeGen/AArch64/arm64_32-tls.ll
    llvm/test/CodeGen/AArch64/arm64_32-va.ll
    llvm/test/CodeGen/AArch64/arm64_32.ll
    llvm/test/CodeGen/AArch64/arm64ec-varargs.ll
    llvm/test/CodeGen/AArch64/assertion-rc-mismatch.ll
    llvm/test/CodeGen/AArch64/atomic-ops-ldapr.ll
    llvm/test/CodeGen/AArch64/atomic-ops-lse.ll
    llvm/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
    llvm/test/CodeGen/AArch64/atomic-ops.ll
    llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
    llvm/test/CodeGen/AArch64/atomicrmw-xchg-fp.ll
    llvm/test/CodeGen/AArch64/basic-pic.ll
    llvm/test/CodeGen/AArch64/bcmp-inline-small.ll
    llvm/test/CodeGen/AArch64/bf16-shuffle.ll
    llvm/test/CodeGen/AArch64/bf16.ll
    llvm/test/CodeGen/AArch64/bfis-in-loop.ll
    llvm/test/CodeGen/AArch64/big-callframe.ll
    llvm/test/CodeGen/AArch64/bitfield-extract.ll
    llvm/test/CodeGen/AArch64/bitfield-insert-0.ll
    llvm/test/CodeGen/AArch64/bitfield-insert.ll
    llvm/test/CodeGen/AArch64/bitfield.ll
    llvm/test/CodeGen/AArch64/blockaddress.ll
    llvm/test/CodeGen/AArch64/bool-loads.ll
    llvm/test/CodeGen/AArch64/br-cond-not-merge.ll
    llvm/test/CodeGen/AArch64/br-to-eh-lpad.ll
    llvm/test/CodeGen/AArch64/br-undef-cond.ll
    llvm/test/CodeGen/AArch64/branch-folder-merge-mmos.ll
    llvm/test/CodeGen/AArch64/branch-relax-alignment.ll
    llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
    llvm/test/CodeGen/AArch64/branch-relax-cbz.ll
    llvm/test/CodeGen/AArch64/branch-target-enforcement-indirect-calls.ll
    llvm/test/CodeGen/AArch64/breg.ll
    llvm/test/CodeGen/AArch64/bswap-known-bits.ll
    llvm/test/CodeGen/AArch64/bti-branch-relaxation.ll
    llvm/test/CodeGen/AArch64/build-one-lane.ll
    llvm/test/CodeGen/AArch64/build-pair-isel.ll
    llvm/test/CodeGen/AArch64/byval-type.ll
    llvm/test/CodeGen/AArch64/call-rv-marker.ll
    llvm/test/CodeGen/AArch64/callbr-asm-label.ll
    llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll
    llvm/test/CodeGen/AArch64/callee-save.ll
    llvm/test/CodeGen/AArch64/cfguard-checks.ll
    llvm/test/CodeGen/AArch64/cfguard-module-flag.ll
    llvm/test/CodeGen/AArch64/cgp-trivial-phi-node.ll
    llvm/test/CodeGen/AArch64/cgp-usubo.ll
    llvm/test/CodeGen/AArch64/cmp-bool.ll
    llvm/test/CodeGen/AArch64/cmp-frameindex.ll
    llvm/test/CodeGen/AArch64/cmpwithshort.ll
    llvm/test/CodeGen/AArch64/cmpxchg-O0.ll
    llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll
    llvm/test/CodeGen/AArch64/cmpxchg-lse-even-regs.ll
    llvm/test/CodeGen/AArch64/code-model-large-abs.ll
    llvm/test/CodeGen/AArch64/code-model-tiny-abs.ll
    llvm/test/CodeGen/AArch64/combine-andintoload.ll
    llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
    llvm/test/CodeGen/AArch64/compare-branch.ll
    llvm/test/CodeGen/AArch64/complex-copy-noneon.ll
    llvm/test/CodeGen/AArch64/complex-int-to-fp.ll
    llvm/test/CodeGen/AArch64/concat-vector.ll
    llvm/test/CodeGen/AArch64/cond-br-tuning.ll
    llvm/test/CodeGen/AArch64/cond-sel.ll
    llvm/test/CodeGen/AArch64/consthoist-gep.ll
    llvm/test/CodeGen/AArch64/convertphitype.ll
    llvm/test/CodeGen/AArch64/copyprop.ll
    llvm/test/CodeGen/AArch64/csel-zero-float.ll
    llvm/test/CodeGen/AArch64/csr-split.ll
    llvm/test/CodeGen/AArch64/cxx-tlscc.ll
    llvm/test/CodeGen/AArch64/dag-ReplaceAllUsesOfValuesWith.ll
    llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll
    llvm/test/CodeGen/AArch64/dag-combine-lifetime-end-store-typesize.ll
    llvm/test/CodeGen/AArch64/dag-combine-select.ll
    llvm/test/CodeGen/AArch64/darwinpcs-tail.ll
    llvm/test/CodeGen/AArch64/dbg-declare-tag-offset.ll
    llvm/test/CodeGen/AArch64/dbg-value-tag-offset.ll
    llvm/test/CodeGen/AArch64/div-rem-pair-recomposition-signed.ll
    llvm/test/CodeGen/AArch64/div-rem-pair-recomposition-unsigned.ll
    llvm/test/CodeGen/AArch64/dllexport.ll
    llvm/test/CodeGen/AArch64/dllimport.ll
    llvm/test/CodeGen/AArch64/dp-3source.ll
    llvm/test/CodeGen/AArch64/dp1.ll
    llvm/test/CodeGen/AArch64/dp2.ll
    llvm/test/CodeGen/AArch64/dwarf-cfi.ll
    llvm/test/CodeGen/AArch64/eh_recoverfp.ll
    llvm/test/CodeGen/AArch64/ehcontguard.ll
    llvm/test/CodeGen/AArch64/elf-globals-pic.ll
    llvm/test/CodeGen/AArch64/elf-globals-static.ll
    llvm/test/CodeGen/AArch64/elf-preemption.ll
    llvm/test/CodeGen/AArch64/eliminate-trunc.ll
    llvm/test/CodeGen/AArch64/emutls.ll
    llvm/test/CodeGen/AArch64/emutls_generic.ll
    llvm/test/CodeGen/AArch64/expand-select.ll
    llvm/test/CodeGen/AArch64/extern-weak.ll
    llvm/test/CodeGen/AArch64/extract-bits.ll
    llvm/test/CodeGen/AArch64/extract-lowbits.ll
    llvm/test/CodeGen/AArch64/f16-convert.ll
    llvm/test/CodeGen/AArch64/f16-instructions.ll
    llvm/test/CodeGen/AArch64/fadd-combines.ll
    llvm/test/CodeGen/AArch64/falkor-hwpf-fix.ll
    llvm/test/CodeGen/AArch64/falkor-hwpf.ll
    llvm/test/CodeGen/AArch64/fast-isel-address-extends.ll
    llvm/test/CodeGen/AArch64/fast-isel-addressing-modes.ll
    llvm/test/CodeGen/AArch64/fast-isel-atomic.ll
    llvm/test/CodeGen/AArch64/fast-isel-branch-cond-mask.ll
    llvm/test/CodeGen/AArch64/fast-isel-branch-uncond-debug.ll
    llvm/test/CodeGen/AArch64/fast-isel-call-return.ll
    llvm/test/CodeGen/AArch64/fast-isel-cbz.ll
    llvm/test/CodeGen/AArch64/fast-isel-cmpxchg.ll
    llvm/test/CodeGen/AArch64/fast-isel-erase.ll
    llvm/test/CodeGen/AArch64/fast-isel-gep.ll
    llvm/test/CodeGen/AArch64/fast-isel-int-ext.ll
    llvm/test/CodeGen/AArch64/fast-isel-int-ext2.ll
    llvm/test/CodeGen/AArch64/fast-isel-int-ext3.ll
    llvm/test/CodeGen/AArch64/fast-isel-int-ext5.ll
    llvm/test/CodeGen/AArch64/fast-isel-memcpy.ll
    llvm/test/CodeGen/AArch64/fastisel-debugvalue-undef.ll
    llvm/test/CodeGen/AArch64/fcopysign.ll
    llvm/test/CodeGen/AArch64/flags-multiuse.ll
    llvm/test/CodeGen/AArch64/floatdp_2source.ll
    llvm/test/CodeGen/AArch64/fold-global-offsets.ll
    llvm/test/CodeGen/AArch64/fp-cond-sel.ll
    llvm/test/CodeGen/AArch64/fp-const-fold.ll
    llvm/test/CodeGen/AArch64/fp128-folding.ll
    llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll
    llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll
    llvm/test/CodeGen/AArch64/fp16-vector-load-store.ll
    llvm/test/CodeGen/AArch64/fp16-vector-nvcast.ll
    llvm/test/CodeGen/AArch64/fpimm.ll
    llvm/test/CodeGen/AArch64/frameaddr.ll
    llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll
    llvm/test/CodeGen/AArch64/free-zext.ll
    llvm/test/CodeGen/AArch64/func-argpassing.ll
    llvm/test/CodeGen/AArch64/func-calls.ll
    llvm/test/CodeGen/AArch64/funclet-local-stack-size.ll
    llvm/test/CodeGen/AArch64/funclet-match-add-sub-stack.ll
    llvm/test/CodeGen/AArch64/funcptr_cast.ll
    llvm/test/CodeGen/AArch64/gep-nullptr.ll
    llvm/test/CodeGen/AArch64/ghc-cc.ll
    llvm/test/CodeGen/AArch64/global-alignment.ll
    llvm/test/CodeGen/AArch64/global-merge-1.ll
    llvm/test/CodeGen/AArch64/global-merge-2.ll
    llvm/test/CodeGen/AArch64/global-merge-3.ll
    llvm/test/CodeGen/AArch64/global-merge-4.ll
    llvm/test/CodeGen/AArch64/global-merge-group-by-use.ll
    llvm/test/CodeGen/AArch64/global-merge-hidden-minsize.ll
    llvm/test/CodeGen/AArch64/global-merge-ignore-single-use-minsize.ll
    llvm/test/CodeGen/AArch64/global-merge-ignore-single-use.ll
    llvm/test/CodeGen/AArch64/global-merge-minsize.ll
    llvm/test/CodeGen/AArch64/global-merge.ll
    llvm/test/CodeGen/AArch64/got-abuse.ll
    llvm/test/CodeGen/AArch64/half.ll
    llvm/test/CodeGen/AArch64/hwasan-check-memaccess.ll
    llvm/test/CodeGen/AArch64/hwasan-prefer-fp.ll
    llvm/test/CodeGen/AArch64/i1-contents.ll
    llvm/test/CodeGen/AArch64/i128-align.ll
    llvm/test/CodeGen/AArch64/i128_volatile_load_store.ll
    llvm/test/CodeGen/AArch64/illegal-float-ops.ll
    llvm/test/CodeGen/AArch64/ilp32-tlsdesc.ll
    llvm/test/CodeGen/AArch64/ilp32-va.ll
    llvm/test/CodeGen/AArch64/implicit-null-check.ll
    llvm/test/CodeGen/AArch64/init-array.ll
    llvm/test/CodeGen/AArch64/inline-asm-blockaddress.ll
    llvm/test/CodeGen/AArch64/inline-asm-constraints-bad-sve.ll
    llvm/test/CodeGen/AArch64/inline-asm-globaladdress.ll
    llvm/test/CodeGen/AArch64/inline-asm-multilevel-gep.ll
    llvm/test/CodeGen/AArch64/inlineasm-S-constraint.ll
    llvm/test/CodeGen/AArch64/inlineasm-illegal-type.ll
    llvm/test/CodeGen/AArch64/inlineasm-output-template.ll
    llvm/test/CodeGen/AArch64/insert-extend.ll
    llvm/test/CodeGen/AArch64/insert-subvector.ll
    llvm/test/CodeGen/AArch64/int-to-fp-no-neon.ll
    llvm/test/CodeGen/AArch64/intrinsics-memory-barrier.ll
    llvm/test/CodeGen/AArch64/irg.ll
    llvm/test/CodeGen/AArch64/irg_sp_tagp.ll
    llvm/test/CodeGen/AArch64/landingpad-ifcvt.ll
    llvm/test/CodeGen/AArch64/large-stack-cmp.ll
    llvm/test/CodeGen/AArch64/large-stack.ll
    llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
    llvm/test/CodeGen/AArch64/ldst-opt-after-block-placement.ll
    llvm/test/CodeGen/AArch64/ldst-opt.ll
    llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
    llvm/test/CodeGen/AArch64/ldst-regoffset.ll
    llvm/test/CodeGen/AArch64/ldst-unscaledimm.ll
    llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll
    llvm/test/CodeGen/AArch64/ldst-zero.ll
    llvm/test/CodeGen/AArch64/literal_pools_float.ll
    llvm/test/CodeGen/AArch64/llvm-masked-gather-legal-for-sve.ll
    llvm/test/CodeGen/AArch64/llvm-masked-scatter-legal-for-sve.ll
    llvm/test/CodeGen/AArch64/load-combine-big-endian.ll
    llvm/test/CodeGen/AArch64/load-combine.ll
    llvm/test/CodeGen/AArch64/load-store-forwarding.ll
    llvm/test/CodeGen/AArch64/local_vars.ll
    llvm/test/CodeGen/AArch64/logical-imm.ll
    llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
    llvm/test/CodeGen/AArch64/loop-micro-op-buffer-size-t99.ll
    llvm/test/CodeGen/AArch64/lower-ptrmask.ll
    llvm/test/CodeGen/AArch64/lowerMUL-newload.ll
    llvm/test/CodeGen/AArch64/ls64-inline-asm.ll
    llvm/test/CodeGen/AArch64/ls64-intrinsics.ll
    llvm/test/CodeGen/AArch64/machine-combiner-madd.ll
    llvm/test/CodeGen/AArch64/machine-copy-prop.ll
    llvm/test/CodeGen/AArch64/machine-copy-remove.ll
    llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
    llvm/test/CodeGen/AArch64/machine-outliner-noredzone.ll
    llvm/test/CodeGen/AArch64/machine-outliner-outline-bti.ll
    llvm/test/CodeGen/AArch64/machine-outliner-remarks.ll
    llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-cfi.ll
    llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll
    llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-non-leaf.ll
    llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-diff-key.ll
    llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-a.ll
    llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-b.ll
    llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-sp-mod.ll
    llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-subtarget.ll
    llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-thunk.ll
    llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-v8-3.ll
    llvm/test/CodeGen/AArch64/machine-outliner-tail.ll
    llvm/test/CodeGen/AArch64/machine-outliner-throw.ll
    llvm/test/CodeGen/AArch64/machine-outliner-thunk.ll
    llvm/test/CodeGen/AArch64/machine-outliner.ll
    llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll
    llvm/test/CodeGen/AArch64/machine_cse.ll
    llvm/test/CodeGen/AArch64/machine_cse_illegal_hoist.ll
    llvm/test/CodeGen/AArch64/macho-global-symbols.ll
    llvm/test/CodeGen/AArch64/memcpy-f128.ll
    llvm/test/CodeGen/AArch64/memset-inline.ll
    llvm/test/CodeGen/AArch64/memset-vs-memset-inline.ll
    llvm/test/CodeGen/AArch64/memset.ll
    llvm/test/CodeGen/AArch64/memsize-remarks.ll
    llvm/test/CodeGen/AArch64/merge-store-dependency.ll
    llvm/test/CodeGen/AArch64/merge-trunc-store.ll
    llvm/test/CodeGen/AArch64/mergestores_noimplicitfloat.ll
    llvm/test/CodeGen/AArch64/midpoint-int.ll
    llvm/test/CodeGen/AArch64/min-max.ll
    llvm/test/CodeGen/AArch64/mingw-refptr.ll
    llvm/test/CodeGen/AArch64/misched-fusion-addadrp.ll
    llvm/test/CodeGen/AArch64/misched-fusion-addr-tune.ll
    llvm/test/CodeGen/AArch64/misched-fusion-addr.ll
    llvm/test/CodeGen/AArch64/misched-fusion-aes.ll
    llvm/test/CodeGen/AArch64/misched-fusion-lit.ll
    llvm/test/CodeGen/AArch64/misched-stp.ll
    llvm/test/CodeGen/AArch64/movw-consts.ll
    llvm/test/CodeGen/AArch64/movw-shift-encoding.ll
    llvm/test/CodeGen/AArch64/multi-vector-store-size.ll
    llvm/test/CodeGen/AArch64/neon-addlv.ll
    llvm/test/CodeGen/AArch64/neon-dotpattern.ll
    llvm/test/CodeGen/AArch64/neon-dotreduce.ll
    llvm/test/CodeGen/AArch64/neon-fpextend_f16.ll
    llvm/test/CodeGen/AArch64/neon-fpround_f128.ll
    llvm/test/CodeGen/AArch64/neon-sad.ll
    llvm/test/CodeGen/AArch64/neon-truncstore.ll
    llvm/test/CodeGen/AArch64/neon-vmull-high-p64.ll
    llvm/test/CodeGen/AArch64/nest-register.ll
    llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll
    llvm/test/CodeGen/AArch64/no_cfi.ll
    llvm/test/CodeGen/AArch64/nontemporal-load.ll
    llvm/test/CodeGen/AArch64/nontemporal.ll
    llvm/test/CodeGen/AArch64/nzcv-save.ll
    llvm/test/CodeGen/AArch64/optimize-cond-branch.ll
    llvm/test/CodeGen/AArch64/optimize-imm.ll
    llvm/test/CodeGen/AArch64/overeager_mla_fusing.ll
    llvm/test/CodeGen/AArch64/pacbti-llvm-generated-funcs-1.ll
    llvm/test/CodeGen/AArch64/pacbti-llvm-generated-funcs-2.ll
    llvm/test/CodeGen/AArch64/paired-load.ll
    llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll
    llvm/test/CodeGen/AArch64/pcsections.ll
    llvm/test/CodeGen/AArch64/peephole-and-tst.ll
    llvm/test/CodeGen/AArch64/pic-eh-stubs.ll
    llvm/test/CodeGen/AArch64/pie.ll
    llvm/test/CodeGen/AArch64/popcount.ll
    llvm/test/CodeGen/AArch64/postra-mi-sched.ll
    llvm/test/CodeGen/AArch64/pr27816.ll
    llvm/test/CodeGen/AArch64/pr33172.ll
    llvm/test/CodeGen/AArch64/pr51476.ll
    llvm/test/CodeGen/AArch64/preferred-alignment.ll
    llvm/test/CodeGen/AArch64/prefixdata.ll
    llvm/test/CodeGen/AArch64/pull-binop-through-shift.ll
    llvm/test/CodeGen/AArch64/pull-conditional-binop-through-shift.ll
    llvm/test/CodeGen/AArch64/ragreedy-csr.ll
    llvm/test/CodeGen/AArch64/rand.ll
    llvm/test/CodeGen/AArch64/redundant-copy-elim-empty-mbb.ll
    llvm/test/CodeGen/AArch64/regress-tail-livereg.ll
    llvm/test/CodeGen/AArch64/regress-tblgen-chains.ll
    llvm/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll
    llvm/test/CodeGen/AArch64/relaxed-fp-atomics.ll
    llvm/test/CodeGen/AArch64/remat.ll
    llvm/test/CodeGen/AArch64/reserveXreg.ll
    llvm/test/CodeGen/AArch64/returnaddr.ll
    llvm/test/CodeGen/AArch64/rm_redundant_cmp.ll
    llvm/test/CodeGen/AArch64/rotate.ll
    llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
    llvm/test/CodeGen/AArch64/sched-past-vector-ldst.ll
    llvm/test/CodeGen/AArch64/sdag-store-merging-bug.ll
    llvm/test/CodeGen/AArch64/seh-finally.ll
    llvm/test/CodeGen/AArch64/seh_funclet_x1.ll
    llvm/test/CodeGen/AArch64/select_cc.ll
    llvm/test/CodeGen/AArch64/semantic-interposition-asm.ll
    llvm/test/CodeGen/AArch64/setcc-type-mismatch.ll
    llvm/test/CodeGen/AArch64/setjmp-bti-no-enforcement.ll
    llvm/test/CodeGen/AArch64/setjmp-bti-outliner.ll
    llvm/test/CodeGen/AArch64/setjmp-bti.ll
    llvm/test/CodeGen/AArch64/settag-merge-order.ll
    llvm/test/CodeGen/AArch64/settag-merge.ll
    llvm/test/CodeGen/AArch64/settag.ll
    llvm/test/CodeGen/AArch64/shift-amount-mod.ll
    llvm/test/CodeGen/AArch64/shift-by-signext.ll
    llvm/test/CodeGen/AArch64/shift-logic.ll
    llvm/test/CodeGen/AArch64/shrink-wrap.ll
    llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll
    llvm/test/CodeGen/AArch64/sibling-call.ll
    llvm/test/CodeGen/AArch64/sign-return-address-cfi-negate-ra-state.ll
    llvm/test/CodeGen/AArch64/speculation-hardening-loads.ll
    llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll
    llvm/test/CodeGen/AArch64/speculation-hardening.ll
    llvm/test/CodeGen/AArch64/sponentry.ll
    llvm/test/CodeGen/AArch64/sqrt-fastmath.ll
    llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
    llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
    llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll
    llvm/test/CodeGen/AArch64/stack-guard-vaarg.ll
    llvm/test/CodeGen/AArch64/stack-protector-musttail.ll
    llvm/test/CodeGen/AArch64/stack-protector-target.ll
    llvm/test/CodeGen/AArch64/stack-tagging-dbg.ll
    llvm/test/CodeGen/AArch64/stack-tagging-ex-1.ll
    llvm/test/CodeGen/AArch64/stack-tagging-musttail.ll
    llvm/test/CodeGen/AArch64/stack-tagging-setjmp.ll
    llvm/test/CodeGen/AArch64/stack-tagging-split-lifetime.ll
    llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll
    llvm/test/CodeGen/AArch64/stack-tagging-unchecked-ld-st.ll
    llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
    llvm/test/CodeGen/AArch64/stack-tagging.ll
    llvm/test/CodeGen/AArch64/stack_guard_remat.ll
    llvm/test/CodeGen/AArch64/stackguard-internal.ll
    llvm/test/CodeGen/AArch64/stackmap-dynamic-alloca.ll
    llvm/test/CodeGen/AArch64/stackmap-frame-setup.ll
    llvm/test/CodeGen/AArch64/statepoint-call-lowering-lr.ll
    llvm/test/CodeGen/AArch64/statepoint-call-lowering-sp.ll
    llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
    llvm/test/CodeGen/AArch64/stgp.ll
    llvm/test/CodeGen/AArch64/store_merge_pair_offset.ll
    llvm/test/CodeGen/AArch64/storepairsuppress_minsize.ll
    llvm/test/CodeGen/AArch64/strqro.ll
    llvm/test/CodeGen/AArch64/strqu.ll
    llvm/test/CodeGen/AArch64/subs-to-sub-opt.ll
    llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
    llvm/test/CodeGen/AArch64/sve-coalesce-ptrue-intrinsics.ll
    llvm/test/CodeGen/AArch64/sve-fix-length-and-combine-512.ll
    llvm/test/CodeGen/AArch64/sve-fold-vscale.ll
    llvm/test/CodeGen/AArch64/sve-fp.ll
    llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-contiguous-prefetches.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-32bit-scaled-offsets.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-32bit-unscaled-offsets.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-64bit-scaled-offset.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-64bit-unscaled-offset.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-32bit-scaled-offsets.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-32bit-unscaled-offsets.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-scaled-offset.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-unscaled-offset.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scalar-base-vector-indexes.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-imm.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-reg.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-ld1ro-addressing-mode-reg-imm.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-ld1ro-addressing-mode-reg-reg.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-ld1ro.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+reg-addr-mode.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-loads-ff.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-32bit-scaled-offsets.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-32bit-unscaled-offsets.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-64bit-scaled-offset.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-64bit-unscaled-offset.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-imm.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-reg.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-reg-addr-mode.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-stores.ll
    llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll
    llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll
    llvm/test/CodeGen/AArch64/sve-ld1r.ll
    llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
    llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
    llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
    llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
    llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
    llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
    llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
    llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-imm.ll
    llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-reg.ll
    llvm/test/CodeGen/AArch64/sve-masked-scatter-32b-scaled.ll
    llvm/test/CodeGen/AArch64/sve-masked-scatter-32b-unscaled.ll
    llvm/test/CodeGen/AArch64/sve-masked-scatter-64b-scaled.ll
    llvm/test/CodeGen/AArch64/sve-masked-scatter-64b-unscaled.ll
    llvm/test/CodeGen/AArch64/sve-masked-scatter-legalize.ll
    llvm/test/CodeGen/AArch64/sve-masked-scatter-vec-plus-imm.ll
    llvm/test/CodeGen/AArch64/sve-masked-scatter-vec-plus-reg.ll
    llvm/test/CodeGen/AArch64/sve-merging-stores.ll
    llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
    llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-reg.ll
    llvm/test/CodeGen/AArch64/sve-punpklo-combine.ll
    llvm/test/CodeGen/AArch64/sve-redundant-store.ll
    llvm/test/CodeGen/AArch64/sve-setcc.ll
    llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-reg.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-log.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll
    llvm/test/CodeGen/AArch64/sve-trunc.ll
    llvm/test/CodeGen/AArch64/sve-varargs-callee-broken.ll
    llvm/test/CodeGen/AArch64/sve-varargs-caller-broken.ll
    llvm/test/CodeGen/AArch64/sve-varargs.ll
    llvm/test/CodeGen/AArch64/sve-vscale-attr.ll
    llvm/test/CodeGen/AArch64/sve2-intrinsics-contiguous-conflict-detection.ll
    llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-gather-loads-32bit-unscaled-offset.ll
    llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-gather-loads-64bit-scaled-offset.ll
    llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-gather-loads-64bit-unscaled-offset.ll
    llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-scatter-stores-32bit-unscaled-offset.ll
    llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-scatter-stores-64bit-scaled-offset.ll
    llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-scatter-stores-64bit-unscaled-offset.ll
    llvm/test/CodeGen/AArch64/swift-async-pei.ll
    llvm/test/CodeGen/AArch64/swift-async-reg.ll
    llvm/test/CodeGen/AArch64/swift-async-unwind.ll
    llvm/test/CodeGen/AArch64/swift-async-win.ll
    llvm/test/CodeGen/AArch64/swift-async.ll
    llvm/test/CodeGen/AArch64/swift-dynamic-async-frame.ll
    llvm/test/CodeGen/AArch64/swift-error.ll
    llvm/test/CodeGen/AArch64/swift-return.ll
    llvm/test/CodeGen/AArch64/swifterror.ll
    llvm/test/CodeGen/AArch64/swiftself-scavenger.ll
    llvm/test/CodeGen/AArch64/swiftself.ll
    llvm/test/CodeGen/AArch64/swifttail-arm64_32.ll
    llvm/test/CodeGen/AArch64/swifttail-call.ll
    llvm/test/CodeGen/AArch64/tagged-globals-pic.ll
    llvm/test/CodeGen/AArch64/tagged-globals-static.ll
    llvm/test/CodeGen/AArch64/tagp.ll
    llvm/test/CodeGen/AArch64/tailcall-bitcast-memcpy.ll
    llvm/test/CodeGen/AArch64/tailcall-explicit-sret.ll
    llvm/test/CodeGen/AArch64/tailcall-implicit-sret.ll
    llvm/test/CodeGen/AArch64/tailcall-mem-intrinsics.ll
    llvm/test/CodeGen/AArch64/tailcall-ssp-split-debug.ll
    llvm/test/CodeGen/AArch64/tailcall-string-rvo.ll
    llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll
    llvm/test/CodeGen/AArch64/taildup-cfi.ll
    llvm/test/CodeGen/AArch64/tailmerging_in_mbp.ll
    llvm/test/CodeGen/AArch64/tbi.ll
    llvm/test/CodeGen/AArch64/tbl-loops.ll
    llvm/test/CodeGen/AArch64/tbz-tbnz.ll
    llvm/test/CodeGen/AArch64/tst-br.ll
    llvm/test/CodeGen/AArch64/typepromotion-overflow.ll
    llvm/test/CodeGen/AArch64/typepromotion-phisret.ll
    llvm/test/CodeGen/AArch64/typepromotion-signed.ll
    llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
    llvm/test/CodeGen/AArch64/uaddo.ll
    llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll
    llvm/test/CodeGen/AArch64/unwind-preserved.ll
    llvm/test/CodeGen/AArch64/usub_sat_vec.ll
    llvm/test/CodeGen/AArch64/v8.4-atomic-128.ll
    llvm/test/CodeGen/AArch64/vararg-tallcall.ll
    llvm/test/CodeGen/AArch64/vcvt-oversize.ll
    llvm/test/CodeGen/AArch64/vec_uaddo.ll
    llvm/test/CodeGen/AArch64/vec_umulo.ll
    llvm/test/CodeGen/AArch64/vecreduce-fadd.ll
    llvm/test/CodeGen/AArch64/vector-gep.ll
    llvm/test/CodeGen/AArch64/vector-insert-shuffle-cycle.ll
    llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll
    llvm/test/CodeGen/AArch64/vldn_shuffle.ll
    llvm/test/CodeGen/AArch64/volatile-combine.ll
    llvm/test/CodeGen/AArch64/vselect-ext.ll
    llvm/test/CodeGen/AArch64/win-alloca-no-stack-probe.ll
    llvm/test/CodeGen/AArch64/win-alloca.ll
    llvm/test/CodeGen/AArch64/win-tls.ll
    llvm/test/CodeGen/AArch64/win64-no-uwtable.ll
    llvm/test/CodeGen/AArch64/win64_vararg.ll
    llvm/test/CodeGen/AArch64/win64_vararg_float.ll
    llvm/test/CodeGen/AArch64/win64_vararg_float_cc.ll
    llvm/test/CodeGen/AArch64/windows-SEH-support.ll
    llvm/test/CodeGen/AArch64/windows-extern-weak.ll
    llvm/test/CodeGen/AArch64/wineh-mingw.ll
    llvm/test/CodeGen/AArch64/wineh-try-catch-cbz.ll
    llvm/test/CodeGen/AArch64/wineh-try-catch-nobase.ll
    llvm/test/CodeGen/AArch64/wineh-try-catch-realign.ll
    llvm/test/CodeGen/AArch64/wineh-try-catch-vla.ll
    llvm/test/CodeGen/AArch64/wineh-try-catch.ll
    llvm/test/CodeGen/AArch64/wineh-unwindhelp-via-fp.ll
    llvm/test/CodeGen/AArch64/wrong_debug_loc_after_regalloc.ll
    llvm/test/CodeGen/AArch64/xor.ll
    llvm/test/CodeGen/AArch64/zext-logic-shift-load.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/128bit_load_store.ll b/llvm/test/CodeGen/AArch64/128bit_load_store.ll
index 38d30dba4b8ce..ee092bc4cb7d2 100644
--- a/llvm/test/CodeGen/AArch64/128bit_load_store.ll
+++ b/llvm/test/CodeGen/AArch64/128bit_load_store.ll
@@ -1,53 +1,49 @@
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=neon | FileCheck %s
 
-define void @test_store_f128(fp128* %ptr, fp128 %val) #0 {
+define void @test_store_f128(ptr %ptr, fp128 %val) #0 {
 ; CHECK-LABEL: test_store_f128
 ; CHECK: str	 {{q[0-9]+}}, [{{x[0-9]+}}]
 entry:
-  store fp128 %val, fp128* %ptr, align 16
+  store fp128 %val, ptr %ptr, align 16
   ret void
 }
 
-define fp128 @test_load_f128(fp128* readonly %ptr) #2 {
+define fp128 @test_load_f128(ptr readonly %ptr) #2 {
 ; CHECK-LABEL: test_load_f128
 ; CHECK: ldr	 {{q[0-9]+}}, [{{x[0-9]+}}]
 entry:
-  %0 = load fp128, fp128* %ptr, align 16
+  %0 = load fp128, ptr %ptr, align 16
   ret fp128 %0
 }
 
-define void @test_vstrq_p128(i128* %ptr, i128 %val) #0 {
+define void @test_vstrq_p128(ptr %ptr, i128 %val) #0 {
 ; CHECK-LABEL: test_vstrq_p128
 ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [{{x[0-9]+}}]
 
 entry:
-  %0 = bitcast i128* %ptr to fp128*
-  %1 = bitcast i128 %val to fp128
-  store fp128 %1, fp128* %0, align 16
+  %0 = bitcast i128 %val to fp128
+  store fp128 %0, ptr %ptr, align 16
   ret void
 }
 
-define i128 @test_vldrq_p128(i128* readonly %ptr) #2 {
+define i128 @test_vldrq_p128(ptr readonly %ptr) #2 {
 ; CHECK-LABEL: test_vldrq_p128
 ; CHECK: ldp {{x[0-9]+}}, {{x[0-9]+}}, [{{x[0-9]+}}]
 
 entry:
-  %0 = bitcast i128* %ptr to fp128*
-  %1 = load fp128, fp128* %0, align 16
-  %2 = bitcast fp128 %1 to i128
-  ret i128 %2
+  %0 = load fp128, ptr %ptr, align 16
+  %1 = bitcast fp128 %0 to i128
+  ret i128 %1
 }
 
-define void @test_ld_st_p128(i128* nocapture %ptr) #0 {
+define void @test_ld_st_p128(ptr nocapture %ptr) #0 {
 ; CHECK-LABEL: test_ld_st_p128
 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}]
 ; CHECK-NEXT: str	{{q[0-9]+}}, [{{x[0-9]+}}, #16]
 entry:
-  %0 = bitcast i128* %ptr to fp128*
-  %1 = load fp128, fp128* %0, align 16
-  %add.ptr = getelementptr inbounds i128, i128* %ptr, i64 1
-  %2 = bitcast i128* %add.ptr to fp128*
-  store fp128 %1, fp128* %2, align 16
+  %0 = load fp128, ptr %ptr, align 16
+  %add.ptr = getelementptr inbounds i128, ptr %ptr, i64 1
+  store fp128 %0, ptr %add.ptr, align 16
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/2s-complement-asm.ll b/llvm/test/CodeGen/AArch64/2s-complement-asm.ll
index cf646d1360204..b58515c497c32 100644
--- a/llvm/test/CodeGen/AArch64/2s-complement-asm.ll
+++ b/llvm/test/CodeGen/AArch64/2s-complement-asm.ll
@@ -4,6 +4,6 @@
 ; CHECK: 0000002a 59ed145d
 @other = global i32 42
 @var = global i32 sub(i32 646102975,
-                      i32 add (i32 trunc(i64 sub(i64 ptrtoint(i32* @var to i64),
-                                                         i64 ptrtoint(i32* @other to i64)) to i32),
+                      i32 add (i32 trunc(i64 sub(i64 ptrtoint(ptr @var to i64),
+                                                         i64 ptrtoint(ptr @other to i64)) to i32),
                                i32 3432360802))

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/invoke-region.ll b/llvm/test/CodeGen/AArch64/GlobalISel/invoke-region.ll
index abc2cae35a9e2..37c0ea45e8c40 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/invoke-region.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/invoke-region.ll
@@ -7,7 +7,7 @@ declare void @may_throw()
 
 ; This test checks that the widened G_CONSTANT operand to the phi in "continue" bb
 ; is placed before the potentially throwing call in the entry block.
-define i1 @test_lpad_phi_widen_into_pred() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i1 @test_lpad_phi_widen_into_pred() personality ptr @__gxx_personality_v0 {
   ; CHECK-LABEL: name: test_lpad_phi_widen_into_pred
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
@@ -42,15 +42,15 @@ define i1 @test_lpad_phi_widen_into_pred() personality i8* bitcast (i32 (...)* @
   ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C4]]
   ; CHECK-NEXT:   $w0 = COPY [[AND]](s32)
   ; CHECK-NEXT:   RET_ReallyLR implicit $w0
-  store i32 42, i32* @global_var
+  store i32 42, ptr @global_var
   invoke void @may_throw()
           to label %continue unwind label %lpad
 
 lpad:                                             ; preds = %entry
   %p = phi i32 [ 11, %0 ]
-  %1 = landingpad { i8*, i32 }
-          catch i8* null
-  store i32 %p, i32* @global_var
+  %1 = landingpad { ptr, i32 }
+          catch ptr null
+  store i32 %p, ptr @global_var
   br label %continue
 
 continue:                                         ; preds = %entry, %lpad
@@ -59,7 +59,7 @@ continue:                                         ; preds = %entry, %lpad
 }
 
 ; Same test but with extensions.
-define i1 @test_lpad_phi_widen_into_pred_ext(i1 *%ptr) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i1 @test_lpad_phi_widen_into_pred_ext(ptr %ptr) personality ptr @__gxx_personality_v0 {
   ; CHECK-LABEL: name: test_lpad_phi_widen_into_pred_ext
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
@@ -98,16 +98,16 @@ define i1 @test_lpad_phi_widen_into_pred_ext(i1 *%ptr) personality i8* bitcast (
   ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C3]]
   ; CHECK-NEXT:   $w0 = COPY [[AND]](s32)
   ; CHECK-NEXT:   RET_ReallyLR implicit $w0
-  store i32 42, i32* @global_var
-  %v = load i1, i1* %ptr
+  store i32 42, ptr @global_var
+  %v = load i1, ptr %ptr
   invoke void @may_throw()
           to label %continue unwind label %lpad
 
 lpad:                                             ; preds = %entry
   %p = phi i32 [ 11, %0 ]
-  %1 = landingpad { i8*, i32 }
-          catch i8* null
-  store i32 %p, i32* @global_var
+  %1 = landingpad { ptr, i32 }
+          catch ptr null
+  store i32 %p, ptr @global_var
   br label %continue
 
 continue:                                         ; preds = %entry, %lpad

diff --git a/llvm/test/CodeGen/AArch64/PBQP-chain.ll b/llvm/test/CodeGen/AArch64/PBQP-chain.ll
index 3e5fa741c243a..10b299f6afa79 100644
--- a/llvm/test/CodeGen/AArch64/PBQP-chain.ll
+++ b/llvm/test/CodeGen/AArch64/PBQP-chain.ll
@@ -20,85 +20,85 @@ target triple = "aarch64"
 ; CHECK-ODD: fmadd {{d[0-9]*[13579]}}, {{d[0-9]*}}, {{d[0-9]*}}, {{d[0-9]*[13579]}}
 ; CHECK-ODD: fmadd {{d[0-9]*[13579]}}, {{d[0-9]*}}, {{d[0-9]*}}, {{d[0-9]*[13579]}}
 ; CHECK-ODD: fmadd {{d[0-9]*[13579]}}, {{d[0-9]*}}, {{d[0-9]*}}, {{d[0-9]*[13579]}}
-define void @fir(double* nocapture %rx, double* nocapture %ry, double* nocapture %c, double* nocapture %x, double* nocapture %y) {
+define void @fir(ptr nocapture %rx, ptr nocapture %ry, ptr nocapture %c, ptr nocapture %x, ptr nocapture %y) {
 entry:
-  %0 = load double, double* %c, align 8
-  %1 = load double, double* %x, align 8
+  %0 = load double, ptr %c, align 8
+  %1 = load double, ptr %x, align 8
   %mul = fmul fast double %1, %0
-  %2 = load double, double* %y, align 8
+  %2 = load double, ptr %y, align 8
   %mul7 = fmul fast double %2, %0
-  %arrayidx.1 = getelementptr inbounds double, double* %c, i64 1
-  %3 = load double, double* %arrayidx.1, align 8
-  %arrayidx2.1 = getelementptr inbounds double, double* %x, i64 1
-  %4 = load double, double* %arrayidx2.1, align 8
+  %arrayidx.1 = getelementptr inbounds double, ptr %c, i64 1
+  %3 = load double, ptr %arrayidx.1, align 8
+  %arrayidx2.1 = getelementptr inbounds double, ptr %x, i64 1
+  %4 = load double, ptr %arrayidx2.1, align 8
   %mul.1 = fmul fast double %4, %3
   %add.1 = fadd fast double %mul.1, %mul
-  %arrayidx6.1 = getelementptr inbounds double, double* %y, i64 1
-  %5 = load double, double* %arrayidx6.1, align 8
+  %arrayidx6.1 = getelementptr inbounds double, ptr %y, i64 1
+  %5 = load double, ptr %arrayidx6.1, align 8
   %mul7.1 = fmul fast double %5, %3
   %add8.1 = fadd fast double %mul7.1, %mul7
-  %arrayidx.2 = getelementptr inbounds double, double* %c, i64 2
-  %6 = load double, double* %arrayidx.2, align 8
-  %arrayidx2.2 = getelementptr inbounds double, double* %x, i64 2
-  %7 = load double, double* %arrayidx2.2, align 8
+  %arrayidx.2 = getelementptr inbounds double, ptr %c, i64 2
+  %6 = load double, ptr %arrayidx.2, align 8
+  %arrayidx2.2 = getelementptr inbounds double, ptr %x, i64 2
+  %7 = load double, ptr %arrayidx2.2, align 8
   %mul.2 = fmul fast double %7, %6
   %add.2 = fadd fast double %mul.2, %add.1
-  %arrayidx6.2 = getelementptr inbounds double, double* %y, i64 2
-  %8 = load double, double* %arrayidx6.2, align 8
+  %arrayidx6.2 = getelementptr inbounds double, ptr %y, i64 2
+  %8 = load double, ptr %arrayidx6.2, align 8
   %mul7.2 = fmul fast double %8, %6
   %add8.2 = fadd fast double %mul7.2, %add8.1
-  %arrayidx.3 = getelementptr inbounds double, double* %c, i64 3
-  %9 = load double, double* %arrayidx.3, align 8
-  %arrayidx2.3 = getelementptr inbounds double, double* %x, i64 3
-  %10 = load double, double* %arrayidx2.3, align 8
+  %arrayidx.3 = getelementptr inbounds double, ptr %c, i64 3
+  %9 = load double, ptr %arrayidx.3, align 8
+  %arrayidx2.3 = getelementptr inbounds double, ptr %x, i64 3
+  %10 = load double, ptr %arrayidx2.3, align 8
   %mul.3 = fmul fast double %10, %9
   %add.3 = fadd fast double %mul.3, %add.2
-  %arrayidx6.3 = getelementptr inbounds double, double* %y, i64 3
-  %11 = load double, double* %arrayidx6.3, align 8
+  %arrayidx6.3 = getelementptr inbounds double, ptr %y, i64 3
+  %11 = load double, ptr %arrayidx6.3, align 8
   %mul7.3 = fmul fast double %11, %9
   %add8.3 = fadd fast double %mul7.3, %add8.2
-  %arrayidx.4 = getelementptr inbounds double, double* %c, i64 4
-  %12 = load double, double* %arrayidx.4, align 8
-  %arrayidx2.4 = getelementptr inbounds double, double* %x, i64 4
-  %13 = load double, double* %arrayidx2.4, align 8
+  %arrayidx.4 = getelementptr inbounds double, ptr %c, i64 4
+  %12 = load double, ptr %arrayidx.4, align 8
+  %arrayidx2.4 = getelementptr inbounds double, ptr %x, i64 4
+  %13 = load double, ptr %arrayidx2.4, align 8
   %mul.4 = fmul fast double %13, %12
   %add.4 = fadd fast double %mul.4, %add.3
-  %arrayidx6.4 = getelementptr inbounds double, double* %y, i64 4
-  %14 = load double, double* %arrayidx6.4, align 8
+  %arrayidx6.4 = getelementptr inbounds double, ptr %y, i64 4
+  %14 = load double, ptr %arrayidx6.4, align 8
   %mul7.4 = fmul fast double %14, %12
   %add8.4 = fadd fast double %mul7.4, %add8.3
-  %arrayidx.5 = getelementptr inbounds double, double* %c, i64 5
-  %15 = load double, double* %arrayidx.5, align 8
-  %arrayidx2.5 = getelementptr inbounds double, double* %x, i64 5
-  %16 = load double, double* %arrayidx2.5, align 8
+  %arrayidx.5 = getelementptr inbounds double, ptr %c, i64 5
+  %15 = load double, ptr %arrayidx.5, align 8
+  %arrayidx2.5 = getelementptr inbounds double, ptr %x, i64 5
+  %16 = load double, ptr %arrayidx2.5, align 8
   %mul.5 = fmul fast double %16, %15
   %add.5 = fadd fast double %mul.5, %add.4
-  %arrayidx6.5 = getelementptr inbounds double, double* %y, i64 5
-  %17 = load double, double* %arrayidx6.5, align 8
+  %arrayidx6.5 = getelementptr inbounds double, ptr %y, i64 5
+  %17 = load double, ptr %arrayidx6.5, align 8
   %mul7.5 = fmul fast double %17, %15
   %add8.5 = fadd fast double %mul7.5, %add8.4
-  %arrayidx.6 = getelementptr inbounds double, double* %c, i64 6
-  %18 = load double, double* %arrayidx.6, align 8
-  %arrayidx2.6 = getelementptr inbounds double, double* %x, i64 6
-  %19 = load double, double* %arrayidx2.6, align 8
+  %arrayidx.6 = getelementptr inbounds double, ptr %c, i64 6
+  %18 = load double, ptr %arrayidx.6, align 8
+  %arrayidx2.6 = getelementptr inbounds double, ptr %x, i64 6
+  %19 = load double, ptr %arrayidx2.6, align 8
   %mul.6 = fmul fast double %19, %18
   %add.6 = fadd fast double %mul.6, %add.5
-  %arrayidx6.6 = getelementptr inbounds double, double* %y, i64 6
-  %20 = load double, double* %arrayidx6.6, align 8
+  %arrayidx6.6 = getelementptr inbounds double, ptr %y, i64 6
+  %20 = load double, ptr %arrayidx6.6, align 8
   %mul7.6 = fmul fast double %20, %18
   %add8.6 = fadd fast double %mul7.6, %add8.5
-  %arrayidx.7 = getelementptr inbounds double, double* %c, i64 7
-  %21 = load double, double* %arrayidx.7, align 8
-  %arrayidx2.7 = getelementptr inbounds double, double* %x, i64 7
-  %22 = load double, double* %arrayidx2.7, align 8
+  %arrayidx.7 = getelementptr inbounds double, ptr %c, i64 7
+  %21 = load double, ptr %arrayidx.7, align 8
+  %arrayidx2.7 = getelementptr inbounds double, ptr %x, i64 7
+  %22 = load double, ptr %arrayidx2.7, align 8
   %mul.7 = fmul fast double %22, %21
   %add.7 = fadd fast double %mul.7, %add.6
-  %arrayidx6.7 = getelementptr inbounds double, double* %y, i64 7
-  %23 = load double, double* %arrayidx6.7, align 8
+  %arrayidx6.7 = getelementptr inbounds double, ptr %y, i64 7
+  %23 = load double, ptr %arrayidx6.7, align 8
   %mul7.7 = fmul fast double %23, %21
   %add8.7 = fadd fast double %mul7.7, %add8.6
-  store double %add.7, double* %rx, align 8
-  store double %add8.7, double* %ry, align 8
+  store double %add.7, ptr %rx, align 8
+  store double %add8.7, ptr %ry, align 8
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll b/llvm/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll
index bd50b2d84b742..6c9e250af0886 100644
--- a/llvm/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll
+++ b/llvm/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll
@@ -1,13 +1,13 @@
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a57 -mattr=+neon -fp-contract=fast -regalloc=pbqp -pbqp-coalescing | FileCheck %s
 
 ; CHECK-LABEL: test:
-define i32 @test(i32 %acc, i32* nocapture readonly %c) {
+define i32 @test(i32 %acc, ptr nocapture readonly %c) {
 entry:
-  %0 = load i32, i32* %c, align 4
+  %0 = load i32, ptr %c, align 4
 ; CHECK-NOT: mov	 w{{[0-9]*}}, w0
   %add = add nsw i32 %0, %acc
-  %arrayidx1 = getelementptr inbounds i32, i32* %c, i64 1
-  %1 = load i32, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %c, i64 1
+  %1 = load i32, ptr %arrayidx1, align 4
   %add2 = add nsw i32 %add, %1
   ret i32 %add2
 }

diff --git a/llvm/test/CodeGen/AArch64/Redundantstore.ll b/llvm/test/CodeGen/AArch64/Redundantstore.ll
index b7822a882b4ab..6fec5573fdcb1 100644
--- a/llvm/test/CodeGen/AArch64/Redundantstore.ll
+++ b/llvm/test/CodeGen/AArch64/Redundantstore.ll
@@ -1,25 +1,23 @@
 ; RUN: llc < %s -O3 -mtriple=aarch64-eabi | FileCheck %s 
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-@end_of_array = common global i8* null, align 8
+@end_of_array = common global ptr null, align 8
 
 ; CHECK-LABEL: @test
 ; CHECK: stur
 ; CHECK-NOT: stur
-define i8* @test(i32 %size) {
+define ptr @test(i32 %size) {
 entry:
-  %0 = load i8*, i8** @end_of_array, align 8
+  %0 = load ptr, ptr @end_of_array, align 8
   %conv = sext i32 %size to i64
   %and = and i64 %conv, -8
   %conv2 = trunc i64 %and to i32
   %add.ptr.sum = add nsw i64 %and, -4
-  %add.ptr3 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum
-  %size4 = bitcast i8* %add.ptr3 to i32*
-  store i32 %conv2, i32* %size4, align 4
+  %add.ptr3 = getelementptr inbounds i8, ptr %0, i64 %add.ptr.sum
+  store i32 %conv2, ptr %add.ptr3, align 4
   %add.ptr.sum9 = add nsw i64 %and, -4
-  %add.ptr5 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum9
-  %size6 = bitcast i8* %add.ptr5 to i32*
-  store i32 %conv2, i32* %size6, align 4
-  ret i8* %0
+  %add.ptr5 = getelementptr inbounds i8, ptr %0, i64 %add.ptr.sum9
+  store i32 %conv2, ptr %add.ptr5, align 4
+  ret ptr %0
 }
 

diff --git a/llvm/test/CodeGen/AArch64/a57-csel.ll b/llvm/test/CodeGen/AArch64/a57-csel.ll
index 3c99a90fe28a0..b8df1d9eaa935 100644
--- a/llvm/test/CodeGen/AArch64/a57-csel.ll
+++ b/llvm/test/CodeGen/AArch64/a57-csel.ll
@@ -2,9 +2,9 @@
 
 ; Check that the select isn't expanded into a branch sequence
 ; when the icmp's first operand %x0 is from load.
-define i64 @f(i64 %a, i64 %b, i64* %c, i64 %d, i64 %e) {
+define i64 @f(i64 %a, i64 %b, ptr %c, i64 %d, i64 %e) {
   ; CHECK: csel
-  %x0 = load i64, i64* %c
+  %x0 = load i64, ptr %c
   %x1 = icmp eq i64 %x0, 0
   %x2 = select i1 %x1, i64 %a, i64 %b
   %x3 = add i64 %x2, %d

diff --git a/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll b/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll
index 0b48bb62851e8..0c1776e61a4d4 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll
@@ -8,11 +8,11 @@ entry:
   br label %for.body, !dbg !39
 
 for.body:                                         ; preds = %for.body, %entry
-  %arrayidx5 = getelementptr inbounds i32, i32* null, i64 1, !dbg !43
-  %0 = load i32, i32* null, align 4, !dbg !45, !tbaa !46
+  %arrayidx5 = getelementptr inbounds i32, ptr null, i64 1, !dbg !43
+  %0 = load i32, ptr null, align 4, !dbg !45, !tbaa !46
   %s1 = sub nsw i32 0, %0, !dbg !50
   %n1 = sext i32 %s1 to i64, !dbg !50
-  %arrayidx21 = getelementptr inbounds i32, i32* null, i64 3, !dbg !51
+  %arrayidx21 = getelementptr inbounds i32, ptr null, i64 3, !dbg !51
   %add53 = add nsw i64 %n1, 0, !dbg !52
   %add55 = add nsw i64 %n1, 0, !dbg !53
   %mul63 = mul nsw i64 %add53, -20995, !dbg !54
@@ -24,13 +24,13 @@ for.body:                                         ; preds = %for.body, %entry
   %add82 = add i64 %add81, 0, !dbg !58
   %shr83351 = lshr i64 %add82, 11, !dbg !58
   %conv84 = trunc i64 %shr83351 to i32, !dbg !58
-  store i32 %conv84, i32* %arrayidx21, align 4, !dbg !58, !tbaa !46
+  store i32 %conv84, ptr %arrayidx21, align 4, !dbg !58, !tbaa !46
   %add86 = add i64 0, 1024, !dbg !59
   %add87 = add i64 %add86, 0, !dbg !59
   %add88 = add i64 %add87, %add67, !dbg !59
   %shr89352 = lshr i64 %add88, 11, !dbg !59
   %n2 = trunc i64 %shr89352 to i32, !dbg !59
-  store i32 %n2, i32* %arrayidx5, align 4, !dbg !59, !tbaa !46
+  store i32 %n2, ptr %arrayidx5, align 4, !dbg !59, !tbaa !46
   br label %for.body, !dbg !39
 }
 

diff --git a/llvm/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll b/llvm/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll
index b2ee517f88681..9e27e9add6f02 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll
@@ -9,8 +9,8 @@ define void @foo() {
 entry:
 ;CHECK-LABEL: foo:
 ;CHECK: __floatsisf
-  %0 = load i32, i32* @x, align 4
+  %0 = load i32, ptr @x, align 4
   %conv = sitofp i32 %0 to float
-  store float %conv, float* bitcast (i32* @t to float*), align 4
+  store float %conv, ptr @t, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll b/llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll
index 043ce0933a9b8..136093bb960c0 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-DAGCombine-findBetterNeighborChains-crash.ll
@@ -3,38 +3,36 @@
 
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
 
-declare void @extern(i8*)
+declare void @extern(ptr)
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #0
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) #0
 
 ; Function Attrs: nounwind
-define void @func(float* noalias %arg, i32* noalias %arg1, i8* noalias %arg2, i8* noalias %arg3) #1 {
+define void @func(ptr noalias %arg, ptr noalias %arg1, ptr noalias %arg2, ptr noalias %arg3) #1 {
 bb:
-  %tmp = getelementptr inbounds i8, i8* %arg2, i64 88
-  tail call void @llvm.memset.p0i8.i64(i8* align 8 noalias %arg2, i8 0, i64 40, i1 false)
-  store i8 0, i8* %arg3
-  store i8 2, i8* %arg2
-  store float 0.000000e+00, float* %arg
-  %tmp4 = bitcast i8* %tmp to <4 x float>*
-  store volatile <4 x float> zeroinitializer, <4 x float>* %tmp4
-  store i32 5, i32* %arg1
-  tail call void @extern(i8* %tmp)
+  %tmp = getelementptr inbounds i8, ptr %arg2, i64 88
+  tail call void @llvm.memset.p0.i64(ptr align 8 noalias %arg2, i8 0, i64 40, i1 false)
+  store i8 0, ptr %arg3
+  store i8 2, ptr %arg2
+  store float 0.000000e+00, ptr %arg
+  store volatile <4 x float> zeroinitializer, ptr %tmp
+  store i32 5, ptr %arg1
+  tail call void @extern(ptr %tmp)
   ret void
 }
 
 ; Function Attrs: nounwind
-define void @func2(float* noalias %arg, i32* noalias %arg1, i8* noalias %arg2, i8* noalias %arg3) #1 {
+define void @func2(ptr noalias %arg, ptr noalias %arg1, ptr noalias %arg2, ptr noalias %arg3) #1 {
 bb:
-  %tmp = getelementptr inbounds i8, i8* %arg2, i64 88
-  tail call void @llvm.memset.p0i8.i64(i8* align 8 noalias %arg2, i8 0, i64 40, i1 false)
-  store i8 0, i8* %arg3
-  store i8 2, i8* %arg2
-  store float 0.000000e+00, float* %arg
-  %tmp4 = bitcast i8* %tmp to <4 x float>*
-  store <4 x float> zeroinitializer, <4 x float>* %tmp4
-  store i32 5, i32* %arg1
-  tail call void @extern(i8* %tmp)
+  %tmp = getelementptr inbounds i8, ptr %arg2, i64 88
+  tail call void @llvm.memset.p0.i64(ptr align 8 noalias %arg2, i8 0, i64 40, i1 false)
+  store i8 0, ptr %arg3
+  store i8 2, ptr %arg2
+  store float 0.000000e+00, ptr %arg
+  store <4 x float> zeroinitializer, ptr %tmp
+  store i32 5, ptr %arg1
+  tail call void @extern(ptr %tmp)
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll b/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
index 4a126113d9371..e90fa3044aa22 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
@@ -33,17 +33,17 @@ target triple = "aarch64"
 ; CHECK: fmadd [[x]]
 ; CHECK: str [[x]]
 
-define void @f1(double* nocapture readonly %p, double* nocapture %q) #0 {
+define void @f1(ptr nocapture readonly %p, ptr nocapture %q) #0 {
 entry:
-  %0 = load double, double* %p, align 8
-  %arrayidx1 = getelementptr inbounds double, double* %p, i64 1
-  %1 = load double, double* %arrayidx1, align 8
-  %arrayidx2 = getelementptr inbounds double, double* %p, i64 2
-  %2 = load double, double* %arrayidx2, align 8
-  %arrayidx3 = getelementptr inbounds double, double* %p, i64 3
-  %3 = load double, double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double, double* %p, i64 4
-  %4 = load double, double* %arrayidx4, align 8
+  %0 = load double, ptr %p, align 8
+  %arrayidx1 = getelementptr inbounds double, ptr %p, i64 1
+  %1 = load double, ptr %arrayidx1, align 8
+  %arrayidx2 = getelementptr inbounds double, ptr %p, i64 2
+  %2 = load double, ptr %arrayidx2, align 8
+  %arrayidx3 = getelementptr inbounds double, ptr %p, i64 3
+  %3 = load double, ptr %arrayidx3, align 8
+  %arrayidx4 = getelementptr inbounds double, ptr %p, i64 4
+  %4 = load double, ptr %arrayidx4, align 8
   %mul = fmul fast double %0, %1
   %add = fadd fast double %mul, %4
   %mul5 = fmul fast double %1, %2
@@ -52,20 +52,20 @@ entry:
   %sub = fsub fast double %add6, %mul7
   %mul8 = fmul fast double %2, %3
   %add9 = fadd fast double %mul8, %sub
-  store double %add9, double* %q, align 8
-  %arrayidx11 = getelementptr inbounds double, double* %p, i64 5
-  %5 = load double, double* %arrayidx11, align 8
-  %arrayidx12 = getelementptr inbounds double, double* %p, i64 6
-  %6 = load double, double* %arrayidx12, align 8
-  %arrayidx13 = getelementptr inbounds double, double* %p, i64 7
-  %7 = load double, double* %arrayidx13, align 8
+  store double %add9, ptr %q, align 8
+  %arrayidx11 = getelementptr inbounds double, ptr %p, i64 5
+  %5 = load double, ptr %arrayidx11, align 8
+  %arrayidx12 = getelementptr inbounds double, ptr %p, i64 6
+  %6 = load double, ptr %arrayidx12, align 8
+  %arrayidx13 = getelementptr inbounds double, ptr %p, i64 7
+  %7 = load double, ptr %arrayidx13, align 8
   %mul15 = fmul fast double %6, %7
   %mul16 = fmul fast double %0, %5
   %add17 = fadd fast double %mul16, %mul15
   %mul18 = fmul fast double %5, %6
   %add19 = fadd fast double %mul18, %add17
-  %arrayidx20 = getelementptr inbounds double, double* %q, i64 1
-  store double %add19, double* %arrayidx20, align 8
+  %arrayidx20 = getelementptr inbounds double, ptr %q, i64 1
+  store double %add19, ptr %arrayidx20, align 8
   ret void
 }
 
@@ -85,23 +85,23 @@ entry:
 ; CHECK-A53-DAG: str [[x]]
 ; CHECK-A53-DAG: str [[y]]
 
-define void @f2(double* nocapture readonly %p, double* nocapture %q) #0 {
+define void @f2(ptr nocapture readonly %p, ptr nocapture %q) #0 {
 entry:
-  %0 = load double, double* %p, align 8
-  %arrayidx1 = getelementptr inbounds double, double* %p, i64 1
-  %1 = load double, double* %arrayidx1, align 8
-  %arrayidx2 = getelementptr inbounds double, double* %p, i64 2
-  %2 = load double, double* %arrayidx2, align 8
-  %arrayidx3 = getelementptr inbounds double, double* %p, i64 3
-  %3 = load double, double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double, double* %p, i64 4
-  %4 = load double, double* %arrayidx4, align 8
-  %arrayidx5 = getelementptr inbounds double, double* %p, i64 5
-  %5 = load double, double* %arrayidx5, align 8
-  %arrayidx6 = getelementptr inbounds double, double* %p, i64 6
-  %6 = load double, double* %arrayidx6, align 8
-  %arrayidx7 = getelementptr inbounds double, double* %p, i64 7
-  %7 = load double, double* %arrayidx7, align 8
+  %0 = load double, ptr %p, align 8
+  %arrayidx1 = getelementptr inbounds double, ptr %p, i64 1
+  %1 = load double, ptr %arrayidx1, align 8
+  %arrayidx2 = getelementptr inbounds double, ptr %p, i64 2
+  %2 = load double, ptr %arrayidx2, align 8
+  %arrayidx3 = getelementptr inbounds double, ptr %p, i64 3
+  %3 = load double, ptr %arrayidx3, align 8
+  %arrayidx4 = getelementptr inbounds double, ptr %p, i64 4
+  %4 = load double, ptr %arrayidx4, align 8
+  %arrayidx5 = getelementptr inbounds double, ptr %p, i64 5
+  %5 = load double, ptr %arrayidx5, align 8
+  %arrayidx6 = getelementptr inbounds double, ptr %p, i64 6
+  %6 = load double, ptr %arrayidx6, align 8
+  %arrayidx7 = getelementptr inbounds double, ptr %p, i64 7
+  %7 = load double, ptr %arrayidx7, align 8
   %mul = fmul fast double %0, %1
   %add = fadd fast double %mul, %7
   %mul8 = fmul fast double %5, %6
@@ -115,9 +115,9 @@ entry:
   %add15 = fadd fast double %mul14, %add12
   %mul16 = fmul fast double %2, %3
   %add17 = fadd fast double %mul16, %sub
-  store double %add17, double* %q, align 8
-  %arrayidx19 = getelementptr inbounds double, double* %q, i64 1
-  store double %add15, double* %arrayidx19, align 8
+  store double %add17, ptr %q, align 8
+  %arrayidx19 = getelementptr inbounds double, ptr %q, i64 1
+  store double %add15, ptr %arrayidx19, align 8
   ret void
 }
 
@@ -131,17 +131,17 @@ entry:
 ; CHECK: fmadd [[y:d[0-9]+]], {{.*}}, [[x]]
 ; CHECK: str [[y]]
 
-define void @f3(double* nocapture readonly %p, double* nocapture %q) #0 {
+define void @f3(ptr nocapture readonly %p, ptr nocapture %q) #0 {
 entry:
-  %0 = load double, double* %p, align 8
-  %arrayidx1 = getelementptr inbounds double, double* %p, i64 1
-  %1 = load double, double* %arrayidx1, align 8
-  %arrayidx2 = getelementptr inbounds double, double* %p, i64 2
-  %2 = load double, double* %arrayidx2, align 8
-  %arrayidx3 = getelementptr inbounds double, double* %p, i64 3
-  %3 = load double, double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double, double* %p, i64 4
-  %4 = load double, double* %arrayidx4, align 8
+  %0 = load double, ptr %p, align 8
+  %arrayidx1 = getelementptr inbounds double, ptr %p, i64 1
+  %1 = load double, ptr %arrayidx1, align 8
+  %arrayidx2 = getelementptr inbounds double, ptr %p, i64 2
+  %2 = load double, ptr %arrayidx2, align 8
+  %arrayidx3 = getelementptr inbounds double, ptr %p, i64 3
+  %3 = load double, ptr %arrayidx3, align 8
+  %arrayidx4 = getelementptr inbounds double, ptr %p, i64 4
+  %4 = load double, ptr %arrayidx4, align 8
   %mul = fmul fast double %0, %1
   %add = fadd fast double %mul, %4
   %mul5 = fmul fast double %1, %2
@@ -154,11 +154,11 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  tail call void bitcast (void (...)* @g to void ()*)() #2
+  tail call void @g() #2
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
-  store double %add9, double* %q, align 8
+  store double %add9, ptr %q, align 8
   ret void
 }
 
@@ -180,23 +180,23 @@ declare void @g(...) #1
 ; CHECK-A53-DAG: str [[x]]
 ; CHECK-A53-DAG: str [[y]]
 
-define void @f4(float* nocapture readonly %p, float* nocapture %q) #0 {
+define void @f4(ptr nocapture readonly %p, ptr nocapture %q) #0 {
 entry:
-  %0 = load float, float* %p, align 4
-  %arrayidx1 = getelementptr inbounds float, float* %p, i64 1
-  %1 = load float, float* %arrayidx1, align 4
-  %arrayidx2 = getelementptr inbounds float, float* %p, i64 2
-  %2 = load float, float* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds float, float* %p, i64 3
-  %3 = load float, float* %arrayidx3, align 4
-  %arrayidx4 = getelementptr inbounds float, float* %p, i64 4
-  %4 = load float, float* %arrayidx4, align 4
-  %arrayidx5 = getelementptr inbounds float, float* %p, i64 5
-  %5 = load float, float* %arrayidx5, align 4
-  %arrayidx6 = getelementptr inbounds float, float* %p, i64 6
-  %6 = load float, float* %arrayidx6, align 4
-  %arrayidx7 = getelementptr inbounds float, float* %p, i64 7
-  %7 = load float, float* %arrayidx7, align 4
+  %0 = load float, ptr %p, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %p, i64 1
+  %1 = load float, ptr %arrayidx1, align 4
+  %arrayidx2 = getelementptr inbounds float, ptr %p, i64 2
+  %2 = load float, ptr %arrayidx2, align 4
+  %arrayidx3 = getelementptr inbounds float, ptr %p, i64 3
+  %3 = load float, ptr %arrayidx3, align 4
+  %arrayidx4 = getelementptr inbounds float, ptr %p, i64 4
+  %4 = load float, ptr %arrayidx4, align 4
+  %arrayidx5 = getelementptr inbounds float, ptr %p, i64 5
+  %5 = load float, ptr %arrayidx5, align 4
+  %arrayidx6 = getelementptr inbounds float, ptr %p, i64 6
+  %6 = load float, ptr %arrayidx6, align 4
+  %arrayidx7 = getelementptr inbounds float, ptr %p, i64 7
+  %7 = load float, ptr %arrayidx7, align 4
   %mul = fmul fast float %0, %1
   %add = fadd fast float %mul, %7
   %mul8 = fmul fast float %5, %6
@@ -210,9 +210,9 @@ entry:
   %add15 = fadd fast float %mul14, %add12
   %mul16 = fmul fast float %2, %3
   %add17 = fadd fast float %mul16, %sub
-  store float %add17, float* %q, align 4
-  %arrayidx19 = getelementptr inbounds float, float* %q, i64 1
-  store float %add15, float* %arrayidx19, align 4
+  store float %add17, ptr %q, align 4
+  %arrayidx19 = getelementptr inbounds float, ptr %q, i64 1
+  store float %add15, ptr %arrayidx19, align 4
   ret void
 }
 
@@ -226,17 +226,17 @@ entry:
 ; CHECK: fmadd [[y:s[0-9]+]], {{.*}}, [[x]]
 ; CHECK: str [[y]]
 
-define void @f5(float* nocapture readonly %p, float* nocapture %q) #0 {
+define void @f5(ptr nocapture readonly %p, ptr nocapture %q) #0 {
 entry:
-  %0 = load float, float* %p, align 4
-  %arrayidx1 = getelementptr inbounds float, float* %p, i64 1
-  %1 = load float, float* %arrayidx1, align 4
-  %arrayidx2 = getelementptr inbounds float, float* %p, i64 2
-  %2 = load float, float* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds float, float* %p, i64 3
-  %3 = load float, float* %arrayidx3, align 4
-  %arrayidx4 = getelementptr inbounds float, float* %p, i64 4
-  %4 = load float, float* %arrayidx4, align 4
+  %0 = load float, ptr %p, align 4
+  %arrayidx1 = getelementptr inbounds float, ptr %p, i64 1
+  %1 = load float, ptr %arrayidx1, align 4
+  %arrayidx2 = getelementptr inbounds float, ptr %p, i64 2
+  %2 = load float, ptr %arrayidx2, align 4
+  %arrayidx3 = getelementptr inbounds float, ptr %p, i64 3
+  %3 = load float, ptr %arrayidx3, align 4
+  %arrayidx4 = getelementptr inbounds float, ptr %p, i64 4
+  %4 = load float, ptr %arrayidx4, align 4
   %mul = fmul fast float %0, %1
   %add = fadd fast float %mul, %4
   %mul5 = fmul fast float %1, %2
@@ -249,11 +249,11 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  tail call void bitcast (void (...)* @g to void ()*)() #2
+  tail call void @g() #2
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
-  store float %add9, float* %q, align 4
+  store float %add9, ptr %q, align 4
   ret void
 }
 
@@ -268,17 +268,17 @@ if.end:                                           ; preds = %if.then, %entry
 ; CHECK: bl hh
 ; CHECK: str d0
 
-define void @f6(double* nocapture readonly %p, double* nocapture %q) #0 {
+define void @f6(ptr nocapture readonly %p, ptr nocapture %q) #0 {
 entry:
-  %0 = load double, double* %p, align 8
-  %arrayidx1 = getelementptr inbounds double, double* %p, i64 1
-  %1 = load double, double* %arrayidx1, align 8
-  %arrayidx2 = getelementptr inbounds double, double* %p, i64 2
-  %2 = load double, double* %arrayidx2, align 8
-  %arrayidx3 = getelementptr inbounds double, double* %p, i64 3
-  %3 = load double, double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double, double* %p, i64 4
-  %4 = load double, double* %arrayidx4, align 8
+  %0 = load double, ptr %p, align 8
+  %arrayidx1 = getelementptr inbounds double, ptr %p, i64 1
+  %1 = load double, ptr %arrayidx1, align 8
+  %arrayidx2 = getelementptr inbounds double, ptr %p, i64 2
+  %2 = load double, ptr %arrayidx2, align 8
+  %arrayidx3 = getelementptr inbounds double, ptr %p, i64 3
+  %3 = load double, ptr %arrayidx3, align 8
+  %arrayidx4 = getelementptr inbounds double, ptr %p, i64 4
+  %4 = load double, ptr %arrayidx4, align 8
   %mul = fmul fast double %0, %1
   %add = fadd fast double %mul, %4
   %mul5 = fmul fast double %1, %2
@@ -288,7 +288,7 @@ entry:
   %mul8 = fmul fast double %2, %3
   %add9 = fadd fast double %mul8, %sub
   %call = tail call double @hh(double %add9) #2
-  store double %call, double* %q, align 8
+  store double %call, ptr %q, align 8
   ret void
 }
 
@@ -303,17 +303,17 @@ declare double @hh(double) #1
 ; CHECK: fmadd [[x:d[0-9]+]]
 ; CHECK: fadd d1, [[x]], [[x]]
 
-define void @f7(double* nocapture readonly %p, double* nocapture %q) #0 {
+define void @f7(ptr nocapture readonly %p, ptr nocapture %q) #0 {
 entry:
-  %0 = load double, double* %p, align 8
-  %arrayidx1 = getelementptr inbounds double, double* %p, i64 1
-  %1 = load double, double* %arrayidx1, align 8
-  %arrayidx2 = getelementptr inbounds double, double* %p, i64 2
-  %2 = load double, double* %arrayidx2, align 8
-  %arrayidx3 = getelementptr inbounds double, double* %p, i64 3
-  %3 = load double, double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double, double* %p, i64 4
-  %4 = load double, double* %arrayidx4, align 8
+  %0 = load double, ptr %p, align 8
+  %arrayidx1 = getelementptr inbounds double, ptr %p, i64 1
+  %1 = load double, ptr %arrayidx1, align 8
+  %arrayidx2 = getelementptr inbounds double, ptr %p, i64 2
+  %2 = load double, ptr %arrayidx2, align 8
+  %arrayidx3 = getelementptr inbounds double, ptr %p, i64 3
+  %3 = load double, ptr %arrayidx3, align 8
+  %arrayidx4 = getelementptr inbounds double, ptr %p, i64 4
+  %4 = load double, ptr %arrayidx4, align 8
   %mul = fmul fast double %0, %1
   %add = fadd fast double %mul, %4
   %mul5 = fmul fast double %1, %2

diff --git a/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll b/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll
index 0c6be21f89073..5041cfbbaa863 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll
@@ -11,14 +11,14 @@ invoke.cont145:
   br i1 %or.cond, label %if.then274, label %invoke.cont145
 
 if.then274:
-  %0 = load i32, i32* null, align 4
+  %0 = load i32, ptr null, align 4
   br i1 undef, label %invoke.cont291, label %if.else313
 
 invoke.cont291:
   %idxprom.i.i.i605 = sext i32 %0 to i64
-  %arrayidx.i.i.i607 = getelementptr inbounds double, double* undef, i64 %idxprom.i.i.i605
+  %arrayidx.i.i.i607 = getelementptr inbounds double, ptr undef, i64 %idxprom.i.i.i605
   %idxprom.i.i.i596 = sext i32 %0 to i64
-  %arrayidx.i.i.i598 = getelementptr inbounds double, double* undef, i64 %idxprom.i.i.i596
+  %arrayidx.i.i.i598 = getelementptr inbounds double, ptr undef, i64 %idxprom.i.i.i596
   br label %if.end356
 
 if.else313:
@@ -30,7 +30,7 @@ invoke.cont317:
 
 invoke.cont326:
   %idxprom.i.i.i587 = sext i32 %0 to i64
-  %arrayidx.i.i.i589 = getelementptr inbounds double, double* undef, i64 %idxprom.i.i.i587
+  %arrayidx.i.i.i589 = getelementptr inbounds double, ptr undef, i64 %idxprom.i.i.i587
   %sub329 = fsub fast double undef, undef
   br label %invoke.cont334
 
@@ -40,12 +40,12 @@ invoke.cont334:
 
 invoke.cont342:
   %idxprom.i.i.i578 = sext i32 %0 to i64
-  %arrayidx.i.i.i580 = getelementptr inbounds double, double* undef, i64 %idxprom.i.i.i578
+  %arrayidx.i.i.i580 = getelementptr inbounds double, ptr undef, i64 %idxprom.i.i.i578
   br label %if.end356
 
 invoke.cont353:
   %idxprom.i.i.i572 = sext i32 %0 to i64
-  %arrayidx.i.i.i574 = getelementptr inbounds double, double* undef, i64 %idxprom.i.i.i572
+  %arrayidx.i.i.i574 = getelementptr inbounds double, ptr undef, i64 %idxprom.i.i.i572
   br label %if.end356
 
 if.end356:

diff --git a/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll b/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll
index 07e0ba654d21c..d8280dadc550e 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll
@@ -4,7 +4,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 target triple = "arm64-apple-macosx10.9"
 
 ; Check that sexts get promoted above adds.
-define void @foo(i32* nocapture %a, i32 %i) {
+define void @foo(ptr nocapture %a, i32 %i) {
 entry:
 ; CHECK-LABEL: _foo:
 ; CHECK: add
@@ -14,15 +14,15 @@ entry:
 ; CHECK-NEXT: ret
   %add = add nsw i32 %i, 1
   %idxprom = sext i32 %add to i64
-  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %idxprom
+  %0 = load i32, ptr %arrayidx, align 4
   %add1 = add nsw i32 %i, 2
   %idxprom2 = sext i32 %add1 to i64
-  %arrayidx3 = getelementptr inbounds i32, i32* %a, i64 %idxprom2
-  %1 = load i32, i32* %arrayidx3, align 4
+  %arrayidx3 = getelementptr inbounds i32, ptr %a, i64 %idxprom2
+  %1 = load i32, ptr %arrayidx3, align 4
   %add4 = add nsw i32 %1, %0
   %idxprom5 = sext i32 %i to i64
-  %arrayidx6 = getelementptr inbounds i32, i32* %a, i64 %idxprom5
-  store i32 %add4, i32* %arrayidx6, align 4
+  %arrayidx6 = getelementptr inbounds i32, ptr %a, i64 %idxprom5
+  store i32 %add4, ptr %arrayidx6, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/aarch64-addv.ll b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
index 36b418b0cee1e..2b71126ee175b 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-addv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
@@ -9,57 +9,57 @@ declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>)
 declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>)
 declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)
 
-define i8 @add_B(<16 x i8>* %arr)  {
+define i8 @add_B(ptr %arr)  {
 ; CHECK-LABEL: add_B:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    addv b0, v0.16b
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %bin.rdx = load <16 x i8>, <16 x i8>* %arr
+  %bin.rdx = load <16 x i8>, ptr %arr
   %r = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %bin.rdx)
   ret i8 %r
 }
 
-define i16 @add_H(<8 x i16>* %arr)  {
+define i16 @add_H(ptr %arr)  {
 ; CHECK-LABEL: add_H:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    addv h0, v0.8h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %bin.rdx = load <8 x i16>, <8 x i16>* %arr
+  %bin.rdx = load <8 x i16>, ptr %arr
   %r = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %bin.rdx)
   ret i16 %r
 }
 
-define i32 @add_S( <4 x i32>* %arr)  {
+define i32 @add_S( ptr %arr)  {
 ; CHECK-LABEL: add_S:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    addv s0, v0.4s
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %bin.rdx = load <4 x i32>, <4 x i32>* %arr
+  %bin.rdx = load <4 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %bin.rdx)
   ret i32 %r
 }
 
-define i64 @add_D(<2 x i64>* %arr)  {
+define i64 @add_D(ptr %arr)  {
 ; CHECK-LABEL: add_D:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    addp d0, v0.2d
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    ret
-  %bin.rdx = load <2 x i64>, <2 x i64>* %arr
+  %bin.rdx = load <2 x i64>, ptr %arr
   %r = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %bin.rdx)
   ret i64 %r
 }
 
 declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
 
-define i32 @oversized_ADDV_256(i8* noalias nocapture readonly %arg1, i8* noalias nocapture readonly %arg2) {
+define i32 @oversized_ADDV_256(ptr noalias nocapture readonly %arg1, ptr noalias nocapture readonly %arg2) {
 ; CHECK-LABEL: oversized_ADDV_256:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -69,23 +69,21 @@ define i32 @oversized_ADDV_256(i8* noalias nocapture readonly %arg1, i8* noalias
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast i8* %arg1 to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0, align 1
-  %2 = zext <8 x i8> %1 to <8 x i32>
-  %3 = bitcast i8* %arg2 to <8 x i8>*
-  %4 = load <8 x i8>, <8 x i8>* %3, align 1
-  %5 = zext <8 x i8> %4 to <8 x i32>
-  %6 = sub nsw <8 x i32> %2, %5
-  %7 = icmp slt <8 x i32> %6, zeroinitializer
-  %8 = sub nsw <8 x i32> zeroinitializer, %6
-  %9 = select <8 x i1> %7, <8 x i32> %8, <8 x i32> %6
-  %r = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %9)
+  %0 = load <8 x i8>, ptr %arg1, align 1
+  %1 = zext <8 x i8> %0 to <8 x i32>
+  %2 = load <8 x i8>, ptr %arg2, align 1
+  %3 = zext <8 x i8> %2 to <8 x i32>
+  %4 = sub nsw <8 x i32> %1, %3
+  %5 = icmp slt <8 x i32> %4, zeroinitializer
+  %6 = sub nsw <8 x i32> zeroinitializer, %4
+  %7 = select <8 x i1> %5, <8 x i32> %6, <8 x i32> %4
+  %r = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %7)
   ret i32 %r
 }
 
 declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
 
-define i32 @oversized_ADDV_512(<16 x i32>* %arr)  {
+define i32 @oversized_ADDV_512(ptr %arr)  {
 ; CHECK-LABEL: oversized_ADDV_512:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #32]
@@ -96,7 +94,7 @@ define i32 @oversized_ADDV_512(<16 x i32>* %arr)  {
 ; CHECK-NEXT:    addv s0, v0.4s
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %bin.rdx = load <16 x i32>, <16 x i32>* %arr
+  %bin.rdx = load <16 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %bin.rdx)
   ret i32 %r
 }

diff --git a/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll b/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll
index 6bb4ab74a85c0..dd562a4b2177b 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll
@@ -13,9 +13,9 @@ define dso_local void @movi_modimm_t1() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -29,9 +29,9 @@ define dso_local void @movi_modimm_t2() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 256, i16 0, i16 256, i16 0, i16 256, i16 0, i16 256, i16 0>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -45,9 +45,9 @@ define dso_local void @movi_modimm_t3() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -61,9 +61,9 @@ define dso_local void @movi_modimm_t4() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 0, i16 256, i16 0, i16 256, i16 0, i16 256, i16 0, i16 256>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -77,9 +77,9 @@ define dso_local void @movi_modimm_t5() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -93,9 +93,9 @@ define dso_local void @movi_modimm_t6() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -109,9 +109,9 @@ define dso_local void @movi_modimm_t7() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 511, i16 0, i16 511, i16 0, i16 511, i16 0, i16 511, i16 0>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -125,9 +125,9 @@ define dso_local void @movi_modimm_t8() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 1>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -141,9 +141,9 @@ define dso_local void @movi_modimm_t9() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -157,9 +157,9 @@ define dso_local void @movi_modimm_t10() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 -1, i16 0, i16 -1, i16 0, i16 -1, i16 0, i16 -1, i16 0>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -173,9 +173,9 @@ define dso_local void @fmov_modimm_t11() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 0, i16 16448, i16 0, i16 16448, i16 0, i16 16448, i16 0, i16 16448>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -189,9 +189,9 @@ define dso_local void @fmov_modimm_t12() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 0, i16 0, i16 0, i16 16327, i16 0, i16 0, i16 0, i16 16327>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -205,9 +205,9 @@ define dso_local void @mvni_modimm_t1() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 65534, i16 65535, i16 65534, i16 65535, i16 65534, i16 65535, i16 65534, i16 65535>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -221,9 +221,9 @@ define dso_local void @mvni_modimm_t2() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 65279, i16 65535, i16 65279, i16 65535, i16 65279, i16 65535, i16 65279, i16 65535>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -237,9 +237,9 @@ define dso_local void @mvni_modimm_t3() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 65535, i16 65534, i16 65535, i16 65534, i16 65535, i16 65534, i16 65535, i16 65534>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -253,9 +253,9 @@ define dso_local void @mvni_modimm_t4() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 65535, i16 65279, i16 65535, i16 65279, i16 65535, i16 65279, i16 65535, i16 65279>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -269,9 +269,9 @@ define dso_local void @mvni_modimm_t5() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -285,9 +285,9 @@ define dso_local void @mvni_modimm_t6() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 65279, i16 65279, i16 65279, i16 65279, i16 65279, i16 65279, i16 65279, i16 65279>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -301,9 +301,9 @@ define dso_local void @mvni_modimm_t7() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 65024, i16 65535, i16 65024, i16 65535, i16 65024, i16 65535, i16 65024, i16 65535>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -317,9 +317,9 @@ define dso_local void @mvni_modimm_t8() nounwind {
 ; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = add <8 x i16> %in, <i16 0, i16 65534, i16 0, i16 65534, i16 0, i16 65534, i16 0, i16 65534>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -332,9 +332,9 @@ define dso_local void @bic_modimm_t1() nounwind {
 ; CHECK-NEXT:    bic v0.4s, #1
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = and <8 x i16> %in, <i16 65534, i16 65535, i16 65534, i16 65535, i16 65534, i16 65535, i16 65534, i16 65535>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -347,9 +347,9 @@ define dso_local void @bic_modimm_t2() nounwind {
 ; CHECK-NEXT:    bic v0.4s, #1, lsl #8
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = and <8 x i16> %in, <i16 65279, i16 65535, i16 65279, i16 65535, i16 65279, i16 65535, i16 65279, i16 65535>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -362,9 +362,9 @@ define dso_local void @bic_modimm_t3() nounwind {
 ; CHECK-NEXT:    bic v0.4s, #1, lsl #16
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = and <8 x i16> %in, <i16 65535, i16 65534, i16 65535, i16 65534, i16 65535, i16 65534, i16 65535, i16 65534>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -377,9 +377,9 @@ define dso_local void @bic_modimm_t4() nounwind {
 ; CHECK-NEXT:    bic v0.4s, #1, lsl #24
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = and <8 x i16> %in, <i16 65535, i16 65279, i16 65535, i16 65279, i16 65535, i16 65279, i16 65535, i16 65279>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -392,9 +392,9 @@ define dso_local void @bic_modimm_t5() nounwind {
 ; CHECK-NEXT:    bic v0.8h, #1
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = and <8 x i16> %in, <i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -407,9 +407,9 @@ define dso_local void @bic_modimm_t6() nounwind {
 ; CHECK-NEXT:    bic v0.8h, #1, lsl #8
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = and <8 x i16> %in, <i16 65279, i16 65279, i16 65279, i16 65279, i16 65279, i16 65279, i16 65279, i16 65279>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -422,9 +422,9 @@ define dso_local void @orr_modimm_t1() nounwind {
 ; CHECK-NEXT:    orr v0.4s, #1
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = or <8 x i16> %in, <i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -437,9 +437,9 @@ define dso_local void @orr_modimm_t2() nounwind {
 ; CHECK-NEXT:    orr v0.4s, #1, lsl #8
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = or <8 x i16> %in, <i16 256, i16 0, i16 256, i16 0, i16 256, i16 0, i16 256, i16 0>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -452,9 +452,9 @@ define dso_local void @orr_modimm_t3() nounwind {
 ; CHECK-NEXT:    orr v0.4s, #1, lsl #16
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = or <8 x i16> %in, <i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -467,9 +467,9 @@ define dso_local void @orr_modimm_t4() nounwind {
 ; CHECK-NEXT:    orr v0.4s, #1, lsl #24
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = or <8 x i16> %in, <i16 0, i16 256, i16 0, i16 256, i16 0, i16 256, i16 0, i16 256>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -482,9 +482,9 @@ define dso_local void @orr_modimm_t5() nounwind {
 ; CHECK-NEXT:    orr v0.8h, #1
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = or <8 x i16> %in, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -497,9 +497,9 @@ define dso_local void @orr_modimm_t6() nounwind {
 ; CHECK-NEXT:    orr v0.8h, #1, lsl #8
 ; CHECK-NEXT:    st1 { v0.8h }, [x8]
 ; CHECK-NEXT:    ret
-  %in = load <8 x i16>, <8 x i16>* @vec_v8i16
+  %in = load <8 x i16>, ptr @vec_v8i16
   %rv = or <8 x i16> %in, <i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256>
-  store <8 x i16> %rv, <8 x i16>* @vec_v8i16
+  store <8 x i16> %rv, ptr @vec_v8i16
   ret void
 }
 
@@ -1031,7 +1031,7 @@ define dso_local void @modimm_t12_call() {
   ret void
 }
 
-define <2 x double> @test_v1f64(<1 x double> %0, <2 x double>* %1) {
+define <2 x double> @test_v1f64(<1 x double> %0, ptr %1) {
 ; CHECK-LABEL: test_v1f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvni v1.2s, #31, msl #16

diff --git a/llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll b/llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll
index abbf523e1661c..903da96c3b960 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll
@@ -7,69 +7,67 @@
 %struct.bfloat16x4x4_t = type { [4 x <4 x bfloat>] }
 %struct.bfloat16x8x4_t = type { [4 x <8 x bfloat>] }
 
-define <4 x bfloat> @test_vld1_bf16(bfloat* nocapture readonly %ptr) local_unnamed_addr nounwind {
+define <4 x bfloat> @test_vld1_bf16(ptr nocapture readonly %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld1_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast bfloat* %ptr to <4 x bfloat>*
-  %1 = load <4 x bfloat>, <4 x bfloat>* %0, align 2
-  ret <4 x bfloat> %1
+  %0 = load <4 x bfloat>, ptr %ptr, align 2
+  ret <4 x bfloat> %0
 }
 
-define <8 x bfloat> @test_vld1q_bf16(bfloat* nocapture readonly %ptr) local_unnamed_addr nounwind {
+define <8 x bfloat> @test_vld1q_bf16(ptr nocapture readonly %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld1q_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast bfloat* %ptr to <8 x bfloat>*
-  %1 = load <8 x bfloat>, <8 x bfloat>* %0, align 2
-  ret <8 x bfloat> %1
+  %0 = load <8 x bfloat>, ptr %ptr, align 2
+  ret <8 x bfloat> %0
 }
 
-define <4 x bfloat> @test_vld1_lane_bf16(bfloat* nocapture readonly %ptr, <4 x bfloat> %src) local_unnamed_addr nounwind {
+define <4 x bfloat> @test_vld1_lane_bf16(ptr nocapture readonly %ptr, <4 x bfloat> %src) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld1_lane_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    ld1 { v0.h }[0], [x0]
 ; CHECK:    ret
 entry:
-  %0 = load bfloat, bfloat* %ptr, align 2
+  %0 = load bfloat, ptr %ptr, align 2
   %vld1_lane = insertelement <4 x bfloat> %src, bfloat %0, i32 0
   ret <4 x bfloat> %vld1_lane
 }
 
-define <8 x bfloat> @test_vld1q_lane_bf16(bfloat* nocapture readonly %ptr, <8 x bfloat> %src) local_unnamed_addr nounwind {
+define <8 x bfloat> @test_vld1q_lane_bf16(ptr nocapture readonly %ptr, <8 x bfloat> %src) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld1q_lane_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1 { v0.h }[7], [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load bfloat, bfloat* %ptr, align 2
+  %0 = load bfloat, ptr %ptr, align 2
   %vld1_lane = insertelement <8 x bfloat> %src, bfloat %0, i32 7
   ret <8 x bfloat> %vld1_lane
 }
 
-define <4 x bfloat> @test_vld1_dup_bf16(bfloat* nocapture readonly %ptr) local_unnamed_addr nounwind {
+define <4 x bfloat> @test_vld1_dup_bf16(ptr nocapture readonly %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld1_dup_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1r { v0.4h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load bfloat, bfloat* %ptr, align 2
+  %0 = load bfloat, ptr %ptr, align 2
   %1 = insertelement <4 x bfloat> undef, bfloat %0, i32 0
   %lane = shufflevector <4 x bfloat> %1, <4 x bfloat> undef, <4 x i32> zeroinitializer
   ret <4 x bfloat> %lane
 }
 
-define %struct.bfloat16x4x2_t @test_vld1_bf16_x2(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x4x2_t @test_vld1_bf16_x2(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld1_bf16_x2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1 { v0.4h, v1.4h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %vld1xN = tail call { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x2.v4bf16.p0bf16(bfloat* %ptr)
+  %vld1xN = tail call { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x2.v4bf16.p0(ptr %ptr)
   %vld1xN.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat> } %vld1xN, 0
   %vld1xN.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat> } %vld1xN, 1
   %.fca.0.0.insert = insertvalue %struct.bfloat16x4x2_t undef, <4 x bfloat> %vld1xN.fca.0.extract, 0, 0
@@ -77,15 +75,15 @@ entry:
   ret %struct.bfloat16x4x2_t %.fca.0.1.insert
 }
 
-declare { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x2.v4bf16.p0bf16(bfloat*) nounwind
+declare { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x2.v4bf16.p0(ptr) nounwind
 
-define %struct.bfloat16x8x2_t @test_vld1q_bf16_x2(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x8x2_t @test_vld1q_bf16_x2(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld1q_bf16_x2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1 { v0.8h, v1.8h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %vld1xN = tail call { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x2.v8bf16.p0bf16(bfloat* %ptr)
+  %vld1xN = tail call { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x2.v8bf16.p0(ptr %ptr)
   %vld1xN.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat> } %vld1xN, 0
   %vld1xN.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat> } %vld1xN, 1
   %.fca.0.0.insert = insertvalue %struct.bfloat16x8x2_t undef, <8 x bfloat> %vld1xN.fca.0.extract, 0, 0
@@ -94,15 +92,15 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x2.v8bf16.p0bf16(bfloat*) nounwind
+declare { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x2.v8bf16.p0(ptr) nounwind
 
-define %struct.bfloat16x4x3_t @test_vld1_bf16_x3(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x4x3_t @test_vld1_bf16_x3(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld1_bf16_x3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1 { v0.4h, v1.4h, v2.4h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %vld1xN = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x3.v4bf16.p0bf16(bfloat* %ptr)
+  %vld1xN = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x3.v4bf16.p0(ptr %ptr)
   %vld1xN.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld1xN, 0
   %vld1xN.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld1xN, 1
   %vld1xN.fca.2.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld1xN, 2
@@ -113,15 +111,15 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x3.v4bf16.p0bf16(bfloat*) nounwind
+declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x3.v4bf16.p0(ptr) nounwind
 
-define %struct.bfloat16x8x3_t @test_vld1q_bf16_x3(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x8x3_t @test_vld1q_bf16_x3(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld1q_bf16_x3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1 { v0.8h, v1.8h, v2.8h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %vld1xN = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x3.v8bf16.p0bf16(bfloat* %ptr)
+  %vld1xN = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x3.v8bf16.p0(ptr %ptr)
   %vld1xN.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld1xN, 0
   %vld1xN.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld1xN, 1
   %vld1xN.fca.2.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld1xN, 2
@@ -132,15 +130,15 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x3.v8bf16.p0bf16(bfloat*) nounwind
+declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x3.v8bf16.p0(ptr) nounwind
 
-define %struct.bfloat16x4x4_t @test_vld1_bf16_x4(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x4x4_t @test_vld1_bf16_x4(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld1_bf16_x4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %vld1xN = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x4.v4bf16.p0bf16(bfloat* %ptr)
+  %vld1xN = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x4.v4bf16.p0(ptr %ptr)
   %vld1xN.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld1xN, 0
   %vld1xN.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld1xN, 1
   %vld1xN.fca.2.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld1xN, 2
@@ -153,15 +151,15 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x4.v4bf16.p0bf16(bfloat*) nounwind
+declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld1x4.v4bf16.p0(ptr) nounwind
 
-define %struct.bfloat16x8x4_t @test_vld1q_bf16_x4(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x8x4_t @test_vld1q_bf16_x4(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld1q_bf16_x4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1 { v0.8h, v1.8h, v2.8h, v3.8h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %vld1xN = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x4.v8bf16.p0bf16(bfloat* %ptr)
+  %vld1xN = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x4.v8bf16.p0(ptr %ptr)
   %vld1xN.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld1xN, 0
   %vld1xN.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld1xN, 1
   %vld1xN.fca.2.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld1xN, 2
@@ -174,28 +172,27 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x4.v8bf16.p0bf16(bfloat*) nounwind
+declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld1x4.v8bf16.p0(ptr) nounwind
 
-define <8 x bfloat> @test_vld1q_dup_bf16(bfloat* nocapture readonly %ptr) local_unnamed_addr nounwind {
+define <8 x bfloat> @test_vld1q_dup_bf16(ptr nocapture readonly %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld1q_dup_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1r { v0.8h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load bfloat, bfloat* %ptr, align 2
+  %0 = load bfloat, ptr %ptr, align 2
   %1 = insertelement <8 x bfloat> undef, bfloat %0, i32 0
   %lane = shufflevector <8 x bfloat> %1, <8 x bfloat> undef, <8 x i32> zeroinitializer
   ret <8 x bfloat> %lane
 }
 
-define %struct.bfloat16x4x2_t @test_vld2_bf16(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x4x2_t @test_vld2_bf16(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld2_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld2 { v0.4h, v1.4h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast bfloat* %ptr to <4 x bfloat>*
-  %vld2 = tail call { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2.v4bf16.p0v4bf16(<4 x bfloat>* %0)
+  %vld2 = tail call { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2.v4bf16.p0(ptr %ptr)
   %vld2.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat> } %vld2, 0
   %vld2.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat> } %vld2, 1
   %.fca.0.0.insert = insertvalue %struct.bfloat16x4x2_t undef, <4 x bfloat> %vld2.fca.0.extract, 0, 0
@@ -204,16 +201,15 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2.v4bf16.p0v4bf16(<4 x bfloat>*) nounwind
+declare { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2.v4bf16.p0(ptr) nounwind
 
-define %struct.bfloat16x8x2_t @test_vld2q_bf16(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x8x2_t @test_vld2q_bf16(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld2q_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld2 { v0.8h, v1.8h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast bfloat* %ptr to <8 x bfloat>*
-  %vld2 = tail call { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2.v8bf16.p0v8bf16(<8 x bfloat>* %0)
+  %vld2 = tail call { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2.v8bf16.p0(ptr %ptr)
   %vld2.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat> } %vld2, 0
   %vld2.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat> } %vld2, 1
   %.fca.0.0.insert = insertvalue %struct.bfloat16x8x2_t undef, <8 x bfloat> %vld2.fca.0.extract, 0, 0
@@ -222,8 +218,8 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2.v8bf16.p0v8bf16(<8 x bfloat>*) nounwind
-define %struct.bfloat16x4x2_t @test_vld2_lane_bf16(bfloat* %ptr, [2 x <4 x bfloat>] %src.coerce) local_unnamed_addr nounwind {
+declare { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2.v8bf16.p0(ptr) nounwind
+define %struct.bfloat16x4x2_t @test_vld2_lane_bf16(ptr %ptr, [2 x <4 x bfloat>] %src.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld2_lane_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    ld2 { v0.h, v1.h }[1], [x0]
@@ -231,8 +227,7 @@ define %struct.bfloat16x4x2_t @test_vld2_lane_bf16(bfloat* %ptr, [2 x <4 x bfloa
 entry:
   %src.coerce.fca.0.extract = extractvalue [2 x <4 x bfloat>] %src.coerce, 0
   %src.coerce.fca.1.extract = extractvalue [2 x <4 x bfloat>] %src.coerce, 1
-  %0 = bitcast bfloat* %ptr to i8*
-  %vld2_lane = tail call { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2lane.v4bf16.p0i8(<4 x bfloat> %src.coerce.fca.0.extract, <4 x bfloat> %src.coerce.fca.1.extract, i64 1, i8* %0)
+  %vld2_lane = tail call { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2lane.v4bf16.p0(<4 x bfloat> %src.coerce.fca.0.extract, <4 x bfloat> %src.coerce.fca.1.extract, i64 1, ptr %ptr)
   %vld2_lane.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat> } %vld2_lane, 0
   %vld2_lane.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat> } %vld2_lane, 1
   %.fca.0.0.insert = insertvalue %struct.bfloat16x4x2_t undef, <4 x bfloat> %vld2_lane.fca.0.extract, 0, 0
@@ -241,9 +236,9 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2lane.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, i64, i8*) nounwind
+declare { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2lane.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, i64, ptr) nounwind
 
-define %struct.bfloat16x8x2_t @test_vld2q_lane_bf16(bfloat* %ptr, [2 x <8 x bfloat>] %src.coerce) local_unnamed_addr nounwind {
+define %struct.bfloat16x8x2_t @test_vld2q_lane_bf16(ptr %ptr, [2 x <8 x bfloat>] %src.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld2q_lane_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    ld2 { v0.h, v1.h }[7], [x0]
@@ -251,8 +246,7 @@ define %struct.bfloat16x8x2_t @test_vld2q_lane_bf16(bfloat* %ptr, [2 x <8 x bflo
 entry:
   %src.coerce.fca.0.extract = extractvalue [2 x <8 x bfloat>] %src.coerce, 0
   %src.coerce.fca.1.extract = extractvalue [2 x <8 x bfloat>] %src.coerce, 1
-  %0 = bitcast bfloat* %ptr to i8*
-  %vld2_lane = tail call { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2lane.v8bf16.p0i8(<8 x bfloat> %src.coerce.fca.0.extract, <8 x bfloat> %src.coerce.fca.1.extract, i64 7, i8* %0)
+  %vld2_lane = tail call { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2lane.v8bf16.p0(<8 x bfloat> %src.coerce.fca.0.extract, <8 x bfloat> %src.coerce.fca.1.extract, i64 7, ptr %ptr)
   %vld2_lane.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat> } %vld2_lane, 0
   %vld2_lane.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat> } %vld2_lane, 1
   %.fca.0.0.insert = insertvalue %struct.bfloat16x8x2_t undef, <8 x bfloat> %vld2_lane.fca.0.extract, 0, 0
@@ -261,16 +255,15 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2lane.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, i64, i8*) nounwind
+declare { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2lane.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, i64, ptr) nounwind
 
-define %struct.bfloat16x4x3_t @test_vld3_bf16(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x4x3_t @test_vld3_bf16(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld3_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld3 { v0.4h, v1.4h, v2.4h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast bfloat* %ptr to <4 x bfloat>*
-  %vld3 = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3.v4bf16.p0v4bf16(<4 x bfloat>* %0)
+  %vld3 = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3.v4bf16.p0(ptr %ptr)
   %vld3.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3, 0
   %vld3.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3, 1
   %vld3.fca.2.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3, 2
@@ -281,16 +274,15 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3.v4bf16.p0v4bf16(<4 x bfloat>*) nounwind
+declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3.v4bf16.p0(ptr) nounwind
 
-define %struct.bfloat16x8x3_t @test_vld3q_bf16(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x8x3_t @test_vld3q_bf16(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld3q_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld3 { v0.8h, v1.8h, v2.8h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast bfloat* %ptr to <8 x bfloat>*
-  %vld3 = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3.v8bf16.p0v8bf16(<8 x bfloat>* %0)
+  %vld3 = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3.v8bf16.p0(ptr %ptr)
   %vld3.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3, 0
   %vld3.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3, 1
   %vld3.fca.2.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3, 2
@@ -301,9 +293,9 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3.v8bf16.p0v8bf16(<8 x bfloat>*) nounwind
+declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3.v8bf16.p0(ptr) nounwind
 
-define %struct.bfloat16x4x3_t @test_vld3_lane_bf16(bfloat* %ptr, [3 x <4 x bfloat>] %src.coerce) local_unnamed_addr nounwind {
+define %struct.bfloat16x4x3_t @test_vld3_lane_bf16(ptr %ptr, [3 x <4 x bfloat>] %src.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld3_lane_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    ld3 { v0.h, v1.h, v2.h }[1], [x0]
@@ -312,8 +304,7 @@ entry:
   %src.coerce.fca.0.extract = extractvalue [3 x <4 x bfloat>] %src.coerce, 0
   %src.coerce.fca.1.extract = extractvalue [3 x <4 x bfloat>] %src.coerce, 1
   %src.coerce.fca.2.extract = extractvalue [3 x <4 x bfloat>] %src.coerce, 2
-  %0 = bitcast bfloat* %ptr to i8*
-  %vld3_lane = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3lane.v4bf16.p0i8(<4 x bfloat> %src.coerce.fca.0.extract, <4 x bfloat> %src.coerce.fca.1.extract, <4 x bfloat> %src.coerce.fca.2.extract, i64 1, i8* %0)
+  %vld3_lane = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3lane.v4bf16.p0(<4 x bfloat> %src.coerce.fca.0.extract, <4 x bfloat> %src.coerce.fca.1.extract, <4 x bfloat> %src.coerce.fca.2.extract, i64 1, ptr %ptr)
   %vld3_lane.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3_lane, 0
   %vld3_lane.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3_lane, 1
   %vld3_lane.fca.2.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3_lane, 2
@@ -324,9 +315,9 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3lane.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i64, i8*) nounwind
+declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3lane.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i64, ptr) nounwind
 
-define %struct.bfloat16x8x3_t @test_vld3q_lane_bf16(bfloat* %ptr, [3 x <8 x bfloat>] %src.coerce) local_unnamed_addr nounwind {
+define %struct.bfloat16x8x3_t @test_vld3q_lane_bf16(ptr %ptr, [3 x <8 x bfloat>] %src.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld3q_lane_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    ld3 { v0.h, v1.h, v2.h }[7], [x0]
@@ -335,8 +326,7 @@ entry:
   %src.coerce.fca.0.extract = extractvalue [3 x <8 x bfloat>] %src.coerce, 0
   %src.coerce.fca.1.extract = extractvalue [3 x <8 x bfloat>] %src.coerce, 1
   %src.coerce.fca.2.extract = extractvalue [3 x <8 x bfloat>] %src.coerce, 2
-  %0 = bitcast bfloat* %ptr to i8*
-  %vld3_lane = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3lane.v8bf16.p0i8(<8 x bfloat> %src.coerce.fca.0.extract, <8 x bfloat> %src.coerce.fca.1.extract, <8 x bfloat> %src.coerce.fca.2.extract, i64 7, i8* %0)
+  %vld3_lane = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3lane.v8bf16.p0(<8 x bfloat> %src.coerce.fca.0.extract, <8 x bfloat> %src.coerce.fca.1.extract, <8 x bfloat> %src.coerce.fca.2.extract, i64 7, ptr %ptr)
   %vld3_lane.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3_lane, 0
   %vld3_lane.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3_lane, 1
   %vld3_lane.fca.2.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3_lane, 2
@@ -347,16 +337,15 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3lane.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i64, i8*) nounwind
+declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3lane.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i64, ptr) nounwind
 
-define %struct.bfloat16x4x4_t @test_vld4_bf16(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x4x4_t @test_vld4_bf16(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld4_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld4 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast bfloat* %ptr to <4 x bfloat>*
-  %vld4 = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4.v4bf16.p0v4bf16(<4 x bfloat>* %0)
+  %vld4 = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4.v4bf16.p0(ptr %ptr)
   %vld4.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4, 0
   %vld4.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4, 1
   %vld4.fca.2.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4, 2
@@ -369,16 +358,15 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4.v4bf16.p0v4bf16(<4 x bfloat>*) nounwind
+declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4.v4bf16.p0(ptr) nounwind
 
-define %struct.bfloat16x8x4_t @test_vld4q_bf16(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x8x4_t @test_vld4q_bf16(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld4q_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld4 { v0.8h, v1.8h, v2.8h, v3.8h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast bfloat* %ptr to <8 x bfloat>*
-  %vld4 = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4.v8bf16.p0v8bf16(<8 x bfloat>* %0)
+  %vld4 = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4.v8bf16.p0(ptr %ptr)
   %vld4.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4, 0
   %vld4.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4, 1
   %vld4.fca.2.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4, 2
@@ -391,9 +379,9 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4.v8bf16.p0v8bf16(<8 x bfloat>*) nounwind
+declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4.v8bf16.p0(ptr) nounwind
 
-define %struct.bfloat16x4x4_t @test_vld4_lane_bf16(bfloat* %ptr, [4 x <4 x bfloat>] %src.coerce) local_unnamed_addr nounwind {
+define %struct.bfloat16x4x4_t @test_vld4_lane_bf16(ptr %ptr, [4 x <4 x bfloat>] %src.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld4_lane_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    ld4 { v0.h, v1.h, v2.h, v3.h }[1], [x0]
@@ -403,8 +391,7 @@ entry:
   %src.coerce.fca.1.extract = extractvalue [4 x <4 x bfloat>] %src.coerce, 1
   %src.coerce.fca.2.extract = extractvalue [4 x <4 x bfloat>] %src.coerce, 2
   %src.coerce.fca.3.extract = extractvalue [4 x <4 x bfloat>] %src.coerce, 3
-  %0 = bitcast bfloat* %ptr to i8*
-  %vld4_lane = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4lane.v4bf16.p0i8(<4 x bfloat> %src.coerce.fca.0.extract, <4 x bfloat> %src.coerce.fca.1.extract, <4 x bfloat> %src.coerce.fca.2.extract, <4 x bfloat> %src.coerce.fca.3.extract, i64 1, i8* %0)
+  %vld4_lane = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4lane.v4bf16.p0(<4 x bfloat> %src.coerce.fca.0.extract, <4 x bfloat> %src.coerce.fca.1.extract, <4 x bfloat> %src.coerce.fca.2.extract, <4 x bfloat> %src.coerce.fca.3.extract, i64 1, ptr %ptr)
   %vld4_lane.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4_lane, 0
   %vld4_lane.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4_lane, 1
   %vld4_lane.fca.2.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4_lane, 2
@@ -417,9 +404,9 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4lane.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i64, i8*) nounwind
+declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4lane.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i64, ptr) nounwind
 
-define %struct.bfloat16x8x4_t @test_vld4q_lane_bf16(bfloat* %ptr, [4 x <8 x bfloat>] %src.coerce) local_unnamed_addr nounwind {
+define %struct.bfloat16x8x4_t @test_vld4q_lane_bf16(ptr %ptr, [4 x <8 x bfloat>] %src.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld4q_lane_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    ld4 { v0.h, v1.h, v2.h, v3.h }[7], [x0]
@@ -429,8 +416,7 @@ entry:
   %src.coerce.fca.1.extract = extractvalue [4 x <8 x bfloat>] %src.coerce, 1
   %src.coerce.fca.2.extract = extractvalue [4 x <8 x bfloat>] %src.coerce, 2
   %src.coerce.fca.3.extract = extractvalue [4 x <8 x bfloat>] %src.coerce, 3
-  %0 = bitcast bfloat* %ptr to i8*
-  %vld4_lane = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4lane.v8bf16.p0i8(<8 x bfloat> %src.coerce.fca.0.extract, <8 x bfloat> %src.coerce.fca.1.extract, <8 x bfloat> %src.coerce.fca.2.extract, <8 x bfloat> %src.coerce.fca.3.extract, i64 7, i8* %0)
+  %vld4_lane = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4lane.v8bf16.p0(<8 x bfloat> %src.coerce.fca.0.extract, <8 x bfloat> %src.coerce.fca.1.extract, <8 x bfloat> %src.coerce.fca.2.extract, <8 x bfloat> %src.coerce.fca.3.extract, i64 7, ptr %ptr)
   %vld4_lane.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4_lane, 0
   %vld4_lane.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4_lane, 1
   %vld4_lane.fca.2.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4_lane, 2
@@ -443,15 +429,15 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4lane.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i64, i8*) nounwind
+declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4lane.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i64, ptr) nounwind
 
-define %struct.bfloat16x4x2_t @test_vld2_dup_bf16(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x4x2_t @test_vld2_dup_bf16(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld2_dup_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld2r { v0.4h, v1.4h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %vld2 = tail call { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2r.v4bf16.p0bf16(bfloat* %ptr)
+  %vld2 = tail call { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2r.v4bf16.p0(ptr %ptr)
   %vld2.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat> } %vld2, 0
   %vld2.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat> } %vld2, 1
   %.fca.0.0.insert = insertvalue %struct.bfloat16x4x2_t undef, <4 x bfloat> %vld2.fca.0.extract, 0, 0
@@ -460,15 +446,15 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2r.v4bf16.p0bf16(bfloat*) nounwind
+declare { <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld2r.v4bf16.p0(ptr) nounwind
 
-define %struct.bfloat16x8x2_t @test_vld2q_dup_bf16(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x8x2_t @test_vld2q_dup_bf16(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld2q_dup_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld2r { v0.8h, v1.8h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %vld2 = tail call { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2r.v8bf16.p0bf16(bfloat* %ptr)
+  %vld2 = tail call { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2r.v8bf16.p0(ptr %ptr)
   %vld2.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat> } %vld2, 0
   %vld2.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat> } %vld2, 1
   %.fca.0.0.insert = insertvalue %struct.bfloat16x8x2_t undef, <8 x bfloat> %vld2.fca.0.extract, 0, 0
@@ -477,15 +463,15 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2r.v8bf16.p0bf16(bfloat*) nounwind
+declare { <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld2r.v8bf16.p0(ptr) nounwind
 
-define %struct.bfloat16x4x3_t @test_vld3_dup_bf16(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x4x3_t @test_vld3_dup_bf16(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld3_dup_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld3r { v0.4h, v1.4h, v2.4h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %vld3 = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3r.v4bf16.p0bf16(bfloat* %ptr)
+  %vld3 = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3r.v4bf16.p0(ptr %ptr)
   %vld3.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3, 0
   %vld3.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3, 1
   %vld3.fca.2.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld3, 2
@@ -496,15 +482,15 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3r.v4bf16.p0bf16(bfloat*) nounwind
+declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3r.v4bf16.p0(ptr) nounwind
 
-define %struct.bfloat16x8x3_t @test_vld3q_dup_bf16(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x8x3_t @test_vld3q_dup_bf16(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld3q_dup_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld3r { v0.8h, v1.8h, v2.8h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %vld3 = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3r.v8bf16.p0bf16(bfloat* %ptr)
+  %vld3 = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3r.v8bf16.p0(ptr %ptr)
   %vld3.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3, 0
   %vld3.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3, 1
   %vld3.fca.2.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld3, 2
@@ -515,15 +501,15 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3r.v8bf16.p0bf16(bfloat*) nounwind
+declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld3r.v8bf16.p0(ptr) nounwind
 
-define %struct.bfloat16x4x4_t @test_vld4_dup_bf16(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x4x4_t @test_vld4_dup_bf16(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld4_dup_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld4r { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %vld4 = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4r.v4bf16.p0bf16(bfloat* %ptr)
+  %vld4 = tail call { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4r.v4bf16.p0(ptr %ptr)
   %vld4.fca.0.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4, 0
   %vld4.fca.1.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4, 1
   %vld4.fca.2.extract = extractvalue { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } %vld4, 2
@@ -536,15 +522,15 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4r.v4bf16.p0bf16(bfloat*) nounwind
+declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld4r.v4bf16.p0(ptr) nounwind
 
-define %struct.bfloat16x8x4_t @test_vld4q_dup_bf16(bfloat* %ptr) local_unnamed_addr nounwind {
+define %struct.bfloat16x8x4_t @test_vld4q_dup_bf16(ptr %ptr) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld4q_dup_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld4r { v0.8h, v1.8h, v2.8h, v3.8h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %vld4 = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4r.v8bf16.p0bf16(bfloat* %ptr)
+  %vld4 = tail call { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4r.v8bf16.p0(ptr %ptr)
   %vld4.fca.0.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4, 0
   %vld4.fca.1.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4, 1
   %vld4.fca.2.extract = extractvalue { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } %vld4, 2
@@ -557,53 +543,51 @@ entry:
 }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4r.v8bf16.p0bf16(bfloat*) nounwind
+declare { <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat> } @llvm.aarch64.neon.ld4r.v8bf16.p0(ptr) nounwind
 
-define void @test_vst1_bf16(bfloat* nocapture %ptr, <4 x bfloat> %val) local_unnamed_addr nounwind {
+define void @test_vst1_bf16(ptr nocapture %ptr, <4 x bfloat> %val) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst1_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast bfloat* %ptr to <4 x bfloat>*
-  store <4 x bfloat> %val, <4 x bfloat>* %0, align 8
+  store <4 x bfloat> %val, ptr %ptr, align 8
   ret void
 }
 
-define void @test_vst1q_bf16(bfloat* nocapture %ptr, <8 x bfloat> %val) local_unnamed_addr nounwind {
+define void @test_vst1q_bf16(ptr nocapture %ptr, <8 x bfloat> %val) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst1q_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast bfloat* %ptr to <8 x bfloat>*
-  store <8 x bfloat> %val, <8 x bfloat>* %0, align 16
+  store <8 x bfloat> %val, ptr %ptr, align 16
   ret void
 }
 
-define void @test_vst1_lane_bf16(bfloat* nocapture %ptr, <4 x bfloat> %val) local_unnamed_addr nounwind {
+define void @test_vst1_lane_bf16(ptr nocapture %ptr, <4 x bfloat> %val) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst1_lane_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st1 { v0.h }[1], [x0]
 ; CHECK:    ret
 entry:
   %0 = extractelement <4 x bfloat> %val, i32 1
-  store bfloat %0, bfloat* %ptr, align 2
+  store bfloat %0, ptr %ptr, align 2
   ret void
 }
 
-define void @test_vst1q_lane_bf16(bfloat* nocapture %ptr, <8 x bfloat> %val) local_unnamed_addr nounwind {
+define void @test_vst1q_lane_bf16(ptr nocapture %ptr, <8 x bfloat> %val) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst1q_lane_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    st1 { v0.h }[7], [x0]
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <8 x bfloat> %val, i32 7
-  store bfloat %0, bfloat* %ptr, align 2
+  store bfloat %0, ptr %ptr, align 2
   ret void
 }
 
-define void @test_vst1_bf16_x2(bfloat* nocapture %ptr, [2 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst1_bf16_x2(ptr nocapture %ptr, [2 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst1_bf16_x2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st1 { v0.4h, v1.4h }, [x0]
@@ -611,14 +595,14 @@ define void @test_vst1_bf16_x2(bfloat* nocapture %ptr, [2 x <4 x bfloat>] %val.c
 entry:
   %val.coerce.fca.0.extract = extractvalue [2 x <4 x bfloat>] %val.coerce, 0
   %val.coerce.fca.1.extract = extractvalue [2 x <4 x bfloat>] %val.coerce, 1
-  tail call void @llvm.aarch64.neon.st1x2.v4bf16.p0bf16(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, bfloat* %ptr)
+  tail call void @llvm.aarch64.neon.st1x2.v4bf16.p0(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st1x2.v4bf16.p0bf16(<4 x bfloat>, <4 x bfloat>, bfloat* nocapture) nounwind
+declare void @llvm.aarch64.neon.st1x2.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, ptr nocapture) nounwind
 
-define void @test_vst1q_bf16_x2(bfloat* nocapture %ptr, [2 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst1q_bf16_x2(ptr nocapture %ptr, [2 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst1q_bf16_x2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st1 { v0.8h, v1.8h }, [x0]
@@ -626,14 +610,14 @@ define void @test_vst1q_bf16_x2(bfloat* nocapture %ptr, [2 x <8 x bfloat>] %val.
 entry:
   %val.coerce.fca.0.extract = extractvalue [2 x <8 x bfloat>] %val.coerce, 0
   %val.coerce.fca.1.extract = extractvalue [2 x <8 x bfloat>] %val.coerce, 1
-  tail call void @llvm.aarch64.neon.st1x2.v8bf16.p0bf16(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, bfloat* %ptr)
+  tail call void @llvm.aarch64.neon.st1x2.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st1x2.v8bf16.p0bf16(<8 x bfloat>, <8 x bfloat>, bfloat* nocapture) nounwind
+declare void @llvm.aarch64.neon.st1x2.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, ptr nocapture) nounwind
 
-define void @test_vst1_bf16_x3(bfloat* nocapture %ptr, [3 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst1_bf16_x3(ptr nocapture %ptr, [3 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst1_bf16_x3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st1 { v0.4h, v1.4h, v2.4h }, [x0]
@@ -642,14 +626,14 @@ entry:
   %val.coerce.fca.0.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 0
   %val.coerce.fca.1.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 1
   %val.coerce.fca.2.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 2
-  tail call void @llvm.aarch64.neon.st1x3.v4bf16.p0bf16(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, bfloat* %ptr)
+  tail call void @llvm.aarch64.neon.st1x3.v4bf16.p0(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st1x3.v4bf16.p0bf16(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, bfloat* nocapture) nounwind
+declare void @llvm.aarch64.neon.st1x3.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, ptr nocapture) nounwind
 
-define void @test_vst1q_bf16_x3(bfloat* nocapture %ptr, [3 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst1q_bf16_x3(ptr nocapture %ptr, [3 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst1q_bf16_x3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st1 { v0.8h, v1.8h, v2.8h }, [x0]
@@ -658,15 +642,15 @@ entry:
   %val.coerce.fca.0.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 0
   %val.coerce.fca.1.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 1
   %val.coerce.fca.2.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 2
-  tail call void @llvm.aarch64.neon.st1x3.v8bf16.p0bf16(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, bfloat* %ptr)
+  tail call void @llvm.aarch64.neon.st1x3.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st1x3.v8bf16.p0bf16(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, bfloat* nocapture) nounwind
+declare void @llvm.aarch64.neon.st1x3.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, ptr nocapture) nounwind
 
 ; Function Attrs: nounwind
-define void @test_vst1_bf16_x4(bfloat* nocapture %ptr, [4 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst1_bf16_x4(ptr nocapture %ptr, [4 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst1_bf16_x4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st1 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
@@ -676,14 +660,14 @@ entry:
   %val.coerce.fca.1.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 1
   %val.coerce.fca.2.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 2
   %val.coerce.fca.3.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 3
-  tail call void @llvm.aarch64.neon.st1x4.v4bf16.p0bf16(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, <4 x bfloat> %val.coerce.fca.3.extract, bfloat* %ptr)
+  tail call void @llvm.aarch64.neon.st1x4.v4bf16.p0(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, <4 x bfloat> %val.coerce.fca.3.extract, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st1x4.v4bf16.p0bf16(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, bfloat* nocapture) nounwind
+declare void @llvm.aarch64.neon.st1x4.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, ptr nocapture) nounwind
 
-define void @test_vst1q_bf16_x4(bfloat* nocapture %ptr, [4 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst1q_bf16_x4(ptr nocapture %ptr, [4 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst1q_bf16_x4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st1 { v0.8h, v1.8h, v2.8h, v3.8h }, [x0]
@@ -693,14 +677,14 @@ entry:
   %val.coerce.fca.1.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 1
   %val.coerce.fca.2.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 2
   %val.coerce.fca.3.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 3
-  tail call void @llvm.aarch64.neon.st1x4.v8bf16.p0bf16(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, <8 x bfloat> %val.coerce.fca.3.extract, bfloat* %ptr)
+  tail call void @llvm.aarch64.neon.st1x4.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, <8 x bfloat> %val.coerce.fca.3.extract, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st1x4.v8bf16.p0bf16(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, bfloat* nocapture) nounwind
+declare void @llvm.aarch64.neon.st1x4.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, ptr nocapture) nounwind
 
-define void @test_vst2_bf16(bfloat* nocapture %ptr, [2 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst2_bf16(ptr nocapture %ptr, [2 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst2_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st2 { v0.4h, v1.4h }, [x0]
@@ -708,15 +692,14 @@ define void @test_vst2_bf16(bfloat* nocapture %ptr, [2 x <4 x bfloat>] %val.coer
 entry:
   %val.coerce.fca.0.extract = extractvalue [2 x <4 x bfloat>] %val.coerce, 0
   %val.coerce.fca.1.extract = extractvalue [2 x <4 x bfloat>] %val.coerce, 1
-  %0 = bitcast bfloat* %ptr to i8*
-  tail call void @llvm.aarch64.neon.st2.v4bf16.p0i8(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, i8* %0)
+  tail call void @llvm.aarch64.neon.st2.v4bf16.p0(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st2.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, i8* nocapture) nounwind
+declare void @llvm.aarch64.neon.st2.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, ptr nocapture) nounwind
 
-define void @test_vst2q_bf16(bfloat* nocapture %ptr, [2 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst2q_bf16(ptr nocapture %ptr, [2 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst2q_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st2 { v0.8h, v1.8h }, [x0]
@@ -724,15 +707,14 @@ define void @test_vst2q_bf16(bfloat* nocapture %ptr, [2 x <8 x bfloat>] %val.coe
 entry:
   %val.coerce.fca.0.extract = extractvalue [2 x <8 x bfloat>] %val.coerce, 0
   %val.coerce.fca.1.extract = extractvalue [2 x <8 x bfloat>] %val.coerce, 1
-  %0 = bitcast bfloat* %ptr to i8*
-  tail call void @llvm.aarch64.neon.st2.v8bf16.p0i8(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, i8* %0)
+  tail call void @llvm.aarch64.neon.st2.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st2.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, i8* nocapture) nounwind
+declare void @llvm.aarch64.neon.st2.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, ptr nocapture) nounwind
 
-define void @test_vst2_lane_bf16(bfloat* nocapture %ptr, [2 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst2_lane_bf16(ptr nocapture %ptr, [2 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst2_lane_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st2 { v0.h, v1.h }[1], [x0]
@@ -740,16 +722,15 @@ define void @test_vst2_lane_bf16(bfloat* nocapture %ptr, [2 x <4 x bfloat>] %val
 entry:
   %val.coerce.fca.0.extract = extractvalue [2 x <4 x bfloat>] %val.coerce, 0
   %val.coerce.fca.1.extract = extractvalue [2 x <4 x bfloat>] %val.coerce, 1
-  %0 = bitcast bfloat* %ptr to i8*
-  tail call void @llvm.aarch64.neon.st2lane.v4bf16.p0i8(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, i64 1, i8* %0)
+  tail call void @llvm.aarch64.neon.st2lane.v4bf16.p0(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, i64 1, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st2lane.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, i64, i8* nocapture) nounwind
+declare void @llvm.aarch64.neon.st2lane.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, i64, ptr nocapture) nounwind
 
 ; Function Attrs: nounwind
-define void @test_vst2q_lane_bf16(bfloat* nocapture %ptr, [2 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst2q_lane_bf16(ptr nocapture %ptr, [2 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst2q_lane_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st2 { v0.h, v1.h }[7], [x0]
@@ -757,16 +738,15 @@ define void @test_vst2q_lane_bf16(bfloat* nocapture %ptr, [2 x <8 x bfloat>] %va
 entry:
   %val.coerce.fca.0.extract = extractvalue [2 x <8 x bfloat>] %val.coerce, 0
   %val.coerce.fca.1.extract = extractvalue [2 x <8 x bfloat>] %val.coerce, 1
-  %0 = bitcast bfloat* %ptr to i8*
-  tail call void @llvm.aarch64.neon.st2lane.v8bf16.p0i8(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, i64 7, i8* %0)
+  tail call void @llvm.aarch64.neon.st2lane.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, i64 7, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st2lane.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, i64, i8* nocapture) nounwind
+declare void @llvm.aarch64.neon.st2lane.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, i64, ptr nocapture) nounwind
 
 ; Function Attrs: nounwind
-define void @test_vst3_bf16(bfloat* nocapture %ptr, [3 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst3_bf16(ptr nocapture %ptr, [3 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst3_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st3 { v0.4h, v1.4h, v2.4h }, [x0]
@@ -775,16 +755,15 @@ entry:
   %val.coerce.fca.0.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 0
   %val.coerce.fca.1.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 1
   %val.coerce.fca.2.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 2
-  %0 = bitcast bfloat* %ptr to i8*
-  tail call void @llvm.aarch64.neon.st3.v4bf16.p0i8(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, i8* %0)
+  tail call void @llvm.aarch64.neon.st3.v4bf16.p0(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st3.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i8* nocapture) nounwind
+declare void @llvm.aarch64.neon.st3.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, ptr nocapture) nounwind
 
 ; Function Attrs: nounwind
-define void @test_vst3q_bf16(bfloat* nocapture %ptr, [3 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst3q_bf16(ptr nocapture %ptr, [3 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst3q_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st3 { v0.8h, v1.8h, v2.8h }, [x0]
@@ -793,16 +772,15 @@ entry:
   %val.coerce.fca.0.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 0
   %val.coerce.fca.1.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 1
   %val.coerce.fca.2.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 2
-  %0 = bitcast bfloat* %ptr to i8*
-  tail call void @llvm.aarch64.neon.st3.v8bf16.p0i8(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, i8* %0)
+  tail call void @llvm.aarch64.neon.st3.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st3.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i8* nocapture) nounwind
+declare void @llvm.aarch64.neon.st3.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, ptr nocapture) nounwind
 
 ; Function Attrs: nounwind
-define void @test_vst3_lane_bf16(bfloat* nocapture %ptr, [3 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst3_lane_bf16(ptr nocapture %ptr, [3 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst3_lane_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st3 { v0.h, v1.h, v2.h }[1], [x0]
@@ -811,16 +789,15 @@ entry:
   %val.coerce.fca.0.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 0
   %val.coerce.fca.1.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 1
   %val.coerce.fca.2.extract = extractvalue [3 x <4 x bfloat>] %val.coerce, 2
-  %0 = bitcast bfloat* %ptr to i8*
-  tail call void @llvm.aarch64.neon.st3lane.v4bf16.p0i8(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, i64 1, i8* %0)
+  tail call void @llvm.aarch64.neon.st3lane.v4bf16.p0(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, i64 1, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st3lane.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i64, i8* nocapture) nounwind
+declare void @llvm.aarch64.neon.st3lane.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i64, ptr nocapture) nounwind
 
 ; Function Attrs: nounwind
-define void @test_vst3q_lane_bf16(bfloat* nocapture %ptr, [3 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst3q_lane_bf16(ptr nocapture %ptr, [3 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst3q_lane_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st3 { v0.h, v1.h, v2.h }[7], [x0]
@@ -829,16 +806,15 @@ entry:
   %val.coerce.fca.0.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 0
   %val.coerce.fca.1.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 1
   %val.coerce.fca.2.extract = extractvalue [3 x <8 x bfloat>] %val.coerce, 2
-  %0 = bitcast bfloat* %ptr to i8*
-  tail call void @llvm.aarch64.neon.st3lane.v8bf16.p0i8(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, i64 7, i8* %0)
+  tail call void @llvm.aarch64.neon.st3lane.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, i64 7, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st3lane.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i64, i8* nocapture) nounwind
+declare void @llvm.aarch64.neon.st3lane.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i64, ptr nocapture) nounwind
 
 ; Function Attrs: nounwind
-define void @test_vst4_bf16(bfloat* nocapture %ptr, [4 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst4_bf16(ptr nocapture %ptr, [4 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst4_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st4 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
@@ -848,16 +824,15 @@ entry:
   %val.coerce.fca.1.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 1
   %val.coerce.fca.2.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 2
   %val.coerce.fca.3.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 3
-  %0 = bitcast bfloat* %ptr to i8*
-  tail call void @llvm.aarch64.neon.st4.v4bf16.p0i8(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, <4 x bfloat> %val.coerce.fca.3.extract, i8* %0)
+  tail call void @llvm.aarch64.neon.st4.v4bf16.p0(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, <4 x bfloat> %val.coerce.fca.3.extract, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st4.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i8* nocapture) nounwind
+declare void @llvm.aarch64.neon.st4.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, ptr nocapture) nounwind
 
 ; Function Attrs: nounwind
-define void @test_vst4q_bf16(bfloat* nocapture %ptr, [4 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst4q_bf16(ptr nocapture %ptr, [4 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst4q_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st4 { v0.8h, v1.8h, v2.8h, v3.8h }, [x0]
@@ -867,16 +842,15 @@ entry:
   %val.coerce.fca.1.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 1
   %val.coerce.fca.2.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 2
   %val.coerce.fca.3.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 3
-  %0 = bitcast bfloat* %ptr to i8*
-  tail call void @llvm.aarch64.neon.st4.v8bf16.p0i8(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, <8 x bfloat> %val.coerce.fca.3.extract, i8* %0)
+  tail call void @llvm.aarch64.neon.st4.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, <8 x bfloat> %val.coerce.fca.3.extract, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st4.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i8* nocapture) nounwind
+declare void @llvm.aarch64.neon.st4.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, ptr nocapture) nounwind
 
 ; Function Attrs: nounwind
-define void @test_vst4_lane_bf16(bfloat* nocapture %ptr, [4 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst4_lane_bf16(ptr nocapture %ptr, [4 x <4 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst4_lane_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st4 { v0.h, v1.h, v2.h, v3.h }[1], [x0]
@@ -886,16 +860,15 @@ entry:
   %val.coerce.fca.1.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 1
   %val.coerce.fca.2.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 2
   %val.coerce.fca.3.extract = extractvalue [4 x <4 x bfloat>] %val.coerce, 3
-  %0 = bitcast bfloat* %ptr to i8*
-  tail call void @llvm.aarch64.neon.st4lane.v4bf16.p0i8(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, <4 x bfloat> %val.coerce.fca.3.extract, i64 1, i8* %0)
+  tail call void @llvm.aarch64.neon.st4lane.v4bf16.p0(<4 x bfloat> %val.coerce.fca.0.extract, <4 x bfloat> %val.coerce.fca.1.extract, <4 x bfloat> %val.coerce.fca.2.extract, <4 x bfloat> %val.coerce.fca.3.extract, i64 1, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st4lane.v4bf16.p0i8(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i64, i8* nocapture) nounwind
+declare void @llvm.aarch64.neon.st4lane.v4bf16.p0(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i64, ptr nocapture) nounwind
 
 ; Function Attrs: nounwind
-define void @test_vst4q_lane_bf16(bfloat* nocapture %ptr, [4 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
+define void @test_vst4q_lane_bf16(ptr nocapture %ptr, [4 x <8 x bfloat>] %val.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vst4q_lane_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK:    st4 { v0.h, v1.h, v2.h, v3.h }[7], [x0]
@@ -905,12 +878,11 @@ entry:
   %val.coerce.fca.1.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 1
   %val.coerce.fca.2.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 2
   %val.coerce.fca.3.extract = extractvalue [4 x <8 x bfloat>] %val.coerce, 3
-  %0 = bitcast bfloat* %ptr to i8*
-  tail call void @llvm.aarch64.neon.st4lane.v8bf16.p0i8(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, <8 x bfloat> %val.coerce.fca.3.extract, i64 7, i8* %0)
+  tail call void @llvm.aarch64.neon.st4lane.v8bf16.p0(<8 x bfloat> %val.coerce.fca.0.extract, <8 x bfloat> %val.coerce.fca.1.extract, <8 x bfloat> %val.coerce.fca.2.extract, <8 x bfloat> %val.coerce.fca.3.extract, i64 7, ptr %ptr)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.aarch64.neon.st4lane.v8bf16.p0i8(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i64, i8* nocapture) nounwind
+declare void @llvm.aarch64.neon.st4lane.v8bf16.p0(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i64, ptr nocapture) nounwind
 
 

diff --git a/llvm/test/CodeGen/AArch64/aarch64-checkMergeStoreCandidatesForDependencies.ll b/llvm/test/CodeGen/AArch64/aarch64-checkMergeStoreCandidatesForDependencies.ll
index 9c7fbdd061565..f5494c3cee0e9 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-checkMergeStoreCandidatesForDependencies.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-checkMergeStoreCandidatesForDependencies.ll
@@ -8,12 +8,12 @@
 ;
 ;   SelectionDAG has 16 nodes:
 ;     t0: ch = EntryToken
-;       t3: i64 = add nuw GlobalAddress:i64<%str0* @g0> 0, Constant:i64<8>
+;       t3: i64 = add nuw GlobalAddress:i64<ptr @g0> 0, Constant:i64<8>
 ;     t6: ch = store<(store (s64) into %ir.sp1, align 1, !tbaa !1)> t0, Constant:i64<0>, t3, undef:i64
-;     t7: i64,ch = load<(load (s64) from `%str1** undef`, align 1)> t6, undef:i64, undef:i64
+;     t7: i64,ch = load<(load (s64) from `ptr undef`, align 1)> t6, undef:i64, undef:i64
 ;       t8: i64 = add nuw t7, Constant:i64<8>
 ;     t9: i64,ch = load<(load (s64) from %ir.lp0, align 1)> t6, t8, undef:i64
-;         t21: ch = store<(store (s64) into %ir.sp01, align 1)> t19:1, Constant:i64<0>, GlobalAddress:i64<%str0* @g0> 0, undef:i64
+;         t21: ch = store<(store (s64) into %ir.sp01, align 1)> t19:1, Constant:i64<0>, GlobalAddress:i64<ptr @g0> 0, undef:i64
 ;       t24: ch = TokenFactor t7:1, t9:1, t21
 ;     t14: ch,glue = CopyToReg t24, Register:i64 $x0, t19
 ;     t19: i64,ch = load<(load (s64) from %ir.lp12, align 1, !tbaa !7)> t0, t9, undef:i64
@@ -33,7 +33,7 @@
 ; performed.
 
 %str0 = type { i64, i64 }
-%str1 = type { i64, %str1* }
+%str1 = type { i64, ptr }
 
 @g0 = external global %str0, align 1
 
@@ -49,15 +49,13 @@ define i64 @foo() {
 ; CHECK-NEXT:    str xzr, [x8]
 ; CHECK-NEXT:    ret
 entry:
-  %sp0 = getelementptr inbounds %str0, %str0* @g0, i32 0, i32 0
-  %sp1 = getelementptr inbounds %str0, %str0* @g0, i32 0, i32 1
-  store i64 0, i64* %sp1, align 1, !tbaa !1
-  %l0 = load %str1*, %str1** undef, align 1
-  %lp0 = getelementptr inbounds %str1, %str1* %l0, i32 0, i32 1
-  %l1 = load %str1*, %str1** %lp0, align 1
-  %lp1 = getelementptr inbounds %str1, %str1* %l1, i32 0, i32 0
-  %l2 = load i64, i64* %lp1, align 1, !tbaa !7
-  store i64 0, i64* %sp0, align 1
+  %sp1 = getelementptr inbounds %str0, ptr @g0, i32 0, i32 1
+  store i64 0, ptr %sp1, align 1, !tbaa !1
+  %l0 = load ptr, ptr undef, align 1
+  %lp0 = getelementptr inbounds %str1, ptr %l0, i32 0, i32 1
+  %l1 = load ptr, ptr %lp0, align 1
+  %l2 = load i64, ptr %l1, align 1, !tbaa !7
+  store i64 0, ptr @g0, align 1
   ret i64 %l2
 }
 

diff --git a/llvm/test/CodeGen/AArch64/aarch64-codegen-prepare-atp.ll b/llvm/test/CodeGen/AArch64/aarch64-codegen-prepare-atp.ll
index 3fe7e65bf2454..92f29dac13fa4 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-codegen-prepare-atp.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-codegen-prepare-atp.ll
@@ -6,7 +6,7 @@ target triple = "aarch64--linux-gnu"
 %struct.match_state = type { i64, i64  }
 
 ; %add is also promoted by forking an extra sext.
-define void @promoteTwoOne(i32 %i, i32 %j, i64* %P1, i64* %P2 ) {
+define void @promoteTwoOne(i32 %i, i32 %j, ptr %P1, ptr %P2 ) {
 ; CHECK-LABEL: @promoteTwoOne
 ; CHECK-LABEL: entry:
 ; CHECK: %[[SEXT1:.*]] = sext i32 %i to i64
@@ -15,16 +15,16 @@ define void @promoteTwoOne(i32 %i, i32 %j, i64* %P1, i64* %P2 ) {
 entry:
   %add = add nsw i32 %i, %j
   %s = sext i32 %add to i64
-  %addr1 = getelementptr inbounds i64, i64* %P1, i64 %s
-  store i64 %s, i64* %addr1
+  %addr1 = getelementptr inbounds i64, ptr %P1, i64 %s
+  store i64 %s, ptr %addr1
   %s2 = sext i32 %i to i64
-  %addr2 = getelementptr inbounds i64, i64* %P2, i64 %s2
-  store i64 %s2, i64* %addr2
+  %addr2 = getelementptr inbounds i64, ptr %P2, i64 %s2
+  store i64 %s2, ptr %addr2
   ret void
 }
 
 ; Both %add1 and %add2 are promoted by forking extra sexts.
-define void @promoteTwoTwo(i32 %i, i32 %j, i32 %k, i64* %P1, i64* %P2) {
+define void @promoteTwoTwo(i32 %i, i32 %j, i32 %k, ptr %P1, ptr %P2) {
 ; CHECK-LABEL: @promoteTwoTwo
 ; CHECK-LABEL: entry:
 ; CHECK: %[[SEXT1:.*]] = sext i32 %j to i64
@@ -35,16 +35,16 @@ define void @promoteTwoTwo(i32 %i, i32 %j, i32 %k, i64* %P1, i64* %P2) {
 entry:
   %add1 = add nsw i32 %j, %i
   %s = sext i32 %add1 to i64
-  %addr1 = getelementptr inbounds i64, i64* %P1, i64 %s
-  store i64 %s, i64* %addr1
+  %addr1 = getelementptr inbounds i64, ptr %P1, i64 %s
+  store i64 %s, ptr %addr1
   %add2 = add nsw i32 %j, %k
   %s2 = sext i32 %add2 to i64
-  %addr2 = getelementptr inbounds i64, i64* %P2, i64 %s2
-  store i64 %s2, i64* %addr2
+  %addr2 = getelementptr inbounds i64, ptr %P2, i64 %s2
+  store i64 %s2, ptr %addr2
   ret void
 }
 
-define i64 @promoteGEPSunk(i1 %cond, i64* %base, i32 %i) {
+define i64 @promoteGEPSunk(i1 %cond, ptr %base, i32 %i) {
 ; CHECK-LABEL: @promoteGEPSunk
 ; CHECK-LABEL: entry:
 ; CHECK:  %[[SEXT:.*]] = sext i32 %i to i64
@@ -53,14 +53,14 @@ define i64 @promoteGEPSunk(i1 %cond, i64* %base, i32 %i) {
 entry:
   %add = add nsw i32 %i, 1
   %s = sext i32 %add to i64
-  %addr = getelementptr inbounds i64, i64* %base, i64 %s
+  %addr = getelementptr inbounds i64, ptr %base, i64 %s
   %add2 = add nsw i32 %i,  2
   %s2 = sext i32 %add2 to i64
-  %addr2 = getelementptr inbounds i64, i64* %base, i64 %s2
+  %addr2 = getelementptr inbounds i64, ptr %base, i64 %s2
   br i1 %cond, label %if.then, label %if.then2
 if.then:
-  %v = load i64, i64* %addr
-  %v2 = load i64, i64* %addr2
+  %v = load i64, ptr %addr
+  %v2 = load i64, ptr %addr2
   %r = add i64 %v, %v2
   ret i64 %r
 if.then2:

diff --git a/llvm/test/CodeGen/AArch64/aarch64-dup-ext.ll b/llvm/test/CodeGen/AArch64/aarch64-dup-ext.ll
index e0d7759c5a66b..ea21af56dcc76 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-dup-ext.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-dup-ext.ll
@@ -191,7 +191,7 @@ entry:
     ret <4 x i32> %out
 }
 
-define void @typei1_orig(i64 %a, i8* %p, <8 x i16>* %q) {
+define void @typei1_orig(i64 %a, ptr %p, ptr %q) {
 ; CHECK-LABEL: typei1_orig:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmp x0, #0
@@ -207,7 +207,7 @@ define void @typei1_orig(i64 %a, i8* %p, <8 x i16>* %q) {
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
     %tmp = xor <16 x i1> zeroinitializer, <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
-    %tmp6 = load <8 x i16>, <8 x i16>* %q, align 2
+    %tmp6 = load <8 x i16>, ptr %q, align 2
     %tmp7 = sub <8 x i16> zeroinitializer, %tmp6
     %tmp8 = shufflevector <8 x i16> %tmp7, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
     %tmp9 = icmp slt i64 0, %a
@@ -218,8 +218,7 @@ define void @typei1_orig(i64 %a, i8* %p, <8 x i16>* %q) {
     %tmp14 = icmp ne <16 x i16> %tmp13, zeroinitializer
     %tmp15 = and <16 x i1> %tmp14, %tmp
     %tmp16 = sext <16 x i1> %tmp15 to <16 x i8>
-    %tmp17 = bitcast i8* %p to <16 x i8>*
-    store <16 x i8> %tmp16, <16 x i8>* %tmp17, align 1
+    store <16 x i8> %tmp16, ptr %p, align 1
     ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
index 106074ea9adde..57f7a66cbab69 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
@@ -89,7 +89,7 @@ entry:
   %l1 = alloca i32, align 4
   %conv = fptosi double %d10 to i32
   %add = add nsw i32 %conv, %i10
-  %l1.0.l1.0. = load volatile i32, i32* %l1, align 4
+  %l1.0.l1.0. = load volatile i32, ptr %l1, align 4
   %add1 = or i32 %add, %l1.0.l1.0.
   %call = tail call i32 @g()
   %add2 = add nsw i32 %add1, %call
@@ -149,7 +149,7 @@ entry:
   %l1 = alloca i32, align 4
   %conv = fptosi double %d10 to i32
   %add = add nsw i32 %conv, %i10
-  %l1.0.l1.0. = load volatile i32, i32* %l1, align 4
+  %l1.0.l1.0. = load volatile i32, ptr %l1, align 4
   %add1 = add nsw i32 %add, %l1.0.l1.0.
   ret i32 %add1
 }
@@ -171,7 +171,7 @@ entry:
   %l1 = alloca i32, align 128
   %conv = fptosi double %d10 to i32
   %add = add nsw i32 %conv, %i10
-  %l1.0.l1.0. = load volatile i32, i32* %l1, align 128
+  %l1.0.l1.0. = load volatile i32, ptr %l1, align 128
   %add1 = or i32 %add, %l1.0.l1.0.
   %call = tail call i32 @g()
   %add2 = add nsw i32 %add1, %call
@@ -244,7 +244,7 @@ entry:
   %l1 = alloca i32, align 128
   %conv = fptosi double %d10 to i32
   %add = add nsw i32 %conv, %i10
-  %l1.0.l1.0. = load volatile i32, i32* %l1, align 128
+  %l1.0.l1.0. = load volatile i32, ptr %l1, align 128
   %add1 = add nsw i32 %add, %l1.0.l1.0.
   ret i32 %add1
 }
@@ -275,11 +275,11 @@ entry:
   %vla = alloca i32, i64 %0, align 4
   %conv = fptosi double %d10 to i32
   %add = add nsw i32 %conv, %i10
-  %l1.0.l1.0. = load volatile i32, i32* %l1, align 4
+  %l1.0.l1.0. = load volatile i32, ptr %l1, align 4
   %add1 = or i32 %add, %l1.0.l1.0.
   %call = tail call i32 @g()
   %add2 = add nsw i32 %add1, %call
-  %1 = load volatile i32, i32* %vla, align 4, !tbaa !1
+  %1 = load volatile i32, ptr %vla, align 4, !tbaa !1
   %add3 = add nsw i32 %add2, %1
   ret i32 %add3
 }
@@ -332,9 +332,9 @@ entry:
   %vla = alloca i32, i64 %0, align 4
   %conv = fptosi double %d10 to i32
   %add = add nsw i32 %conv, %i10
-  %l1.0.l1.0. = load volatile i32, i32* %l1, align 4
+  %l1.0.l1.0. = load volatile i32, ptr %l1, align 4
   %add1 = add nsw i32 %add, %l1.0.l1.0.
-  %1 = load volatile i32, i32* %vla, align 4, !tbaa !1
+  %1 = load volatile i32, ptr %vla, align 4, !tbaa !1
   %add2 = add nsw i32 %add1, %1
   ret i32 %add2
 }
@@ -375,11 +375,11 @@ entry:
   %vla = alloca i32, i64 %0, align 4
   %conv = fptosi double %d10 to i32
   %add = add nsw i32 %conv, %i10
-  %l1.0.l1.0. = load volatile i32, i32* %l1, align 128
+  %l1.0.l1.0. = load volatile i32, ptr %l1, align 128
   %add1 = or i32 %add, %l1.0.l1.0.
   %call = tail call i32 @g()
   %add2 = add nsw i32 %add1, %call
-  %1 = load volatile i32, i32* %vla, align 4, !tbaa !1
+  %1 = load volatile i32, ptr %vla, align 4, !tbaa !1
   %add3 = add nsw i32 %add2, %1
   ret i32 %add3
 }
@@ -486,9 +486,9 @@ entry:
   %vla = alloca i32, i64 %0, align 4
   %conv = fptosi double %d10 to i32
   %add = add nsw i32 %conv, %i10
-  %l1.0.l1.0. = load volatile i32, i32* %l1, align 128
+  %l1.0.l1.0. = load volatile i32, ptr %l1, align 128
   %add1 = add nsw i32 %add, %l1.0.l1.0.
-  %1 = load volatile i32, i32* %vla, align 4, !tbaa !1
+  %1 = load volatile i32, ptr %vla, align 4, !tbaa !1
   %add2 = add nsw i32 %add1, %1
   ret i32 %add2
 }
@@ -570,9 +570,9 @@ entry:
   %vla = alloca i32, i64 %0, align 4
   %conv = fptosi double %d10 to i32
   %add = add nsw i32 %conv, %i10
-  %l1.0.l1.0. = load volatile i32, i32* %l1, align 32768
+  %l1.0.l1.0. = load volatile i32, ptr %l1, align 32768
   %add1 = add nsw i32 %add, %l1.0.l1.0.
-  %1 = load volatile i32, i32* %vla, align 4, !tbaa !1
+  %1 = load volatile i32, ptr %vla, align 4, !tbaa !1
   %add2 = add nsw i32 %add1, %1
   ret i32 %add2
 }

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll b/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
index 00dcd86ad9fc8..296435adc8de5 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
@@ -20,9 +20,9 @@
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--linux-gnu"
 
-define i64 @f_load_madd_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
+define i64 @f_load_madd_64(i64 %a, i64 %b, ptr nocapture readonly %c) #0 {
 entry:
-  %0 = load i64, i64* %c, align 8
+  %0 = load i64, ptr %c, align 8
   %mul = mul nsw i64 %0, %b
   %add = add nsw i64 %mul, %a
   ret i64 %add
@@ -39,9 +39,9 @@ entry:
 ; CHECK-BASIC-PASS-DISABLED-NEXT:  madd
 
 
-define i32 @f_load_madd_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 {
+define i32 @f_load_madd_32(i32 %a, i32 %b, ptr nocapture readonly %c) #0 {
 entry:
-  %0 = load i32, i32* %c, align 4
+  %0 = load i32, ptr %c, align 4
   %mul = mul nsw i32 %0, %b
   %add = add nsw i32 %mul, %a
   ret i32 %add
@@ -54,9 +54,9 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	madd
 
 
-define i64 @f_load_msub_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
+define i64 @f_load_msub_64(i64 %a, i64 %b, ptr nocapture readonly %c) #0 {
 entry:
-  %0 = load i64, i64* %c, align 8
+  %0 = load i64, ptr %c, align 8
   %mul = mul nsw i64 %0, %b
   %sub = sub nsw i64 %a, %mul
   ret i64 %sub
@@ -70,9 +70,9 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	msub
 
 
-define i32 @f_load_msub_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 {
+define i32 @f_load_msub_32(i32 %a, i32 %b, ptr nocapture readonly %c) #0 {
 entry:
-  %0 = load i32, i32* %c, align 4
+  %0 = load i32, ptr %c, align 4
   %mul = mul nsw i32 %0, %b
   %sub = sub nsw i32 %a, %mul
   ret i32 %sub
@@ -85,9 +85,9 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	msub
 
 
-define i64 @f_load_mul_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
+define i64 @f_load_mul_64(i64 %a, i64 %b, ptr nocapture readonly %c) #0 {
 entry:
-  %0 = load i64, i64* %c, align 8
+  %0 = load i64, ptr %c, align 8
   %mul = mul nsw i64 %0, %b
   ret i64 %mul
 }
@@ -99,9 +99,9 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	mul
 
 
-define i32 @f_load_mul_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 {
+define i32 @f_load_mul_32(i32 %a, i32 %b, ptr nocapture readonly %c) #0 {
 entry:
-  %0 = load i32, i32* %c, align 4
+  %0 = load i32, ptr %c, align 4
   %mul = mul nsw i32 %0, %b
   ret i32 %mul
 }
@@ -113,9 +113,9 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	mul
 
 
-define i64 @f_load_mneg_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
+define i64 @f_load_mneg_64(i64 %a, i64 %b, ptr nocapture readonly %c) #0 {
 entry:
-  %0 = load i64, i64* %c, align 8
+  %0 = load i64, ptr %c, align 8
   %mul = sub i64 0, %b
   %sub = mul i64 %0, %mul
   ret i64 %sub
@@ -131,9 +131,9 @@ entry:
 ; FIXME-CHECK-NOWORKAROUND-NEXT: mneg
 
 
-define i32 @f_load_mneg_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 {
+define i32 @f_load_mneg_32(i32 %a, i32 %b, ptr nocapture readonly %c) #0 {
 entry:
-  %0 = load i32, i32* %c, align 4
+  %0 = load i32, ptr %c, align 4
   %mul = sub i32 0, %b
   %sub = mul i32 %0, %mul
   ret i32 %sub
@@ -148,13 +148,13 @@ entry:
 ; FIXME-CHECK-NOWORKAROUND-NEXT: mneg
 
 
-define i64 @f_load_smaddl(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 {
+define i64 @f_load_smaddl(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 {
 entry:
   %conv = sext i32 %b to i64
   %conv1 = sext i32 %c to i64
   %mul = mul nsw i64 %conv1, %conv
   %add = add nsw i64 %mul, %a
-  %0 = load i32, i32* %d, align 4
+  %0 = load i32, ptr %d, align 4
   %conv2 = sext i32 %0 to i64
   %add3 = add nsw i64 %add, %conv2
   ret i64 %add3
@@ -168,13 +168,13 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	smaddl
 
 
-define i64 @f_load_smsubl_64(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 {
+define i64 @f_load_smsubl_64(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 {
 entry:
   %conv = sext i32 %b to i64
   %conv1 = sext i32 %c to i64
   %mul = mul nsw i64 %conv1, %conv
   %sub = sub i64 %a, %mul
-  %0 = load i32, i32* %d, align 4
+  %0 = load i32, ptr %d, align 4
   %conv2 = sext i32 %0 to i64
   %add = add nsw i64 %sub, %conv2
   ret i64 %add
@@ -188,12 +188,12 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	smsubl
 
 
-define i64 @f_load_smull(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 {
+define i64 @f_load_smull(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 {
 entry:
   %conv = sext i32 %b to i64
   %conv1 = sext i32 %c to i64
   %mul = mul nsw i64 %conv1, %conv
-  %0 = load i32, i32* %d, align 4
+  %0 = load i32, ptr %d, align 4
   %conv2 = sext i32 %0 to i64
   %div = sdiv i64 %mul, %conv2
   ret i64 %div
@@ -206,13 +206,13 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	smull
 
 
-define i64 @f_load_smnegl_64(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 {
+define i64 @f_load_smnegl_64(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 {
 entry:
   %conv = sext i32 %b to i64
   %conv1 = sext i32 %c to i64
   %mul = sub nsw i64 0, %conv
   %sub = mul i64 %conv1, %mul
-  %0 = load i32, i32* %d, align 4
+  %0 = load i32, ptr %d, align 4
   %conv2 = sext i32 %0 to i64
   %div = sdiv i64 %sub, %conv2
   ret i64 %div
@@ -223,13 +223,13 @@ entry:
 ;        smnegl instructions
 
 
-define i64 @f_load_umaddl(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 {
+define i64 @f_load_umaddl(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 {
 entry:
   %conv = zext i32 %b to i64
   %conv1 = zext i32 %c to i64
   %mul = mul i64 %conv1, %conv
   %add = add i64 %mul, %a
-  %0 = load i32, i32* %d, align 4
+  %0 = load i32, ptr %d, align 4
   %conv2 = zext i32 %0 to i64
   %add3 = add i64 %add, %conv2
   ret i64 %add3
@@ -243,13 +243,13 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	umaddl
 
 
-define i64 @f_load_umsubl_64(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 {
+define i64 @f_load_umsubl_64(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 {
 entry:
   %conv = zext i32 %b to i64
   %conv1 = zext i32 %c to i64
   %mul = mul i64 %conv1, %conv
   %sub = sub i64 %a, %mul
-  %0 = load i32, i32* %d, align 4
+  %0 = load i32, ptr %d, align 4
   %conv2 = zext i32 %0 to i64
   %add = add i64 %sub, %conv2
   ret i64 %add
@@ -263,12 +263,12 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	umsubl
 
 
-define i64 @f_load_umull(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 {
+define i64 @f_load_umull(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 {
 entry:
   %conv = zext i32 %b to i64
   %conv1 = zext i32 %c to i64
   %mul = mul i64 %conv1, %conv
-  %0 = load i32, i32* %d, align 4
+  %0 = load i32, ptr %d, align 4
   %conv2 = zext i32 %0 to i64
   %div = udiv i64 %mul, %conv2
   ret i64 %div
@@ -281,13 +281,13 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	umull
 
 
-define i64 @f_load_umnegl_64(i64 %a, i32 %b, i32 %c, i32* nocapture readonly %d) #0 {
+define i64 @f_load_umnegl_64(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 {
 entry:
   %conv = zext i32 %b to i64
   %conv1 = zext i32 %c to i64
   %mul = sub nsw i64 0, %conv
   %sub = mul i64 %conv1, %mul
-  %0 = load i32, i32* %d, align 4
+  %0 = load i32, ptr %d, align 4
   %conv2 = zext i32 %0 to i64
   %div = udiv i64 %sub, %conv2
   ret i64 %div
@@ -298,10 +298,10 @@ entry:
 ;        umnegl instructions
 
 
-define i64 @f_store_madd_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
+define i64 @f_store_madd_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
 entry:
-  %0 = load i64, i64* %cp, align 8
-  store i64 %a, i64* %e, align 8
+  %0 = load i64, ptr %cp, align 8
+  store i64 %a, ptr %e, align 8
   %mul = mul nsw i64 %0, %b
   %add = add nsw i64 %mul, %a
   ret i64 %add
@@ -315,10 +315,10 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	madd
 
 
-define i32 @f_store_madd_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
+define i32 @f_store_madd_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
 entry:
-  %0 = load i32, i32* %cp, align 4
-  store i32 %a, i32* %e, align 4
+  %0 = load i32, ptr %cp, align 4
+  store i32 %a, ptr %e, align 4
   %mul = mul nsw i32 %0, %b
   %add = add nsw i32 %mul, %a
   ret i32 %add
@@ -331,10 +331,10 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	madd
 
 
-define i64 @f_store_msub_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
+define i64 @f_store_msub_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
 entry:
-  %0 = load i64, i64* %cp, align 8
-  store i64 %a, i64* %e, align 8
+  %0 = load i64, ptr %cp, align 8
+  store i64 %a, ptr %e, align 8
   %mul = mul nsw i64 %0, %b
   %sub = sub nsw i64 %a, %mul
   ret i64 %sub
@@ -348,10 +348,10 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	msub
 
 
-define i32 @f_store_msub_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
+define i32 @f_store_msub_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
 entry:
-  %0 = load i32, i32* %cp, align 4
-  store i32 %a, i32* %e, align 4
+  %0 = load i32, ptr %cp, align 4
+  store i32 %a, ptr %e, align 4
   %mul = mul nsw i32 %0, %b
   %sub = sub nsw i32 %a, %mul
   ret i32 %sub
@@ -364,10 +364,10 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	msub
 
 
-define i64 @f_store_mul_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
+define i64 @f_store_mul_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
 entry:
-  %0 = load i64, i64* %cp, align 8
-  store i64 %a, i64* %e, align 8
+  %0 = load i64, ptr %cp, align 8
+  store i64 %a, ptr %e, align 8
   %mul = mul nsw i64 %0, %b
   ret i64 %mul
 }
@@ -379,10 +379,10 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	mul
 
 
-define i32 @f_store_mul_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
+define i32 @f_store_mul_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
 entry:
-  %0 = load i32, i32* %cp, align 4
-  store i32 %a, i32* %e, align 4
+  %0 = load i32, ptr %cp, align 4
+  store i32 %a, ptr %e, align 4
   %mul = mul nsw i32 %0, %b
   ret i32 %mul
 }
@@ -394,11 +394,10 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	mul
 
 
-define i64 @f_prefetch_madd_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
+define i64 @f_prefetch_madd_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
 entry:
-  %0 = load i64, i64* %cp, align 8
-  %1 = bitcast i64* %e to i8*
-  tail call void @llvm.prefetch(i8* %1, i32 0, i32 0, i32 1)
+  %0 = load i64, ptr %cp, align 8
+  tail call void @llvm.prefetch(ptr %e, i32 0, i32 0, i32 1)
   %mul = mul nsw i64 %0, %b
   %add = add nsw i64 %mul, %a
   ret i64 %add
@@ -411,13 +410,12 @@ entry:
 ; CHECK-NOWORKAROUND:	prfm
 ; CHECK-NOWORKAROUND-NEXT:	madd
 
-declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) #2
+declare void @llvm.prefetch(ptr nocapture, i32, i32, i32) #2
 
-define i32 @f_prefetch_madd_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
+define i32 @f_prefetch_madd_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
 entry:
-  %0 = load i32, i32* %cp, align 4
-  %1 = bitcast i32* %e to i8*
-  tail call void @llvm.prefetch(i8* %1, i32 1, i32 0, i32 1)
+  %0 = load i32, ptr %cp, align 4
+  tail call void @llvm.prefetch(ptr %e, i32 1, i32 0, i32 1)
   %mul = mul nsw i32 %0, %b
   %add = add nsw i32 %mul, %a
   ret i32 %add
@@ -429,11 +427,10 @@ entry:
 ; CHECK-NOWORKAROUND:	prfm
 ; CHECK-NOWORKAROUND-NEXT:	madd
 
-define i64 @f_prefetch_msub_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
+define i64 @f_prefetch_msub_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
 entry:
-  %0 = load i64, i64* %cp, align 8
-  %1 = bitcast i64* %e to i8*
-  tail call void @llvm.prefetch(i8* %1, i32 0, i32 1, i32 1)
+  %0 = load i64, ptr %cp, align 8
+  tail call void @llvm.prefetch(ptr %e, i32 0, i32 1, i32 1)
   %mul = mul nsw i64 %0, %b
   %sub = sub nsw i64 %a, %mul
   ret i64 %sub
@@ -446,11 +443,10 @@ entry:
 ; CHECK-NOWORKAROUND:	prfm
 ; CHECK-NOWORKAROUND-NEXT:	msub
 
-define i32 @f_prefetch_msub_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
+define i32 @f_prefetch_msub_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
 entry:
-  %0 = load i32, i32* %cp, align 4
-  %1 = bitcast i32* %e to i8*
-  tail call void @llvm.prefetch(i8* %1, i32 1, i32 1, i32 1)
+  %0 = load i32, ptr %cp, align 4
+  tail call void @llvm.prefetch(ptr %e, i32 1, i32 1, i32 1)
   %mul = mul nsw i32 %0, %b
   %sub = sub nsw i32 %a, %mul
   ret i32 %sub
@@ -462,11 +458,10 @@ entry:
 ; CHECK-NOWORKAROUND:	prfm
 ; CHECK-NOWORKAROUND-NEXT:	msub
 
-define i64 @f_prefetch_mul_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
+define i64 @f_prefetch_mul_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
 entry:
-  %0 = load i64, i64* %cp, align 8
-  %1 = bitcast i64* %e to i8*
-  tail call void @llvm.prefetch(i8* %1, i32 0, i32 3, i32 1)
+  %0 = load i64, ptr %cp, align 8
+  tail call void @llvm.prefetch(ptr %e, i32 0, i32 3, i32 1)
   %mul = mul nsw i64 %0, %b
   ret i64 %mul
 }
@@ -477,11 +472,10 @@ entry:
 ; CHECK-NOWORKAROUND:	prfm
 ; CHECK-NOWORKAROUND-NEXT:	mul
 
-define i32 @f_prefetch_mul_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
+define i32 @f_prefetch_mul_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
 entry:
-  %0 = load i32, i32* %cp, align 4
-  %1 = bitcast i32* %e to i8*
-  tail call void @llvm.prefetch(i8* %1, i32 1, i32 3, i32 1)
+  %0 = load i32, ptr %cp, align 4
+  tail call void @llvm.prefetch(ptr %e, i32 1, i32 3, i32 1)
   %mul = mul nsw i32 %0, %b
   ret i32 %mul
 }
@@ -492,15 +486,15 @@ entry:
 ; CHECK-NOWORKAROUND:	prfm
 ; CHECK-NOWORKAROUND-NEXT:	mul
 
-define i64 @fall_through(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
+define i64 @fall_through(i64 %a, i64 %b, ptr nocapture readonly %c) #0 {
 entry:
-  %0 = load i64, i64* %c, align 8
+  %0 = load i64, ptr %c, align 8
   br label %block1
 
 block1:
   %mul = mul nsw i64 %0, %b
   %add = add nsw i64 %mul, %a
-  %tmp = ptrtoint i8* blockaddress(@fall_through, %block1) to i64
+  %tmp = ptrtoint ptr blockaddress(@fall_through, %block1) to i64
   %ret = add nsw i64 %tmp, %add
   ret i64 %ret
 }
@@ -517,7 +511,7 @@ block1:
 ; CHECK-NOWORKAROUND-NEXT:	madd
 
 ; No checks for this, just check it doesn't crash
-define i32 @crash_check(i8** nocapture readnone %data) #0 {
+define i32 @crash_check(ptr nocapture readnone %data) #0 {
 entry:
   br label %while.cond
 

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll b/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
index 25ea3933c0064..eaa89081199ed 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
@@ -7,7 +7,7 @@
 %struct.c = type [256 x i64]
 
 declare void @foo()
-define i16 @halfword(%struct.a* %ctx, i32 %xor72) nounwind {
+define i16 @halfword(ptr %ctx, i32 %xor72) nounwind {
 ; CHECK0-LABEL: halfword:
 ; CHECK0:       // %bb.0:
 ; CHECK0-NEXT:    stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
@@ -41,14 +41,14 @@ define i16 @halfword(%struct.a* %ctx, i32 %xor72) nounwind {
   %shr81 = lshr i32 %xor72, 9
   %conv82 = zext i32 %shr81 to i64
   %idxprom83 = and i64 %conv82, 255
-  %arrayidx86 = getelementptr inbounds %struct.a, %struct.a* %ctx, i64 0, i64 %idxprom83
-  %result = load i16, i16* %arrayidx86, align 2
+  %arrayidx86 = getelementptr inbounds %struct.a, ptr %ctx, i64 0, i64 %idxprom83
+  %result = load i16, ptr %arrayidx86, align 2
   call void @foo()
-  store i16 %result, i16* %arrayidx86, align 2
+  store i16 %result, ptr %arrayidx86, align 2
   ret i16 %result
 }
 
-define i32 @word(%struct.b* %ctx, i32 %xor72) nounwind {
+define i32 @word(ptr %ctx, i32 %xor72) nounwind {
 ; CHECK0-LABEL: word:
 ; CHECK0:       // %bb.0:
 ; CHECK0-NEXT:    stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
@@ -82,14 +82,14 @@ define i32 @word(%struct.b* %ctx, i32 %xor72) nounwind {
   %shr81 = lshr i32 %xor72, 9
   %conv82 = zext i32 %shr81 to i64
   %idxprom83 = and i64 %conv82, 255
-  %arrayidx86 = getelementptr inbounds %struct.b, %struct.b* %ctx, i64 0, i64 %idxprom83
-  %result = load i32, i32* %arrayidx86, align 4
+  %arrayidx86 = getelementptr inbounds %struct.b, ptr %ctx, i64 0, i64 %idxprom83
+  %result = load i32, ptr %arrayidx86, align 4
   call void @foo()
-  store i32 %result, i32* %arrayidx86, align 4
+  store i32 %result, ptr %arrayidx86, align 4
   ret i32 %result
 }
 
-define i64 @doubleword(%struct.c* %ctx, i32 %xor72) nounwind {
+define i64 @doubleword(ptr %ctx, i32 %xor72) nounwind {
 ; CHECK0-LABEL: doubleword:
 ; CHECK0:       // %bb.0:
 ; CHECK0-NEXT:    stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
@@ -123,10 +123,10 @@ define i64 @doubleword(%struct.c* %ctx, i32 %xor72) nounwind {
   %shr81 = lshr i32 %xor72, 9
   %conv82 = zext i32 %shr81 to i64
   %idxprom83 = and i64 %conv82, 255
-  %arrayidx86 = getelementptr inbounds %struct.c, %struct.c* %ctx, i64 0, i64 %idxprom83
-  %result = load i64, i64* %arrayidx86, align 8
+  %arrayidx86 = getelementptr inbounds %struct.c, ptr %ctx, i64 0, i64 %idxprom83
+  %result = load i64, ptr %arrayidx86, align 8
   call void @foo()
-  store i64 %result, i64* %arrayidx86, align 8
+  store i64 %result, ptr %arrayidx86, align 8
   ret i64 %result
 }
 
@@ -162,7 +162,7 @@ endbb:
  ret i64 %mul2
 }
 
-define i64 @gep3(i64 *%p, i64 %b) {
+define i64 @gep3(ptr %p, i64 %b) {
 ; CHECK0-LABEL: gep3:
 ; CHECK0:       // %bb.0:
 ; CHECK0-NEXT:    lsl x9, x1, #3
@@ -177,22 +177,22 @@ define i64 @gep3(i64 *%p, i64 %b) {
 ; CHECK3-NEXT:    ldr x0, [x0, x1, lsl #3]
 ; CHECK3-NEXT:    str x1, [x8, x1, lsl #3]
 ; CHECK3-NEXT:    ret
-  %g = getelementptr inbounds i64, i64* %p, i64 %b
-  %l = load i64, i64* %g
-  store i64 %b, i64* %g
+  %g = getelementptr inbounds i64, ptr %p, i64 %b
+  %l = load i64, ptr %g
+  store i64 %b, ptr %g
   ret i64 %l
 }
 
-define i128 @gep4(i128 *%p, i128 %a, i64 %b) {
+define i128 @gep4(ptr %p, i128 %a, i64 %b) {
 ; CHECK-LABEL: gep4:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, x4, lsl #4
 ; CHECK-NEXT:    ldp x0, x1, [x8]
 ; CHECK-NEXT:    stp x2, x3, [x8]
 ; CHECK-NEXT:    ret
-  %g = getelementptr inbounds i128, i128* %p, i64 %b
-  %l = load i128, i128* %g
-  store i128 %a, i128* %g
+  %g = getelementptr inbounds i128, ptr %p, i64 %b
+  %l = load i128, ptr %g
+  store i128 %a, ptr %g
   ret i128 %l
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-insert-subvector-undef.ll b/llvm/test/CodeGen/AArch64/aarch64-insert-subvector-undef.ll
index 0337f04e57963..dd1bf14f5a569 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-insert-subvector-undef.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-insert-subvector-undef.ll
@@ -6,14 +6,14 @@
 
 define <8 x i16> @c(i32 %e) {
 entry:
-  %0 = load <4 x i16>, <4 x i16>* @d, align 8
+  %0 = load <4 x i16>, ptr @d, align 8
   %vminv = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> %0)
   %1 = trunc i32 %vminv to i16
   %vecinit3 = insertelement <4 x i16> <i16 undef, i16 undef, i16 0, i16 0>, i16 %1, i32 1
   %call = tail call <8 x i16> @c(i32 0) #3
   %vgetq_lane = extractelement <8 x i16> %call, i32 0
   %vset_lane = insertelement <4 x i16> %vecinit3, i16 %vgetq_lane, i32 0
-  %call4 = tail call i32 bitcast (i32 (...)* @k to i32 (<4 x i16>)*)(<4 x i16> %vset_lane) #3
+  %call4 = tail call i32 @k(<4 x i16> %vset_lane) #3
   ret <8 x i16> undef
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll b/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll
index b73be5c1c39ee..bc95cfd7d28d9 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s --check-prefix CHECK-LE
 ; RUN: llc -mtriple=aarch64_be-unknown-linux-gnu < %s | FileCheck %s --check-prefix CHECK-BE
 
-define <2 x i16> @test0(i16* %i16_ptr, i64 %inc) {
+define <2 x i16> @test0(ptr %i16_ptr, i64 %inc) {
 ; CHECK-LE-LABEL: test0:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ld1 { v0.h }[0], [x0]
@@ -14,12 +14,12 @@ define <2 x i16> @test0(i16* %i16_ptr, i64 %inc) {
 ; CHECK-BE-NEXT:    ld1 { v0.h }[0], [x0]
 ; CHECK-BE-NEXT:    rev64 v0.2s, v0.2s
 ; CHECK-BE-NEXT:    ret
-  %i_0 = load i16, i16* %i16_ptr
+  %i_0 = load i16, ptr %i16_ptr
   %v0 = insertelement <2 x i16> undef, i16 %i_0, i32 0
   ret <2 x i16> %v0
 }
 
-define <2 x i16> @test1(<2 x i16>* %v2i16_ptr) {
+define <2 x i16> @test1(ptr %v2i16_ptr) {
 ; CHECK-LE-LABEL: test1:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ld1 { v0.h }[0], [x0]
@@ -35,11 +35,11 @@ define <2 x i16> @test1(<2 x i16>* %v2i16_ptr) {
 ; CHECK-BE-NEXT:    ld1 { v0.h }[2], [x8]
 ; CHECK-BE-NEXT:    rev64 v0.2s, v0.2s
 ; CHECK-BE-NEXT:    ret
-  %v2i16 = load <2 x i16>, <2 x i16>* %v2i16_ptr
+  %v2i16 = load <2 x i16>, ptr %v2i16_ptr
   ret <2 x i16> %v2i16
 }
 
-define <2 x i16> @test2(i16* %i16_ptr, i64 %inc) {
+define <2 x i16> @test2(ptr %i16_ptr, i64 %inc) {
 ; CHECK-LE-LABEL: test2:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ld1 { v0.h }[0], [x0]
@@ -55,15 +55,15 @@ define <2 x i16> @test2(i16* %i16_ptr, i64 %inc) {
 ; CHECK-BE-NEXT:    ld1 { v0.h }[2], [x8]
 ; CHECK-BE-NEXT:    rev64 v0.2s, v0.2s
 ; CHECK-BE-NEXT:    ret
-  %i_0 = load i16, i16* %i16_ptr
-  %i16_ptr_inc = getelementptr i16, i16* %i16_ptr, i64 %inc
-  %i_1 = load i16, i16* %i16_ptr_inc
+  %i_0 = load i16, ptr %i16_ptr
+  %i16_ptr_inc = getelementptr i16, ptr %i16_ptr, i64 %inc
+  %i_1 = load i16, ptr %i16_ptr_inc
   %v0 = insertelement <2 x i16> undef, i16 %i_0, i32 0
   %v1 = insertelement <2 x i16> %v0, i16 %i_1, i32 1
   ret <2 x i16> %v1
 }
 
-define <2 x i8> @test3(<2 x i8>* %v2i8_ptr) {
+define <2 x i8> @test3(ptr %v2i8_ptr) {
 ; CHECK-LE-LABEL: test3:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ld1 { v0.b }[0], [x0]
@@ -79,11 +79,11 @@ define <2 x i8> @test3(<2 x i8>* %v2i8_ptr) {
 ; CHECK-BE-NEXT:    ld1 { v0.b }[4], [x8]
 ; CHECK-BE-NEXT:    rev64 v0.2s, v0.2s
 ; CHECK-BE-NEXT:    ret
-  %v2i8 = load <2 x i8>, <2 x i8>* %v2i8_ptr
+  %v2i8 = load <2 x i8>, ptr %v2i8_ptr
   ret <2 x i8> %v2i8
 }
 
-define <4 x i8> @test4(<4 x i8>* %v4i8_ptr) {
+define <4 x i8> @test4(ptr %v4i8_ptr) {
 ; CHECK-LE-LABEL: test4:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ldr s0, [x0]
@@ -98,11 +98,11 @@ define <4 x i8> @test4(<4 x i8>* %v4i8_ptr) {
 ; CHECK-BE-NEXT:    ushll v0.8h, v0.8b, #0
 ; CHECK-BE-NEXT:    rev64 v0.4h, v0.4h
 ; CHECK-BE-NEXT:    ret
-  %v4i8 = load <4 x i8>, <4 x i8>* %v4i8_ptr
+  %v4i8 = load <4 x i8>, ptr %v4i8_ptr
   ret <4 x i8> %v4i8
 }
 
-define <2 x i32> @fsext_v2i32(<2 x i8>* %a) {
+define <2 x i32> @fsext_v2i32(ptr %a) {
 ; CHECK-LE-LABEL: fsext_v2i32:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ldrsb w8, [x0]
@@ -120,12 +120,12 @@ define <2 x i32> @fsext_v2i32(<2 x i8>* %a) {
 ; CHECK-BE-NEXT:    mov v0.s[1], w8
 ; CHECK-BE-NEXT:    rev64 v0.2s, v0.2s
 ; CHECK-BE-NEXT:    ret
-  %x = load <2 x i8>, <2 x i8>* %a
+  %x = load <2 x i8>, ptr %a
   %y = sext <2 x i8> %x to <2 x i32>
   ret <2 x i32> %y
 }
 
-define <3 x i32> @fsext_v3i32(<3 x i8>* %a) {
+define <3 x i32> @fsext_v3i32(ptr %a) {
 ; CHECK-LE-LABEL: fsext_v3i32:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ldr s0, [x0]
@@ -147,12 +147,12 @@ define <3 x i32> @fsext_v3i32(<3 x i8>* %a) {
 ; CHECK-BE-NEXT:    rev64 v0.4s, v0.4s
 ; CHECK-BE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; CHECK-BE-NEXT:    ret
-  %x = load <3 x i8>, <3 x i8>* %a
+  %x = load <3 x i8>, ptr %a
   %y = sext <3 x i8> %x to <3 x i32>
   ret <3 x i32> %y
 }
 
-define <4 x i32> @fsext_v4i32(<4 x i8>* %a) {
+define <4 x i32> @fsext_v4i32(ptr %a) {
 ; CHECK-LE-LABEL: fsext_v4i32:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ldr s0, [x0]
@@ -169,12 +169,12 @@ define <4 x i32> @fsext_v4i32(<4 x i8>* %a) {
 ; CHECK-BE-NEXT:    rev64 v0.4s, v0.4s
 ; CHECK-BE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; CHECK-BE-NEXT:    ret
-  %x = load <4 x i8>, <4 x i8>* %a
+  %x = load <4 x i8>, ptr %a
   %y = sext <4 x i8> %x to <4 x i32>
   ret <4 x i32> %y
 }
 
-define <8 x i32> @fsext_v8i32(<8 x i8>* %a) {
+define <8 x i32> @fsext_v8i32(ptr %a) {
 ; CHECK-LE-LABEL: fsext_v8i32:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ldr d0, [x0]
@@ -194,12 +194,12 @@ define <8 x i32> @fsext_v8i32(<8 x i8>* %a) {
 ; CHECK-BE-NEXT:    ext v1.16b, v1.16b, v1.16b, #8
 ; CHECK-BE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; CHECK-BE-NEXT:    ret
-  %x = load <8 x i8>, <8 x i8>* %a
+  %x = load <8 x i8>, ptr %a
   %y = sext <8 x i8> %x to <8 x i32>
   ret <8 x i32> %y
 }
 
-define <4 x i32> @fzext_v4i32(<4 x i8>* %a) {
+define <4 x i32> @fzext_v4i32(ptr %a) {
 ; CHECK-LE-LABEL: fzext_v4i32:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ldr s0, [x0]
@@ -216,7 +216,7 @@ define <4 x i32> @fzext_v4i32(<4 x i8>* %a) {
 ; CHECK-BE-NEXT:    rev64 v0.4s, v0.4s
 ; CHECK-BE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; CHECK-BE-NEXT:    ret
-  %x = load <4 x i8>, <4 x i8>* %a
+  %x = load <4 x i8>, ptr %a
   %y = zext <4 x i8> %x to <4 x i32>
   ret <4 x i32> %y
 }
@@ -224,7 +224,7 @@ define <4 x i32> @fzext_v4i32(<4 x i8>* %a) {
 ; TODO: This codegen could just be:
 ;   ldrb w0, [x0]
 ;
-define i32 @loadExti32(<4 x i8>* %ref) {
+define i32 @loadExti32(ptr %ref) {
 ; CHECK-LE-LABEL: loadExti32:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ldr s0, [x0]
@@ -241,13 +241,13 @@ define i32 @loadExti32(<4 x i8>* %ref) {
 ; CHECK-BE-NEXT:    umov w8, v0.h[0]
 ; CHECK-BE-NEXT:    and w0, w8, #0xff
 ; CHECK-BE-NEXT:    ret
-  %a = load <4 x i8>, <4 x i8>* %ref
+  %a = load <4 x i8>, ptr %ref
   %vecext = extractelement <4 x i8> %a, i32 0
   %conv = zext i8 %vecext to i32
   ret i32 %conv
 }
 
-define <2 x i16> @fsext_v2i16(<2 x i8>* %a) {
+define <2 x i16> @fsext_v2i16(ptr %a) {
 ; CHECK-LE-LABEL: fsext_v2i16:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ldrsb w8, [x0]
@@ -265,12 +265,12 @@ define <2 x i16> @fsext_v2i16(<2 x i8>* %a) {
 ; CHECK-BE-NEXT:    mov v0.s[1], w8
 ; CHECK-BE-NEXT:    rev64 v0.2s, v0.2s
 ; CHECK-BE-NEXT:    ret
-  %x = load <2 x i8>, <2 x i8>* %a
+  %x = load <2 x i8>, ptr %a
   %y = sext <2 x i8> %x to <2 x i16>
   ret <2 x i16> %y
 }
 
-define <3 x i16> @fsext_v3i16(<3 x i8>* %a) {
+define <3 x i16> @fsext_v3i16(ptr %a) {
 ; CHECK-LE-LABEL: fsext_v3i16:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ldr s0, [x0]
@@ -289,12 +289,12 @@ define <3 x i16> @fsext_v3i16(<3 x i8>* %a) {
 ; CHECK-BE-NEXT:    sshr v0.4h, v0.4h, #8
 ; CHECK-BE-NEXT:    rev64 v0.4h, v0.4h
 ; CHECK-BE-NEXT:    ret
-  %x = load <3 x i8>, <3 x i8>* %a
+  %x = load <3 x i8>, ptr %a
   %y = sext <3 x i8> %x to <3 x i16>
   ret <3 x i16> %y
 }
 
-define <4 x i16> @fsext_v4i16(<4 x i8>* %a) {
+define <4 x i16> @fsext_v4i16(ptr %a) {
 ; CHECK-LE-LABEL: fsext_v4i16:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ldr s0, [x0]
@@ -309,12 +309,12 @@ define <4 x i16> @fsext_v4i16(<4 x i8>* %a) {
 ; CHECK-BE-NEXT:    sshll v0.8h, v0.8b, #0
 ; CHECK-BE-NEXT:    rev64 v0.4h, v0.4h
 ; CHECK-BE-NEXT:    ret
-  %x = load <4 x i8>, <4 x i8>* %a
+  %x = load <4 x i8>, ptr %a
   %y = sext <4 x i8> %x to <4 x i16>
   ret <4 x i16> %y
 }
 
-define <8 x i16> @fsext_v8i16(<8 x i8>* %a) {
+define <8 x i16> @fsext_v8i16(ptr %a) {
 ; CHECK-LE-LABEL: fsext_v8i16:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ldr d0, [x0]
@@ -328,12 +328,12 @@ define <8 x i16> @fsext_v8i16(<8 x i8>* %a) {
 ; CHECK-BE-NEXT:    rev64 v0.8h, v0.8h
 ; CHECK-BE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; CHECK-BE-NEXT:    ret
-  %x = load <8 x i8>, <8 x i8>* %a
+  %x = load <8 x i8>, ptr %a
   %y = sext <8 x i8> %x to <8 x i16>
   ret <8 x i16> %y
 }
 
-define <16 x i16> @fsext_v16i16(<16 x i8>* %a) {
+define <16 x i16> @fsext_v16i16(ptr %a) {
 ; CHECK-LE-LABEL: fsext_v16i16:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ldr q0, [x0]
@@ -351,12 +351,12 @@ define <16 x i16> @fsext_v16i16(<16 x i8>* %a) {
 ; CHECK-BE-NEXT:    ext v1.16b, v1.16b, v1.16b, #8
 ; CHECK-BE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; CHECK-BE-NEXT:    ret
-  %x = load <16 x i8>, <16 x i8>* %a
+  %x = load <16 x i8>, ptr %a
   %y = sext <16 x i8> %x to <16 x i16>
   ret <16 x i16> %y
 }
 
-define <4 x i16> @fzext_v4i16(<4 x i8>* %a) {
+define <4 x i16> @fzext_v4i16(ptr %a) {
 ; CHECK-LE-LABEL: fzext_v4i16:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ldr s0, [x0]
@@ -371,12 +371,12 @@ define <4 x i16> @fzext_v4i16(<4 x i8>* %a) {
 ; CHECK-BE-NEXT:    ushll v0.8h, v0.8b, #0
 ; CHECK-BE-NEXT:    rev64 v0.4h, v0.4h
 ; CHECK-BE-NEXT:    ret
-  %x = load <4 x i8>, <4 x i8>* %a
+  %x = load <4 x i8>, ptr %a
   %y = zext <4 x i8> %x to <4 x i16>
   ret <4 x i16> %y
 }
 
-define <4 x i16> @anyext_v4i16(<4 x i8> *%a, <4 x i8> *%b) {
+define <4 x i16> @anyext_v4i16(ptr %a, ptr %b) {
 ; CHECK-LE-LABEL: anyext_v4i16:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ldr s0, [x0]
@@ -401,14 +401,14 @@ define <4 x i16> @anyext_v4i16(<4 x i8> *%a, <4 x i8> *%b) {
 ; CHECK-BE-NEXT:    sshr v0.4h, v0.4h, #8
 ; CHECK-BE-NEXT:    rev64 v0.4h, v0.4h
 ; CHECK-BE-NEXT:    ret
-  %x = load <4 x i8>, <4 x i8>* %a, align 4
-  %y = load <4 x i8>, <4 x i8>* %b, align 4
+  %x = load <4 x i8>, ptr %a, align 4
+  %y = load <4 x i8>, ptr %b, align 4
   %z = add <4 x i8> %x, %y
   %s = sext <4 x i8> %z to <4 x i16>
   ret <4 x i16> %s
 }
 
-define <4 x i32> @anyext_v4i32(<4 x i8> *%a, <4 x i8> *%b) {
+define <4 x i32> @anyext_v4i32(ptr %a, ptr %b) {
 ; CHECK-LE-LABEL: anyext_v4i32:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ldr s0, [x0]
@@ -436,8 +436,8 @@ define <4 x i32> @anyext_v4i32(<4 x i8> *%a, <4 x i8> *%b) {
 ; CHECK-BE-NEXT:    rev64 v0.4s, v0.4s
 ; CHECK-BE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; CHECK-BE-NEXT:    ret
-  %x = load <4 x i8>, <4 x i8>* %a, align 4
-  %y = load <4 x i8>, <4 x i8>* %b, align 4
+  %x = load <4 x i8>, ptr %a, align 4
+  %y = load <4 x i8>, ptr %b, align 4
   %z = add <4 x i8> %x, %y
   %s = sext <4 x i8> %z to <4 x i32>
   ret <4 x i32> %s

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll b/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll
index 4ff80296c0d99..f5d7d330b45c4 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll
@@ -21,208 +21,208 @@ declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>)
 
 ; CHECK-LABEL: smax_B
 ; CHECK: smaxv {{b[0-9]+}}, {{v[0-9]+}}.16b
-define i8 @smax_B(<16 x i8>* nocapture readonly %arr)  {
-  %arr.load = load <16 x i8>, <16 x i8>* %arr
+define i8 @smax_B(ptr nocapture readonly %arr)  {
+  %arr.load = load <16 x i8>, ptr %arr
   %r = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %arr.load)
   ret i8 %r
 }
 
 ; CHECK-LABEL: smax_H
 ; CHECK: smaxv {{h[0-9]+}}, {{v[0-9]+}}.8h
-define i16 @smax_H(<8 x i16>* nocapture readonly %arr) {
-  %arr.load = load <8 x i16>, <8 x i16>* %arr
+define i16 @smax_H(ptr nocapture readonly %arr) {
+  %arr.load = load <8 x i16>, ptr %arr
   %r = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %arr.load)
   ret i16 %r
 }
 
 ; CHECK-LABEL: smax_S
 ; CHECK: smaxv {{s[0-9]+}}, {{v[0-9]+}}.4s
-define i32 @smax_S(<4 x i32> * nocapture readonly %arr)  {
-  %arr.load = load <4 x i32>, <4 x i32>* %arr
+define i32 @smax_S(ptr nocapture readonly %arr)  {
+  %arr.load = load <4 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %arr.load)
   ret i32 %r
 }
 
 ; CHECK-LABEL: umax_B
 ; CHECK: umaxv {{b[0-9]+}}, {{v[0-9]+}}.16b
-define i8 @umax_B(<16 x i8>* nocapture readonly %arr)  {
-  %arr.load = load <16 x i8>, <16 x i8>* %arr
+define i8 @umax_B(ptr nocapture readonly %arr)  {
+  %arr.load = load <16 x i8>, ptr %arr
   %r = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %arr.load)
   ret i8 %r
 }
 
 ; CHECK-LABEL: umax_H
 ; CHECK: umaxv {{h[0-9]+}}, {{v[0-9]+}}.8h
-define i16 @umax_H(<8 x i16>* nocapture readonly %arr)  {
-  %arr.load = load <8 x i16>, <8 x i16>* %arr
+define i16 @umax_H(ptr nocapture readonly %arr)  {
+  %arr.load = load <8 x i16>, ptr %arr
   %r = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %arr.load)
   ret i16 %r
 }
 
 ; CHECK-LABEL: umax_S
 ; CHECK: umaxv {{s[0-9]+}}, {{v[0-9]+}}.4s
-define i32 @umax_S(<4 x i32>* nocapture readonly %arr) {
-  %arr.load = load <4 x i32>, <4 x i32>* %arr
+define i32 @umax_S(ptr nocapture readonly %arr) {
+  %arr.load = load <4 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %arr.load)
   ret i32 %r
 }
 
 ; CHECK-LABEL: smin_B
 ; CHECK: sminv {{b[0-9]+}}, {{v[0-9]+}}.16b
-define i8 @smin_B(<16 x i8>* nocapture readonly %arr) {
-  %arr.load = load <16 x i8>, <16 x i8>* %arr
+define i8 @smin_B(ptr nocapture readonly %arr) {
+  %arr.load = load <16 x i8>, ptr %arr
   %r = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %arr.load)
   ret i8 %r
 }
 
 ; CHECK-LABEL: smin_H
 ; CHECK: sminv {{h[0-9]+}}, {{v[0-9]+}}.8h
-define i16 @smin_H(<8 x i16>* nocapture readonly %arr) {
-  %arr.load = load <8 x i16>, <8 x i16>* %arr
+define i16 @smin_H(ptr nocapture readonly %arr) {
+  %arr.load = load <8 x i16>, ptr %arr
   %r = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %arr.load)
   ret i16 %r
 }
 
 ; CHECK-LABEL: smin_S
 ; CHECK: sminv {{s[0-9]+}}, {{v[0-9]+}}.4s
-define i32 @smin_S(<4 x i32>* nocapture readonly %arr) {
-  %arr.load = load <4 x i32>, <4 x i32>* %arr
+define i32 @smin_S(ptr nocapture readonly %arr) {
+  %arr.load = load <4 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %arr.load)
   ret i32 %r
 }
 
 ; CHECK-LABEL: umin_B
 ; CHECK: uminv {{b[0-9]+}}, {{v[0-9]+}}.16b
-define i8 @umin_B(<16 x i8>* nocapture readonly %arr)  {
-  %arr.load = load <16 x i8>, <16 x i8>* %arr
+define i8 @umin_B(ptr nocapture readonly %arr)  {
+  %arr.load = load <16 x i8>, ptr %arr
   %r = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %arr.load)
   ret i8 %r
 }
 
 ; CHECK-LABEL: umin_H
 ; CHECK: uminv {{h[0-9]+}}, {{v[0-9]+}}.8h
-define i16 @umin_H(<8 x i16>* nocapture readonly %arr)  {
-  %arr.load = load <8 x i16>, <8 x i16>* %arr
+define i16 @umin_H(ptr nocapture readonly %arr)  {
+  %arr.load = load <8 x i16>, ptr %arr
   %r = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %arr.load)
   ret i16 %r
 }
 
 ; CHECK-LABEL: umin_S
 ; CHECK: uminv {{s[0-9]+}}, {{v[0-9]+}}.4s
-define i32 @umin_S(<4 x i32>* nocapture readonly %arr) {
-  %arr.load = load <4 x i32>, <4 x i32>* %arr
+define i32 @umin_S(ptr nocapture readonly %arr) {
+  %arr.load = load <4 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %arr.load)
   ret i32 %r
 }
 
 ; CHECK-LABEL: fmaxnm_S
 ; CHECK: fmaxnmv
-define float @fmaxnm_S(<4 x float>* nocapture readonly %arr) {
-  %arr.load  = load <4 x float>, <4 x float>* %arr
+define float @fmaxnm_S(ptr nocapture readonly %arr) {
+  %arr.load  = load <4 x float>, ptr %arr
   %r = call nnan float @llvm.vector.reduce.fmax.v4f32(<4 x float> %arr.load)
   ret float %r
 }
 
 ; CHECK-LABEL: fminnm_S
 ; CHECK: fminnmv
-define float @fminnm_S(<4 x float>* nocapture readonly %arr) {
-  %arr.load  = load <4 x float>, <4 x float>* %arr
+define float @fminnm_S(ptr nocapture readonly %arr) {
+  %arr.load  = load <4 x float>, ptr %arr
   %r = call nnan float @llvm.vector.reduce.fmin.v4f32(<4 x float> %arr.load)
   ret float %r
 }
 
 declare i16 @llvm.vector.reduce.umax.v16i16(<16 x i16>)
 
-define i16 @oversized_umax_256(<16 x i16>* nocapture readonly %arr)  {
+define i16 @oversized_umax_256(ptr nocapture readonly %arr)  {
 ; CHECK-LABEL: oversized_umax_256
 ; CHECK: umax [[V0:v[0-9]+]].8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
 ; CHECK: umaxv {{h[0-9]+}}, [[V0]]
-  %arr.load = load <16 x i16>, <16 x i16>* %arr
+  %arr.load = load <16 x i16>, ptr %arr
   %r = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %arr.load)
   ret i16 %r
 }
 
 declare i32 @llvm.vector.reduce.umax.v16i32(<16 x i32>)
 
-define i32 @oversized_umax_512(<16 x i32>* nocapture readonly %arr)  {
+define i32 @oversized_umax_512(ptr nocapture readonly %arr)  {
 ; CHECK-LABEL: oversized_umax_512
 ; CHECK: umax v
 ; CHECK-NEXT: umax v
 ; CHECK-NEXT: umax [[V0:v[0-9]+]].4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
 ; CHECK-NEXT: umaxv {{s[0-9]+}}, [[V0]]
-  %arr.load = load <16 x i32>, <16 x i32>* %arr
+  %arr.load = load <16 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %arr.load)
   ret i32 %r
 }
 
 declare i16 @llvm.vector.reduce.umin.v16i16(<16 x i16>)
 
-define i16 @oversized_umin_256(<16 x i16>* nocapture readonly %arr)  {
+define i16 @oversized_umin_256(ptr nocapture readonly %arr)  {
 ; CHECK-LABEL: oversized_umin_256
 ; CHECK: umin [[V0:v[0-9]+]].8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
 ; CHECK: uminv {{h[0-9]+}}, [[V0]]
-  %arr.load = load <16 x i16>, <16 x i16>* %arr
+  %arr.load = load <16 x i16>, ptr %arr
   %r = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %arr.load)
   ret i16 %r
 }
 
 declare i32 @llvm.vector.reduce.umin.v16i32(<16 x i32>)
 
-define i32 @oversized_umin_512(<16 x i32>* nocapture readonly %arr)  {
+define i32 @oversized_umin_512(ptr nocapture readonly %arr)  {
 ; CHECK-LABEL: oversized_umin_512
 ; CHECK: umin v
 ; CHECK-NEXT: umin v
 ; CHECK-NEXT: umin [[V0:v[0-9]+]].4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
 ; CHECK-NEXT: uminv {{s[0-9]+}}, [[V0]]
-  %arr.load = load <16 x i32>, <16 x i32>* %arr
+  %arr.load = load <16 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> %arr.load)
   ret i32 %r
 }
 
 declare i16 @llvm.vector.reduce.smax.v16i16(<16 x i16>)
 
-define i16 @oversized_smax_256(<16 x i16>* nocapture readonly %arr)  {
+define i16 @oversized_smax_256(ptr nocapture readonly %arr)  {
 ; CHECK-LABEL: oversized_smax_256
 ; CHECK: smax [[V0:v[0-9]+]].8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
 ; CHECK: smaxv {{h[0-9]+}}, [[V0]]
-  %arr.load = load <16 x i16>, <16 x i16>* %arr
+  %arr.load = load <16 x i16>, ptr %arr
   %r = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %arr.load)
   ret i16 %r
 }
 
 declare i32 @llvm.vector.reduce.smax.v16i32(<16 x i32>)
 
-define i32 @oversized_smax_512(<16 x i32>* nocapture readonly %arr)  {
+define i32 @oversized_smax_512(ptr nocapture readonly %arr)  {
 ; CHECK-LABEL: oversized_smax_512
 ; CHECK: smax v
 ; CHECK-NEXT: smax v
 ; CHECK-NEXT: smax [[V0:v[0-9]+]].4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
 ; CHECK-NEXT: smaxv {{s[0-9]+}}, [[V0]]
-  %arr.load = load <16 x i32>, <16 x i32>* %arr
+  %arr.load = load <16 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> %arr.load)
   ret i32 %r
 }
 
 declare i16 @llvm.vector.reduce.smin.v16i16(<16 x i16>)
 
-define i16 @oversized_smin_256(<16 x i16>* nocapture readonly %arr)  {
+define i16 @oversized_smin_256(ptr nocapture readonly %arr)  {
 ; CHECK-LABEL: oversized_smin_256
 ; CHECK: smin [[V0:v[0-9]+]].8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
 ; CHECK: sminv {{h[0-9]+}}, [[V0]]
-  %arr.load = load <16 x i16>, <16 x i16>* %arr
+  %arr.load = load <16 x i16>, ptr %arr
   %r = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %arr.load)
   ret i16 %r
 }
 
 declare i32 @llvm.vector.reduce.smin.v16i32(<16 x i32>)
 
-define i32 @oversized_smin_512(<16 x i32>* nocapture readonly %arr)  {
+define i32 @oversized_smin_512(ptr nocapture readonly %arr)  {
 ; CHECK-LABEL: oversized_smin_512
 ; CHECK: smin v
 ; CHECK-NEXT: smin v
 ; CHECK-NEXT: smin [[V0:v[0-9]+]].4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
 ; CHECK-NEXT: sminv {{s[0-9]+}}, [[V0]]
-  %arr.load = load <16 x i32>, <16 x i32>* %arr
+  %arr.load = load <16 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> %arr.load)
   ret i32 %r
 }

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll b/llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll
index 1b1ae0853d5f3..cea255b225212 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-mops-consecutive.ll
@@ -2,9 +2,9 @@
 
 ; RUN: llc %s -o - -mtriple=aarch64-arm-none-eabi -O2 -mattr=+mops       | FileCheck %s --check-prefix=CHECK-MOPS
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
 
-declare void @fn(i8*, i8*)
+declare void @fn(ptr, ptr)
 
 define void @consecutive() {
 ; CHECK-MOPS-LABEL: consecutive:
@@ -42,28 +42,24 @@ define void @consecutive() {
 entry:
   %buf_from = alloca [1000 x i8], align 16
   %buf_to = alloca [1000 x i8], align 1
-  %0 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 0
-  %1 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_to, i64 0, i64 0
-  call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 1 dereferenceable(1000) %1, i8 0, i64 1000, i1 false)
-  %2 = bitcast [1000 x i8]* %buf_from to <16 x i8>*
-  store <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, <16 x i8>* %2, align 16
-  %arrayidx.16 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 16
-  %3 = bitcast i8* %arrayidx.16 to <8 x i8>*
-  store <8 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23>, <8 x i8>* %3, align 16
-  %arrayidx.24 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 24
-  store i8 24, i8* %arrayidx.24, align 8
-  %arrayidx.25 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 25
-  store i8 25, i8* %arrayidx.25, align 1
-  %arrayidx.26 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 26
-  store i8 26, i8* %arrayidx.26, align 2
-  %arrayidx.27 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 27
-  store i8 27, i8* %arrayidx.27, align 1
-  %arrayidx.28 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 28
-  store i8 28, i8* %arrayidx.28, align 4
-  %arrayidx.29 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 29
-  store i8 29, i8* %arrayidx.29, align 1
-  %arrayidx.30 = getelementptr inbounds [1000 x i8], [1000 x i8]* %buf_from, i64 0, i64 30
-  store i8 30, i8* %arrayidx.30, align 2
-  call void @fn(i8* nonnull %0, i8* nonnull %1)
+  call void @llvm.memset.p0.i64(ptr noundef nonnull align 1 dereferenceable(1000) %buf_to, i8 0, i64 1000, i1 false)
+  store <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, ptr %buf_from, align 16
+  %arrayidx.16 = getelementptr inbounds [1000 x i8], ptr %buf_from, i64 0, i64 16
+  store <8 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23>, ptr %arrayidx.16, align 16
+  %arrayidx.24 = getelementptr inbounds [1000 x i8], ptr %buf_from, i64 0, i64 24
+  store i8 24, ptr %arrayidx.24, align 8
+  %arrayidx.25 = getelementptr inbounds [1000 x i8], ptr %buf_from, i64 0, i64 25
+  store i8 25, ptr %arrayidx.25, align 1
+  %arrayidx.26 = getelementptr inbounds [1000 x i8], ptr %buf_from, i64 0, i64 26
+  store i8 26, ptr %arrayidx.26, align 2
+  %arrayidx.27 = getelementptr inbounds [1000 x i8], ptr %buf_from, i64 0, i64 27
+  store i8 27, ptr %arrayidx.27, align 1
+  %arrayidx.28 = getelementptr inbounds [1000 x i8], ptr %buf_from, i64 0, i64 28
+  store i8 28, ptr %arrayidx.28, align 4
+  %arrayidx.29 = getelementptr inbounds [1000 x i8], ptr %buf_from, i64 0, i64 29
+  store i8 29, ptr %arrayidx.29, align 1
+  %arrayidx.30 = getelementptr inbounds [1000 x i8], ptr %buf_from, i64 0, i64 30
+  store i8 30, ptr %arrayidx.30, align 2
+  call void @fn(ptr nonnull %buf_from, ptr nonnull %buf_to)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-mops-mte.ll b/llvm/test/CodeGen/AArch64/aarch64-mops-mte.ll
index 5b71648ac1a92..1fe1308d8b351 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-mops-mte.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-mops-mte.ll
@@ -4,9 +4,9 @@
 ; RUN: llc %s -o - -mtriple=aarch64-arm-none-eabi     -global-isel=1 -global-isel-abort=1 -mattr=+mops,+mte  | FileCheck %s --check-prefix=GISel
 ; RUN: llc %s -o - -mtriple=aarch64-arm-none-eabi -O2 -mattr=+mops,+mte  | FileCheck %s --check-prefix=SDAG
 
-declare i8* @llvm.aarch64.mops.memset.tag(i8*, i8, i64)
+declare ptr @llvm.aarch64.mops.memset.tag(ptr, i8, i64)
 
-define i8* @memset_tagged_0_zeroval(i8* %dst, i64 %size) {
+define ptr @memset_tagged_0_zeroval(ptr %dst, i64 %size) {
 ; GISel-O0-LABEL: memset_tagged_0_zeroval:
 ; GISel-O0:       // %bb.0: // %entry
 ; GISel-O0-NEXT:    mov x8, xzr
@@ -31,11 +31,11 @@ define i8* @memset_tagged_0_zeroval(i8* %dst, i64 %size) {
 ; SDAG-NEXT:    setge [x0]!, x8!, xzr
 ; SDAG-NEXT:    ret
 entry:
-  %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 0, i64 0)
-  ret i8* %r
+  %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 0, i64 0)
+  ret ptr %r
 }
 
-define i8* @memset_tagged_1_zeroval(i8* %dst, i64 %size) {
+define ptr @memset_tagged_1_zeroval(ptr %dst, i64 %size) {
 ; GISel-O0-LABEL: memset_tagged_1_zeroval:
 ; GISel-O0:       // %bb.0: // %entry
 ; GISel-O0-NEXT:    mov x9, xzr
@@ -62,11 +62,11 @@ define i8* @memset_tagged_1_zeroval(i8* %dst, i64 %size) {
 ; SDAG-NEXT:    setge [x0]!, x8!, xzr
 ; SDAG-NEXT:    ret
 entry:
-  %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 0, i64 1)
-  ret i8* %r
+  %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 0, i64 1)
+  ret ptr %r
 }
 
-define i8* @memset_tagged_10_zeroval(i8* %dst, i64 %size) {
+define ptr @memset_tagged_10_zeroval(ptr %dst, i64 %size) {
 ; GISel-O0-LABEL: memset_tagged_10_zeroval:
 ; GISel-O0:       // %bb.0: // %entry
 ; GISel-O0-NEXT:    mov x9, xzr
@@ -93,11 +93,11 @@ define i8* @memset_tagged_10_zeroval(i8* %dst, i64 %size) {
 ; SDAG-NEXT:    setge [x0]!, x8!, xzr
 ; SDAG-NEXT:    ret
 entry:
-  %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 0, i64 10)
-  ret i8* %r
+  %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 0, i64 10)
+  ret ptr %r
 }
 
-define i8* @memset_tagged_10000_zeroval(i8* %dst, i64 %size) {
+define ptr @memset_tagged_10000_zeroval(ptr %dst, i64 %size) {
 ; GISel-O0-LABEL: memset_tagged_10000_zeroval:
 ; GISel-O0:       // %bb.0: // %entry
 ; GISel-O0-NEXT:    mov x9, xzr
@@ -124,11 +124,11 @@ define i8* @memset_tagged_10000_zeroval(i8* %dst, i64 %size) {
 ; SDAG-NEXT:    setge [x0]!, x8!, xzr
 ; SDAG-NEXT:    ret
 entry:
-  %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 0, i64 10000)
-  ret i8* %r
+  %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 0, i64 10000)
+  ret ptr %r
 }
 
-define i8* @memset_tagged_size_zeroval(i8* %dst, i64 %size) {
+define ptr @memset_tagged_size_zeroval(ptr %dst, i64 %size) {
 ; GISel-O0-LABEL: memset_tagged_size_zeroval:
 ; GISel-O0:       // %bb.0: // %entry
 ; GISel-O0-NEXT:    mov x8, xzr
@@ -151,11 +151,11 @@ define i8* @memset_tagged_size_zeroval(i8* %dst, i64 %size) {
 ; SDAG-NEXT:    setge [x0]!, x1!, xzr
 ; SDAG-NEXT:    ret
 entry:
-  %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 0, i64 %size)
-  ret i8* %r
+  %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 0, i64 %size)
+  ret ptr %r
 }
 
-define i8* @memset_tagged_0(i8* %dst, i64 %size, i32 %value) {
+define ptr @memset_tagged_0(ptr %dst, i64 %size, i32 %value) {
 ; GISel-O0-LABEL: memset_tagged_0:
 ; GISel-O0:       // %bb.0: // %entry
 ; GISel-O0-NEXT:    // implicit-def: $x9
@@ -185,11 +185,11 @@ define i8* @memset_tagged_0(i8* %dst, i64 %size, i32 %value) {
 ; SDAG-NEXT:    ret
 entry:
   %value_trunc = trunc i32 %value to i8
-  %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 %value_trunc, i64 0)
-  ret i8* %r
+  %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 %value_trunc, i64 0)
+  ret ptr %r
 }
 
-define i8* @memset_tagged_1(i8* %dst, i64 %size, i32 %value) {
+define ptr @memset_tagged_1(ptr %dst, i64 %size, i32 %value) {
 ; GISel-O0-LABEL: memset_tagged_1:
 ; GISel-O0:       // %bb.0: // %entry
 ; GISel-O0-NEXT:    // implicit-def: $x9
@@ -220,11 +220,11 @@ define i8* @memset_tagged_1(i8* %dst, i64 %size, i32 %value) {
 ; SDAG-NEXT:    ret
 entry:
   %value_trunc = trunc i32 %value to i8
-  %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 %value_trunc, i64 1)
-  ret i8* %r
+  %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 %value_trunc, i64 1)
+  ret ptr %r
 }
 
-define i8* @memset_tagged_10(i8* %dst, i64 %size, i32 %value) {
+define ptr @memset_tagged_10(ptr %dst, i64 %size, i32 %value) {
 ; GISel-O0-LABEL: memset_tagged_10:
 ; GISel-O0:       // %bb.0: // %entry
 ; GISel-O0-NEXT:    // implicit-def: $x9
@@ -255,11 +255,11 @@ define i8* @memset_tagged_10(i8* %dst, i64 %size, i32 %value) {
 ; SDAG-NEXT:    ret
 entry:
   %value_trunc = trunc i32 %value to i8
-  %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 %value_trunc, i64 10)
-  ret i8* %r
+  %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 %value_trunc, i64 10)
+  ret ptr %r
 }
 
-define i8* @memset_tagged_10000(i8* %dst, i64 %size, i32 %value) {
+define ptr @memset_tagged_10000(ptr %dst, i64 %size, i32 %value) {
 ; GISel-O0-LABEL: memset_tagged_10000:
 ; GISel-O0:       // %bb.0: // %entry
 ; GISel-O0-NEXT:    // implicit-def: $x9
@@ -290,11 +290,11 @@ define i8* @memset_tagged_10000(i8* %dst, i64 %size, i32 %value) {
 ; SDAG-NEXT:    ret
 entry:
   %value_trunc = trunc i32 %value to i8
-  %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 %value_trunc, i64 10000)
-  ret i8* %r
+  %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 %value_trunc, i64 10000)
+  ret ptr %r
 }
 
-define i8* @memset_tagged_size(i8* %dst, i64 %size, i32 %value) {
+define ptr @memset_tagged_size(ptr %dst, i64 %size, i32 %value) {
 ; GISel-O0-LABEL: memset_tagged_size:
 ; GISel-O0:       // %bb.0: // %entry
 ; GISel-O0-NEXT:    // implicit-def: $x8
@@ -321,11 +321,11 @@ define i8* @memset_tagged_size(i8* %dst, i64 %size, i32 %value) {
 ; SDAG-NEXT:    ret
 entry:
   %value_trunc = trunc i32 %value to i8
-  %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* %dst, i8 %value_trunc, i64 %size)
-  ret i8* %r
+  %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 %value_trunc, i64 %size)
+  ret ptr %r
 }
 
-define i8* @memset_tagged_size_aligned(i8* %dst, i64 %size, i32 %value) {
+define ptr @memset_tagged_size_aligned(ptr %dst, i64 %size, i32 %value) {
 ; GISel-O0-LABEL: memset_tagged_size_aligned:
 ; GISel-O0:       // %bb.0: // %entry
 ; GISel-O0-NEXT:    // implicit-def: $x8
@@ -352,6 +352,6 @@ define i8* @memset_tagged_size_aligned(i8* %dst, i64 %size, i32 %value) {
 ; SDAG-NEXT:    ret
 entry:
   %value_trunc = trunc i32 %value to i8
-  %r = tail call i8* @llvm.aarch64.mops.memset.tag(i8* align 16 %dst, i8 %value_trunc, i64 %size)
-  ret i8* %r
+  %r = tail call ptr @llvm.aarch64.mops.memset.tag(ptr align 16 %dst, i8 %value_trunc, i64 %size)
+  ret ptr %r
 }

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-mops.ll b/llvm/test/CodeGen/AArch64/aarch64-mops.ll
index 5a880d3e59aeb..e342f37ebe479 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-mops.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-mops.ll
@@ -7,15 +7,15 @@
 ; RUN: llc %s -o - -mtriple=aarch64-arm-none-eabi -O2                    | FileCheck %s --check-prefix=SDAG-WITHOUT-MOPS-O2
 ; RUN: llc %s -o - -mtriple=aarch64-arm-none-eabi -O2 -mattr=+mops       | FileCheck %s --check-prefix=SDAG-MOPS-O2
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg)
 
-declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1 immarg)
+declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg)
 
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1 immarg)
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg)
 
-define void @memset_0_zeroval(i8* %dst) {
+define void @memset_0_zeroval(ptr %dst) {
 ; GISel-WITHOUT-MOPS-LABEL: memset_0_zeroval:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    ret
@@ -32,11 +32,11 @@ define void @memset_0_zeroval(i8* %dst) {
 ; SDAG-MOPS-O2:       // %bb.0: // %entry
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 0, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 0, i1 false)
   ret void
 }
 
-define void @memset_0_zeroval_volatile(i8* %dst) {
+define void @memset_0_zeroval_volatile(ptr %dst) {
 ; GISel-WITHOUT-MOPS-LABEL: memset_0_zeroval_volatile:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    ret
@@ -53,11 +53,11 @@ define void @memset_0_zeroval_volatile(i8* %dst) {
 ; SDAG-MOPS-O2:       // %bb.0: // %entry
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 0, i1 true)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 0, i1 true)
   ret void
 }
 
-define void @memset_10_zeroval(i8* %dst) {
+define void @memset_10_zeroval(ptr %dst) {
 ; GISel-WITHOUT-MOPS-LABEL: memset_10_zeroval:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    str xzr, [x0]
@@ -82,11 +82,11 @@ define void @memset_10_zeroval(i8* %dst) {
 ; SDAG-MOPS-O2-NEXT:    str xzr, [x0]
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 10, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 10, i1 false)
   ret void
 }
 
-define void @memset_10_zeroval_volatile(i8* %dst) {
+define void @memset_10_zeroval_volatile(ptr %dst) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memset_10_zeroval_volatile:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -140,11 +140,11 @@ define void @memset_10_zeroval_volatile(i8* %dst) {
 ; SDAG-MOPS-O2-NEXT:    str xzr, [x0]
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 10, i1 true)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 10, i1 true)
   ret void
 }
 
-define void @memset_10000_zeroval(i8* %dst) {
+define void @memset_10000_zeroval(ptr %dst) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memset_10000_zeroval:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -205,11 +205,11 @@ define void @memset_10000_zeroval(i8* %dst) {
 ; SDAG-MOPS-O2-NEXT:    sete [x0]!, x8!, xzr
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 10000, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 10000, i1 false)
   ret void
 }
 
-define void @memset_10000_zeroval_volatile(i8* %dst) {
+define void @memset_10000_zeroval_volatile(ptr %dst) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memset_10000_zeroval_volatile:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -270,11 +270,11 @@ define void @memset_10000_zeroval_volatile(i8* %dst) {
 ; SDAG-MOPS-O2-NEXT:    sete [x0]!, x8!, xzr
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 10000, i1 true)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 10000, i1 true)
   ret void
 }
 
-define void @memset_size_zeroval(i8* %dst, i64 %size) {
+define void @memset_size_zeroval(ptr %dst, i64 %size) {
 ; GISel-WITHOUT-MOPS-LABEL: memset_size_zeroval:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -319,11 +319,11 @@ define void @memset_size_zeroval(i8* %dst, i64 %size) {
 ; SDAG-MOPS-O2-NEXT:    sete [x0]!, x1!, xzr
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 %size, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 %size, i1 false)
   ret void
 }
 
-define void @memset_size_zeroval_volatile(i8* %dst, i64 %size) {
+define void @memset_size_zeroval_volatile(ptr %dst, i64 %size) {
 ; GISel-WITHOUT-MOPS-LABEL: memset_size_zeroval_volatile:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -368,12 +368,12 @@ define void @memset_size_zeroval_volatile(i8* %dst, i64 %size) {
 ; SDAG-MOPS-O2-NEXT:    sete [x0]!, x1!, xzr
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 %size, i1 true)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 %size, i1 true)
   ret void
 }
 
 
-define void @memset_0(i8* %dst, i32 %value) {
+define void @memset_0(ptr %dst, i32 %value) {
 ; GISel-WITHOUT-MOPS-LABEL: memset_0:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    ret
@@ -391,11 +391,11 @@ define void @memset_0(i8* %dst, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
   %value_trunc = trunc i32 %value to i8
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 0, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 0, i1 false)
   ret void
 }
 
-define void @memset_0_volatile(i8* %dst, i32 %value) {
+define void @memset_0_volatile(ptr %dst, i32 %value) {
 ; GISel-WITHOUT-MOPS-LABEL: memset_0_volatile:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    ret
@@ -413,11 +413,11 @@ define void @memset_0_volatile(i8* %dst, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
   %value_trunc = trunc i32 %value to i8
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 0, i64 0, i1 true)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 0, i64 0, i1 true)
   ret void
 }
 
-define void @memset_10(i8* %dst, i32 %value) {
+define void @memset_10(ptr %dst, i32 %value) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memset_10:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    // implicit-def: $x8
@@ -483,11 +483,11 @@ define void @memset_10(i8* %dst, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
   %value_trunc = trunc i32 %value to i8
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 %value_trunc, i64 10, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 %value_trunc, i64 10, i1 false)
   ret void
 }
 
-define void @memset_10_volatile(i8* %dst, i32 %value) {
+define void @memset_10_volatile(ptr %dst, i32 %value) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memset_10_volatile:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -550,11 +550,11 @@ define void @memset_10_volatile(i8* %dst, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
   %value_trunc = trunc i32 %value to i8
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 %value_trunc, i64 10, i1 true)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 %value_trunc, i64 10, i1 true)
   ret void
 }
 
-define void @memset_10000(i8* %dst, i32 %value) {
+define void @memset_10000(ptr %dst, i32 %value) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memset_10000:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -616,11 +616,11 @@ define void @memset_10000(i8* %dst, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
   %value_trunc = trunc i32 %value to i8
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 %value_trunc, i64 10000, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 %value_trunc, i64 10000, i1 false)
   ret void
 }
 
-define void @memset_10000_volatile(i8* %dst, i32 %value) {
+define void @memset_10000_volatile(ptr %dst, i32 %value) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memset_10000_volatile:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -682,11 +682,11 @@ define void @memset_10000_volatile(i8* %dst, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
   %value_trunc = trunc i32 %value to i8
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 %value_trunc, i64 10000, i1 true)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 %value_trunc, i64 10000, i1 true)
   ret void
 }
 
-define void @memset_size(i8* %dst, i64 %size, i32 %value) {
+define void @memset_size(ptr %dst, i64 %size, i32 %value) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memset_size:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    sub sp, sp, #32
@@ -751,11 +751,11 @@ define void @memset_size(i8* %dst, i64 %size, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
   %value_trunc = trunc i32 %value to i8
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 %value_trunc, i64 %size, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 %value_trunc, i64 %size, i1 false)
   ret void
 }
 
-define void @memset_size_volatile(i8* %dst, i64 %size, i32 %value) {
+define void @memset_size_volatile(ptr %dst, i64 %size, i32 %value) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memset_size_volatile:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    sub sp, sp, #32
@@ -820,12 +820,12 @@ define void @memset_size_volatile(i8* %dst, i64 %size, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
   %value_trunc = trunc i32 %value to i8
-  call void @llvm.memset.p0i8.i64(i8* align 1 %dst, i8 %value_trunc, i64 %size, i1 true)
+  call void @llvm.memset.p0.i64(ptr align 1 %dst, i8 %value_trunc, i64 %size, i1 true)
   ret void
 }
 
 
-define void @memcpy_0(i8* %dst, i8* %src, i32 %value) {
+define void @memcpy_0(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-LABEL: memcpy_0:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    ret
@@ -842,11 +842,11 @@ define void @memcpy_0(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2:       // %bb.0: // %entry
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 0, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 0, i1 false)
   ret void
 }
 
-define void @memcpy_0_volatile(i8* %dst, i8* %src, i32 %value) {
+define void @memcpy_0_volatile(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-LABEL: memcpy_0_volatile:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    ret
@@ -863,11 +863,11 @@ define void @memcpy_0_volatile(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2:       // %bb.0: // %entry
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 0, i1 true)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 0, i1 true)
   ret void
 }
 
-define void @memcpy_10(i8* %dst, i8* %src, i32 %value) {
+define void @memcpy_10(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-LABEL: memcpy_10:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    ldr x8, [x1]
@@ -900,11 +900,11 @@ define void @memcpy_10(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    str x9, [x0]
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 10, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 10, i1 false)
   ret void
 }
 
-define void @memcpy_10_volatile(i8* %dst, i8* %src, i32 %value) {
+define void @memcpy_10_volatile(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memcpy_10_volatile:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -959,11 +959,11 @@ define void @memcpy_10_volatile(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    str x8, [x0]
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 10, i1 true)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 10, i1 true)
   ret void
 }
 
-define void @memcpy_1000(i8* %dst, i8* %src, i32 %value) {
+define void @memcpy_1000(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memcpy_1000:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -1020,11 +1020,11 @@ define void @memcpy_1000(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    cpyfe [x0]!, [x1]!, x8!
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 1000, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 1000, i1 false)
   ret void
 }
 
-define void @memcpy_1000_volatile(i8* %dst, i8* %src, i32 %value) {
+define void @memcpy_1000_volatile(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memcpy_1000_volatile:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -1081,11 +1081,11 @@ define void @memcpy_1000_volatile(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    cpyfe [x0]!, [x1]!, x8!
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 1000, i1 true)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 1000, i1 true)
   ret void
 }
 
-define void @memcpy_n(i8* %dst, i8* %src, i64 %size, i32 %value) {
+define void @memcpy_n(ptr %dst, ptr %src, i64 %size, i32 %value) {
 ; GISel-WITHOUT-MOPS-LABEL: memcpy_n:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -1118,11 +1118,11 @@ define void @memcpy_n(i8* %dst, i8* %src, i64 %size, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    cpyfe [x0]!, [x1]!, x2!
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 %size, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 %size, i1 false)
   ret void
 }
 
-define void @memcpy_n_volatile(i8* %dst, i8* %src, i64 %size, i32 %value) {
+define void @memcpy_n_volatile(ptr %dst, ptr %src, i64 %size, i32 %value) {
 ; GISel-WITHOUT-MOPS-LABEL: memcpy_n_volatile:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -1155,12 +1155,12 @@ define void @memcpy_n_volatile(i8* %dst, i8* %src, i64 %size, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    cpyfe [x0]!, [x1]!, x2!
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 %size, i1 true)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 %size, i1 true)
   ret void
 }
 
 
-define void @memcpy_inline_0(i8* %dst, i8* %src, i32 %value) {
+define void @memcpy_inline_0(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-LABEL: memcpy_inline_0:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    ret
@@ -1177,11 +1177,11 @@ define void @memcpy_inline_0(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2:       // %bb.0: // %entry
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 0, i1 false)
+  call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 0, i1 false)
   ret void
 }
 
-define void @memcpy_inline_0_volatile(i8* %dst, i8* %src, i32 %value) {
+define void @memcpy_inline_0_volatile(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-LABEL: memcpy_inline_0_volatile:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    ret
@@ -1198,11 +1198,11 @@ define void @memcpy_inline_0_volatile(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2:       // %bb.0: // %entry
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 0, i1 true)
+  call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 0, i1 true)
   ret void
 }
 
-define void @memcpy_inline_10(i8* %dst, i8* %src, i32 %value) {
+define void @memcpy_inline_10(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-LABEL: memcpy_inline_10:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    ldr x8, [x1]
@@ -1235,11 +1235,11 @@ define void @memcpy_inline_10(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    str x9, [x0]
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 10, i1 false)
+  call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 10, i1 false)
   ret void
 }
 
-define void @memcpy_inline_10_volatile(i8* %dst, i8* %src, i32 %value) {
+define void @memcpy_inline_10_volatile(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-LABEL: memcpy_inline_10_volatile:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    ldr x8, [x1]
@@ -1272,11 +1272,11 @@ define void @memcpy_inline_10_volatile(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    str x8, [x0]
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 10, i1 true)
+  call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 10, i1 true)
   ret void
 }
 
-define void @memcpy_inline_300(i8* %dst, i8* %src, i32 %value) {
+define void @memcpy_inline_300(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memcpy_inline_300:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    ldr q0, [x1]
@@ -1489,11 +1489,11 @@ define void @memcpy_inline_300(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    cpyfe [x0]!, [x1]!, x8!
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 300, i1 false)
+  call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 300, i1 false)
   ret void
 }
 
-define void @memcpy_inline_300_volatile(i8* %dst, i8* %src, i32 %value) {
+define void @memcpy_inline_300_volatile(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-LABEL: memcpy_inline_300_volatile:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    ldr q0, [x1]
@@ -1634,11 +1634,11 @@ define void @memcpy_inline_300_volatile(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    cpyfe [x0]!, [x1]!, x8!
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 300, i1 true)
+  call void @llvm.memcpy.inline.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 300, i1 true)
   ret void
 }
 
-define void @memmove_0(i8* %dst, i8* %src, i32 %value) {
+define void @memmove_0(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-LABEL: memmove_0:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    ret
@@ -1655,11 +1655,11 @@ define void @memmove_0(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2:       // %bb.0: // %entry
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 0, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 0, i1 false)
   ret void
 }
 
-define void @memmove_0_volatile(i8* %dst, i8* %src, i32 %value) {
+define void @memmove_0_volatile(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-LABEL: memmove_0_volatile:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    ret
@@ -1676,11 +1676,11 @@ define void @memmove_0_volatile(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2:       // %bb.0: // %entry
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 0, i1 true)
+  call void @llvm.memmove.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 0, i1 true)
   ret void
 }
 
-define void @memmove_10(i8* %dst, i8* %src, i32 %value) {
+define void @memmove_10(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memmove_10:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    ldr x9, [x1]
@@ -1729,11 +1729,11 @@ define void @memmove_10(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    str x9, [x0]
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 10, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 10, i1 false)
   ret void
 }
 
-define void @memmove_10_volatile(i8* %dst, i8* %src, i32 %value) {
+define void @memmove_10_volatile(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memmove_10_volatile:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -1788,11 +1788,11 @@ define void @memmove_10_volatile(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    str x8, [x0]
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 10, i1 true)
+  call void @llvm.memmove.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 10, i1 true)
   ret void
 }
 
-define void @memmove_1000(i8* %dst, i8* %src, i32 %value) {
+define void @memmove_1000(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memmove_1000:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -1849,11 +1849,11 @@ define void @memmove_1000(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    cpye [x0]!, [x1]!, x8!
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 1000, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 1000, i1 false)
   ret void
 }
 
-define void @memmove_1000_volatile(i8* %dst, i8* %src, i32 %value) {
+define void @memmove_1000_volatile(ptr %dst, ptr %src, i32 %value) {
 ; GISel-WITHOUT-MOPS-O0-LABEL: memmove_1000_volatile:
 ; GISel-WITHOUT-MOPS-O0:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-O0-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -1910,11 +1910,11 @@ define void @memmove_1000_volatile(i8* %dst, i8* %src, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    cpye [x0]!, [x1]!, x8!
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 1000, i1 true)
+  call void @llvm.memmove.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 1000, i1 true)
   ret void
 }
 
-define void @memmove_n(i8* %dst, i8* %src, i64 %size, i32 %value) {
+define void @memmove_n(ptr %dst, ptr %src, i64 %size, i32 %value) {
 ; GISel-WITHOUT-MOPS-LABEL: memmove_n:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -1947,11 +1947,11 @@ define void @memmove_n(i8* %dst, i8* %src, i64 %size, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    cpye [x0]!, [x1]!, x2!
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 %size, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 %size, i1 false)
   ret void
 }
 
-define void @memmove_n_volatile(i8* %dst, i8* %src, i64 %size, i32 %value) {
+define void @memmove_n_volatile(ptr %dst, ptr %src, i64 %size, i32 %value) {
 ; GISel-WITHOUT-MOPS-LABEL: memmove_n_volatile:
 ; GISel-WITHOUT-MOPS:       // %bb.0: // %entry
 ; GISel-WITHOUT-MOPS-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -1984,6 +1984,6 @@ define void @memmove_n_volatile(i8* %dst, i8* %src, i64 %size, i32 %value) {
 ; SDAG-MOPS-O2-NEXT:    cpye [x0]!, [x1]!, x2!
 ; SDAG-MOPS-O2-NEXT:    ret
 entry:
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src, i64 %size, i1 true)
+  call void @llvm.memmove.p0.p0.i64(ptr align 1 %dst, ptr align 1 %src, i64 %size, i1 true)
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll b/llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll
index 6172cb0df142c..71db09f770624 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-mull-masks.ll
@@ -77,7 +77,7 @@ entry:
   ret i64 %mul
 }
 
-define i64 @smull_ldrsb_b(i8* %x0, i8 %x1) {
+define i64 @smull_ldrsb_b(ptr %x0, i8 %x1) {
 ; CHECK-LABEL: smull_ldrsb_b:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsb x8, [x0]
@@ -86,14 +86,14 @@ define i64 @smull_ldrsb_b(i8* %x0, i8 %x1) {
 ; CHECK-NEXT:    smull x0, w8, w9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %sext = sext i8 %ext64 to i64
   %sext4 = sext i8 %x1 to i64
   %mul = mul i64 %sext, %sext4
   ret i64 %mul
 }
 
-define i64 @smull_ldrsb_b_commuted(i8* %x0, i8 %x1) {
+define i64 @smull_ldrsb_b_commuted(ptr %x0, i8 %x1) {
 ; CHECK-LABEL: smull_ldrsb_b_commuted:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsb x8, [x0]
@@ -102,14 +102,14 @@ define i64 @smull_ldrsb_b_commuted(i8* %x0, i8 %x1) {
 ; CHECK-NEXT:    smull x0, w9, w8
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %sext = sext i8 %ext64 to i64
   %sext4 = sext i8 %x1 to i64
   %mul = mul i64 %sext4, %sext
   ret i64 %mul
 }
 
-define i64 @smull_ldrsb_h(i8* %x0, i16 %x1) {
+define i64 @smull_ldrsb_h(ptr %x0, i16 %x1) {
 ; CHECK-LABEL: smull_ldrsb_h:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsb x8, [x0]
@@ -118,28 +118,28 @@ define i64 @smull_ldrsb_h(i8* %x0, i16 %x1) {
 ; CHECK-NEXT:    smull x0, w8, w9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %sext = sext i8 %ext64 to i64
   %sext4 = sext i16 %x1 to i64
   %mul = mul i64 %sext, %sext4
   ret i64 %mul
 }
 
-define i64 @smull_ldrsb_w(i8* %x0, i32 %x1) {
+define i64 @smull_ldrsb_w(ptr %x0, i32 %x1) {
 ; CHECK-LABEL: smull_ldrsb_w:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsb x8, [x0]
 ; CHECK-NEXT:    smull x0, w8, w1
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %sext = sext i8 %ext64 to i64
   %sext4 = sext i32 %x1 to i64
   %mul = mul i64 %sext, %sext4
   ret i64 %mul
 }
 
-define i64 @smull_ldrsh_b(i16* %x0, i8 %x1) {
+define i64 @smull_ldrsh_b(ptr %x0, i8 %x1) {
 ; CHECK-LABEL: smull_ldrsh_b:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsh x8, [x0]
@@ -148,14 +148,14 @@ define i64 @smull_ldrsh_b(i16* %x0, i8 %x1) {
 ; CHECK-NEXT:    smull x0, w8, w9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i16, i16* %x0
+  %ext64 = load i16, ptr %x0
   %sext = sext i16 %ext64 to i64
   %sext4 = sext i8 %x1 to i64
   %mul = mul i64 %sext, %sext4
   ret i64 %mul
 }
 
-define i64 @smull_ldrsh_h(i16* %x0, i16 %x1) {
+define i64 @smull_ldrsh_h(ptr %x0, i16 %x1) {
 ; CHECK-LABEL: smull_ldrsh_h:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsh x8, [x0]
@@ -164,14 +164,14 @@ define i64 @smull_ldrsh_h(i16* %x0, i16 %x1) {
 ; CHECK-NEXT:    smull x0, w8, w9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i16, i16* %x0
+  %ext64 = load i16, ptr %x0
   %sext = sext i16 %ext64 to i64
   %sext4 = sext i16 %x1 to i64
   %mul = mul i64 %sext, %sext4
   ret i64 %mul
 }
 
-define i64 @smull_ldrsh_h_commuted(i16* %x0, i16 %x1) {
+define i64 @smull_ldrsh_h_commuted(ptr %x0, i16 %x1) {
 ; CHECK-LABEL: smull_ldrsh_h_commuted:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsh x8, [x0]
@@ -180,28 +180,28 @@ define i64 @smull_ldrsh_h_commuted(i16* %x0, i16 %x1) {
 ; CHECK-NEXT:    smull x0, w9, w8
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i16, i16* %x0
+  %ext64 = load i16, ptr %x0
   %sext = sext i16 %ext64 to i64
   %sext4 = sext i16 %x1 to i64
   %mul = mul i64 %sext4, %sext
   ret i64 %mul
 }
 
-define i64 @smull_ldrsh_w(i16* %x0, i32 %x1) {
+define i64 @smull_ldrsh_w(ptr %x0, i32 %x1) {
 ; CHECK-LABEL: smull_ldrsh_w:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsh x8, [x0]
 ; CHECK-NEXT:    smull x0, w8, w1
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i16, i16* %x0
+  %ext64 = load i16, ptr %x0
   %sext = sext i16 %ext64 to i64
   %sext4 = sext i32 %x1 to i64
   %mul = mul i64 %sext, %sext4
   ret i64 %mul
 }
 
-define i64 @smull_ldrsw_b(i32* %x0, i8 %x1) {
+define i64 @smull_ldrsw_b(ptr %x0, i8 %x1) {
 ; CHECK-LABEL: smull_ldrsw_b:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -210,14 +210,14 @@ define i64 @smull_ldrsw_b(i32* %x0, i8 %x1) {
 ; CHECK-NEXT:    smull x0, w8, w9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %sext4 = sext i8 %x1 to i64
   %mul = mul i64 %sext, %sext4
   ret i64 %mul
 }
 
-define i64 @smull_ldrsw_h(i32* %x0, i16 %x1) {
+define i64 @smull_ldrsw_h(ptr %x0, i16 %x1) {
 ; CHECK-LABEL: smull_ldrsw_h:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -226,35 +226,35 @@ define i64 @smull_ldrsw_h(i32* %x0, i16 %x1) {
 ; CHECK-NEXT:    smull x0, w8, w9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %sext4 = sext i16 %x1 to i64
   %mul = mul i64 %sext, %sext4
   ret i64 %mul
 }
 
-define i64 @smull_ldrsw_w(i32* %x0, i32 %x1) {
+define i64 @smull_ldrsw_w(ptr %x0, i32 %x1) {
 ; CHECK-LABEL: smull_ldrsw_w:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsw x8, [x0]
 ; CHECK-NEXT:    smull x0, w8, w1
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %sext4 = sext i32 %x1 to i64
   %mul = mul i64 %sext, %sext4
   ret i64 %mul
 }
 
-define i64 @smull_ldrsw_w_commuted(i32* %x0, i32 %x1) {
+define i64 @smull_ldrsw_w_commuted(ptr %x0, i32 %x1) {
 ; CHECK-LABEL: smull_ldrsw_w_commuted:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsw x8, [x0]
 ; CHECK-NEXT:    smull x0, w8, w1
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %sext4 = sext i32 %x1 to i64
   %mul = mul i64 %sext4, %sext
@@ -277,7 +277,7 @@ entry:
   ret i64 %mul
 }
 
-define i64 @smull_ldrsw_shift(i32* %x0, i64 %x1) {
+define i64 @smull_ldrsw_shift(ptr %x0, i64 %x1) {
 ; CHECK-LABEL: smull_ldrsw_shift:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -285,7 +285,7 @@ define i64 @smull_ldrsw_shift(i32* %x0, i64 %x1) {
 ; CHECK-NEXT:    smull x0, w8, w9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %shl = shl i64 %x1, 32
   %shr = ashr exact i64 %shl, 32
@@ -293,7 +293,7 @@ entry:
   ret i64 %mul
 }
 
-define i64 @smull_ldrsh_zextw(i16* %x0, i32 %x1) {
+define i64 @smull_ldrsh_zextw(ptr %x0, i32 %x1) {
 ; CHECK-LABEL: smull_ldrsh_zextw:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsh x8, [x0]
@@ -301,14 +301,14 @@ define i64 @smull_ldrsh_zextw(i16* %x0, i32 %x1) {
 ; CHECK-NEXT:    mul x0, x8, x9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i16, i16* %x0
+  %ext64 = load i16, ptr %x0
   %sext = sext i16 %ext64 to i64
   %zext = zext i32 %x1 to i64
   %mul = mul i64 %sext, %zext
   ret i64 %mul
 }
 
-define i64 @smull_ldrsw_zexth(i32* %x0, i16 %x1) {
+define i64 @smull_ldrsw_zexth(ptr %x0, i16 %x1) {
 ; CHECK-LABEL: smull_ldrsw_zexth:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -317,14 +317,14 @@ define i64 @smull_ldrsw_zexth(i32* %x0, i16 %x1) {
 ; CHECK-NEXT:    smull x0, w8, w9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %zext = zext i16 %x1 to i64
   %mul = mul i64 %sext, %zext
   ret i64 %mul
 }
 
-define i64 @smull_ldrsw_zextb(i32* %x0, i8 %x1) {
+define i64 @smull_ldrsw_zextb(ptr %x0, i8 %x1) {
 ; CHECK-LABEL: smull_ldrsw_zextb:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -333,14 +333,14 @@ define i64 @smull_ldrsw_zextb(i32* %x0, i8 %x1) {
 ; CHECK-NEXT:    smull x0, w8, w9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %zext = zext i8 %x1 to i64
   %mul = mul i64 %sext, %zext
   ret i64 %mul
 }
 
-define i64 @smull_ldrsw_zextb_commuted(i32* %x0, i8 %x1) {
+define i64 @smull_ldrsw_zextb_commuted(ptr %x0, i8 %x1) {
 ; CHECK-LABEL: smull_ldrsw_zextb_commuted:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -349,14 +349,14 @@ define i64 @smull_ldrsw_zextb_commuted(i32* %x0, i8 %x1) {
 ; CHECK-NEXT:    smull x0, w9, w8
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %zext = zext i8 %x1 to i64
   %mul = mul i64 %zext, %sext
   ret i64 %mul
 }
 
-define i64 @smaddl_ldrsb_h(i8* %x0, i16 %x1, i64 %x2) {
+define i64 @smaddl_ldrsb_h(ptr %x0, i16 %x1, i64 %x2) {
 ; CHECK-LABEL: smaddl_ldrsb_h:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsb x8, [x0]
@@ -365,7 +365,7 @@ define i64 @smaddl_ldrsb_h(i8* %x0, i16 %x1, i64 %x2) {
 ; CHECK-NEXT:    smaddl x0, w8, w9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %sext = sext i8 %ext64 to i64
   %sext4 = sext i16 %x1 to i64
   %mul = mul i64 %sext, %sext4
@@ -373,7 +373,7 @@ entry:
   ret i64 %add
 }
 
-define i64 @smaddl_ldrsb_h_commuted(i8* %x0, i16 %x1, i64 %x2) {
+define i64 @smaddl_ldrsb_h_commuted(ptr %x0, i16 %x1, i64 %x2) {
 ; CHECK-LABEL: smaddl_ldrsb_h_commuted:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsb x8, [x0]
@@ -382,7 +382,7 @@ define i64 @smaddl_ldrsb_h_commuted(i8* %x0, i16 %x1, i64 %x2) {
 ; CHECK-NEXT:    smaddl x0, w9, w8, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %sext = sext i8 %ext64 to i64
   %sext4 = sext i16 %x1 to i64
   %mul = mul i64 %sext4, %sext
@@ -390,14 +390,14 @@ entry:
   ret i64 %add
 }
 
-define i64 @smaddl_ldrsh_w(i16* %x0, i32 %x1, i64 %x2) {
+define i64 @smaddl_ldrsh_w(ptr %x0, i32 %x1, i64 %x2) {
 ; CHECK-LABEL: smaddl_ldrsh_w:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsh x8, [x0]
 ; CHECK-NEXT:    smaddl x0, w8, w1, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i16, i16* %x0
+  %ext64 = load i16, ptr %x0
   %sext = sext i16 %ext64 to i64
   %sext4 = sext i32 %x1 to i64
   %mul = mul i64 %sext, %sext4
@@ -405,14 +405,14 @@ entry:
   ret i64 %add
 }
 
-define i64 @smaddl_ldrsh_w_commuted(i16* %x0, i32 %x1, i64 %x2) {
+define i64 @smaddl_ldrsh_w_commuted(ptr %x0, i32 %x1, i64 %x2) {
 ; CHECK-LABEL: smaddl_ldrsh_w_commuted:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsh x8, [x0]
 ; CHECK-NEXT:    smaddl x0, w8, w1, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i16, i16* %x0
+  %ext64 = load i16, ptr %x0
   %sext = sext i16 %ext64 to i64
   %sext4 = sext i32 %x1 to i64
   %mul = mul i64 %sext4, %sext
@@ -420,7 +420,7 @@ entry:
   ret i64 %add
 }
 
-define i64 @smaddl_ldrsw_b(i32* %x0, i8 %x1, i64 %x2) {
+define i64 @smaddl_ldrsw_b(ptr %x0, i8 %x1, i64 %x2) {
 ; CHECK-LABEL: smaddl_ldrsw_b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -428,7 +428,7 @@ define i64 @smaddl_ldrsw_b(i32* %x0, i8 %x1, i64 %x2) {
 ; CHECK-NEXT:    sxtb x9, w1
 ; CHECK-NEXT:    smaddl x0, w8, w9, x2
 ; CHECK-NEXT:    ret
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %sext2 = sext i8 %x1 to i64
   %mul = mul i64 %sext, %sext2
@@ -436,7 +436,7 @@ define i64 @smaddl_ldrsw_b(i32* %x0, i8 %x1, i64 %x2) {
   ret i64 %add
 }
 
-define i64 @smaddl_ldrsw_b_commuted(i32* %x0, i8 %x1, i64 %x2) {
+define i64 @smaddl_ldrsw_b_commuted(ptr %x0, i8 %x1, i64 %x2) {
 ; CHECK-LABEL: smaddl_ldrsw_b_commuted:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -444,7 +444,7 @@ define i64 @smaddl_ldrsw_b_commuted(i32* %x0, i8 %x1, i64 %x2) {
 ; CHECK-NEXT:    sxtb x9, w1
 ; CHECK-NEXT:    smaddl x0, w9, w8, x2
 ; CHECK-NEXT:    ret
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %sext2 = sext i8 %x1 to i64
   %mul = mul i64 %sext2, %sext
@@ -452,7 +452,7 @@ define i64 @smaddl_ldrsw_b_commuted(i32* %x0, i8 %x1, i64 %x2) {
   ret i64 %add
 }
 
-define i64 @smaddl_ldrsw_ldrsw(i32* %x0, i32* %x1, i64 %x2) {
+define i64 @smaddl_ldrsw_ldrsw(ptr %x0, ptr %x1, i64 %x2) {
 ; CHECK-LABEL: smaddl_ldrsw_ldrsw:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -460,8 +460,8 @@ define i64 @smaddl_ldrsw_ldrsw(i32* %x0, i32* %x1, i64 %x2) {
 ; CHECK-NEXT:    smaddl x0, w8, w9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
-  %ext64_2 = load i32, i32* %x1
+  %ext64 = load i32, ptr %x0
+  %ext64_2 = load i32, ptr %x1
   %sext = sext i32 %ext64 to i64
   %sext2 = sext i32 %ext64_2 to i64
   %mul = mul i64 %sext, %sext2
@@ -486,7 +486,7 @@ entry:
   ret i64 %add
 }
 
-define i64 @smaddl_ldrsw_shift(i32* %x0, i64 %x1, i64 %x2) {
+define i64 @smaddl_ldrsw_shift(ptr %x0, i64 %x1, i64 %x2) {
 ; CHECK-LABEL: smaddl_ldrsw_shift:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -494,7 +494,7 @@ define i64 @smaddl_ldrsw_shift(i32* %x0, i64 %x1, i64 %x2) {
 ; CHECK-NEXT:    smaddl x0, w8, w9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %shl = shl i64 %x1, 32
   %shr = ashr exact i64 %shl, 32
@@ -503,7 +503,7 @@ entry:
   ret i64 %add
 }
 
-define i64 @smaddl_ldrsw_zextb(i32* %x0, i8 %x1, i64 %x2) {
+define i64 @smaddl_ldrsw_zextb(ptr %x0, i8 %x1, i64 %x2) {
 ; CHECK-LABEL: smaddl_ldrsw_zextb:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -512,7 +512,7 @@ define i64 @smaddl_ldrsw_zextb(i32* %x0, i8 %x1, i64 %x2) {
 ; CHECK-NEXT:    smaddl x0, w8, w9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %zext = zext i8 %x1 to i64
   %mul = mul i64 %sext, %zext
@@ -520,7 +520,7 @@ entry:
   ret i64 %add
 }
 
-define i64 @smnegl_ldrsb_h(i8* %x0, i16 %x1) {
+define i64 @smnegl_ldrsb_h(ptr %x0, i16 %x1) {
 ; CHECK-LABEL: smnegl_ldrsb_h:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsb x8, [x0]
@@ -529,7 +529,7 @@ define i64 @smnegl_ldrsb_h(i8* %x0, i16 %x1) {
 ; CHECK-NEXT:    smnegl x0, w8, w9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %sext = sext i8 %ext64 to i64
   %sext4 = sext i16 %x1 to i64
   %mul = mul i64 %sext, %sext4
@@ -537,7 +537,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @smnegl_ldrsb_h_commuted(i8* %x0, i16 %x1) {
+define i64 @smnegl_ldrsb_h_commuted(ptr %x0, i16 %x1) {
 ; CHECK-LABEL: smnegl_ldrsb_h_commuted:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsb x8, [x0]
@@ -546,7 +546,7 @@ define i64 @smnegl_ldrsb_h_commuted(i8* %x0, i16 %x1) {
 ; CHECK-NEXT:    smnegl x0, w9, w8
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %sext = sext i8 %ext64 to i64
   %sext4 = sext i16 %x1 to i64
   %mul = mul i64 %sext4, %sext
@@ -554,14 +554,14 @@ entry:
   ret i64 %sub
 }
 
-define i64 @smnegl_ldrsh_w(i16* %x0, i32 %x1) {
+define i64 @smnegl_ldrsh_w(ptr %x0, i32 %x1) {
 ; CHECK-LABEL: smnegl_ldrsh_w:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsh x8, [x0]
 ; CHECK-NEXT:    smnegl x0, w8, w1
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i16, i16* %x0
+  %ext64 = load i16, ptr %x0
   %sext = sext i16 %ext64 to i64
   %sext4 = sext i32 %x1 to i64
   %mul = mul i64 %sext, %sext4
@@ -569,14 +569,14 @@ entry:
   ret i64 %sub
 }
 
-define i64 @smnegl_ldrsh_w_commuted(i16* %x0, i32 %x1) {
+define i64 @smnegl_ldrsh_w_commuted(ptr %x0, i32 %x1) {
 ; CHECK-LABEL: smnegl_ldrsh_w_commuted:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsh x8, [x0]
 ; CHECK-NEXT:    smnegl x0, w8, w1
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i16, i16* %x0
+  %ext64 = load i16, ptr %x0
   %sext = sext i16 %ext64 to i64
   %sext4 = sext i32 %x1 to i64
   %mul = mul i64 %sext4, %sext
@@ -584,7 +584,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @smnegl_ldrsw_b(i32* %x0, i8 %x1) {
+define i64 @smnegl_ldrsw_b(ptr %x0, i8 %x1) {
 ; CHECK-LABEL: smnegl_ldrsw_b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -592,7 +592,7 @@ define i64 @smnegl_ldrsw_b(i32* %x0, i8 %x1) {
 ; CHECK-NEXT:    sxtb x9, w1
 ; CHECK-NEXT:    smnegl x0, w8, w9
 ; CHECK-NEXT:    ret
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %sext2 = sext i8 %x1 to i64
   %mul = mul i64 %sext, %sext2
@@ -600,7 +600,7 @@ define i64 @smnegl_ldrsw_b(i32* %x0, i8 %x1) {
   ret i64 %sub
 }
 
-define i64 @smnegl_ldrsw_b_commuted(i32* %x0, i8 %x1) {
+define i64 @smnegl_ldrsw_b_commuted(ptr %x0, i8 %x1) {
 ; CHECK-LABEL: smnegl_ldrsw_b_commuted:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -608,7 +608,7 @@ define i64 @smnegl_ldrsw_b_commuted(i32* %x0, i8 %x1) {
 ; CHECK-NEXT:    sxtb x9, w1
 ; CHECK-NEXT:    smnegl x0, w9, w8
 ; CHECK-NEXT:    ret
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %sext2 = sext i8 %x1 to i64
   %mul = mul i64 %sext2, %sext
@@ -616,7 +616,7 @@ define i64 @smnegl_ldrsw_b_commuted(i32* %x0, i8 %x1) {
   ret i64 %sub
 }
 
-define i64 @smnegl_ldrsw_ldrsw(i32* %x0, i32* %x1) {
+define i64 @smnegl_ldrsw_ldrsw(ptr %x0, ptr %x1) {
 ; CHECK-LABEL: smnegl_ldrsw_ldrsw:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -624,8 +624,8 @@ define i64 @smnegl_ldrsw_ldrsw(i32* %x0, i32* %x1) {
 ; CHECK-NEXT:    smnegl x0, w8, w9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
-  %ext64_2 = load i32, i32* %x1
+  %ext64 = load i32, ptr %x0
+  %ext64_2 = load i32, ptr %x1
   %sext = sext i32 %ext64 to i64
   %sext2 = sext i32 %ext64_2 to i64
   %mul = mul i64 %sext, %sext2
@@ -650,7 +650,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @smnegl_ldrsw_shift(i32* %x0, i64 %x1) {
+define i64 @smnegl_ldrsw_shift(ptr %x0, i64 %x1) {
 ; CHECK-LABEL: smnegl_ldrsw_shift:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -658,7 +658,7 @@ define i64 @smnegl_ldrsw_shift(i32* %x0, i64 %x1) {
 ; CHECK-NEXT:    smnegl x0, w8, w9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %shl = shl i64 %x1, 32
   %shr = ashr exact i64 %shl, 32
@@ -667,7 +667,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @smnegl_ldrsw_zextb(i32* %x0, i8 %x1) {
+define i64 @smnegl_ldrsw_zextb(ptr %x0, i8 %x1) {
 ; CHECK-LABEL: smnegl_ldrsw_zextb:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -676,7 +676,7 @@ define i64 @smnegl_ldrsw_zextb(i32* %x0, i8 %x1) {
 ; CHECK-NEXT:    smnegl x0, w8, w9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %zext = zext i8 %x1 to i64
   %mul = mul i64 %sext, %zext
@@ -684,7 +684,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @smsubl_ldrsb_h(i8* %x0, i16 %x1, i64 %x2) {
+define i64 @smsubl_ldrsb_h(ptr %x0, i16 %x1, i64 %x2) {
 ; CHECK-LABEL: smsubl_ldrsb_h:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsb x8, [x0]
@@ -693,7 +693,7 @@ define i64 @smsubl_ldrsb_h(i8* %x0, i16 %x1, i64 %x2) {
 ; CHECK-NEXT:    smsubl x0, w8, w9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %sext = sext i8 %ext64 to i64
   %sext4 = sext i16 %x1 to i64
   %mul = mul i64 %sext, %sext4
@@ -701,7 +701,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @smsubl_ldrsb_h_commuted(i8* %x0, i16 %x1, i64 %x2) {
+define i64 @smsubl_ldrsb_h_commuted(ptr %x0, i16 %x1, i64 %x2) {
 ; CHECK-LABEL: smsubl_ldrsb_h_commuted:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsb x8, [x0]
@@ -710,7 +710,7 @@ define i64 @smsubl_ldrsb_h_commuted(i8* %x0, i16 %x1, i64 %x2) {
 ; CHECK-NEXT:    smsubl x0, w9, w8, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %sext = sext i8 %ext64 to i64
   %sext4 = sext i16 %x1 to i64
   %mul = mul i64 %sext4, %sext
@@ -718,14 +718,14 @@ entry:
   ret i64 %sub
 }
 
-define i64 @smsubl_ldrsh_w(i16* %x0, i32 %x1, i64 %x2) {
+define i64 @smsubl_ldrsh_w(ptr %x0, i32 %x1, i64 %x2) {
 ; CHECK-LABEL: smsubl_ldrsh_w:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsh x8, [x0]
 ; CHECK-NEXT:    smsubl x0, w8, w1, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i16, i16* %x0
+  %ext64 = load i16, ptr %x0
   %sext = sext i16 %ext64 to i64
   %sext4 = sext i32 %x1 to i64
   %mul = mul i64 %sext, %sext4
@@ -733,14 +733,14 @@ entry:
   ret i64 %sub
 }
 
-define i64 @smsubl_ldrsh_w_commuted(i16* %x0, i32 %x1, i64 %x2) {
+define i64 @smsubl_ldrsh_w_commuted(ptr %x0, i32 %x1, i64 %x2) {
 ; CHECK-LABEL: smsubl_ldrsh_w_commuted:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsh x8, [x0]
 ; CHECK-NEXT:    smsubl x0, w8, w1, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i16, i16* %x0
+  %ext64 = load i16, ptr %x0
   %sext = sext i16 %ext64 to i64
   %sext4 = sext i32 %x1 to i64
   %mul = mul i64 %sext4, %sext
@@ -748,7 +748,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @smsubl_ldrsw_b(i32* %x0, i8 %x1, i64 %x2) {
+define i64 @smsubl_ldrsw_b(ptr %x0, i8 %x1, i64 %x2) {
 ; CHECK-LABEL: smsubl_ldrsw_b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -756,7 +756,7 @@ define i64 @smsubl_ldrsw_b(i32* %x0, i8 %x1, i64 %x2) {
 ; CHECK-NEXT:    sxtb x9, w1
 ; CHECK-NEXT:    smsubl x0, w8, w9, x2
 ; CHECK-NEXT:    ret
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %sext2 = sext i8 %x1 to i64
   %mul = mul i64 %sext, %sext2
@@ -764,7 +764,7 @@ define i64 @smsubl_ldrsw_b(i32* %x0, i8 %x1, i64 %x2) {
   ret i64 %sub
 }
 
-define i64 @smsubl_ldrsw_b_commuted(i32* %x0, i8 %x1, i64 %x2) {
+define i64 @smsubl_ldrsw_b_commuted(ptr %x0, i8 %x1, i64 %x2) {
 ; CHECK-LABEL: smsubl_ldrsw_b_commuted:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -772,7 +772,7 @@ define i64 @smsubl_ldrsw_b_commuted(i32* %x0, i8 %x1, i64 %x2) {
 ; CHECK-NEXT:    sxtb x9, w1
 ; CHECK-NEXT:    smsubl x0, w9, w8, x2
 ; CHECK-NEXT:    ret
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %sext2 = sext i8 %x1 to i64
   %mul = mul i64 %sext2, %sext
@@ -780,7 +780,7 @@ define i64 @smsubl_ldrsw_b_commuted(i32* %x0, i8 %x1, i64 %x2) {
   ret i64 %sub
 }
 
-define i64 @smsubl_ldrsw_ldrsw(i32* %x0, i32* %x1, i64 %x2) {
+define i64 @smsubl_ldrsw_ldrsw(ptr %x0, ptr %x1, i64 %x2) {
 ; CHECK-LABEL: smsubl_ldrsw_ldrsw:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -788,8 +788,8 @@ define i64 @smsubl_ldrsw_ldrsw(i32* %x0, i32* %x1, i64 %x2) {
 ; CHECK-NEXT:    smsubl x0, w8, w9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
-  %ext64_2 = load i32, i32* %x1
+  %ext64 = load i32, ptr %x0
+  %ext64_2 = load i32, ptr %x1
   %sext = sext i32 %ext64 to i64
   %sext2 = sext i32 %ext64_2 to i64
   %mul = mul i64 %sext, %sext2
@@ -814,7 +814,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @smsubl_ldrsw_shift(i32* %x0, i64 %x1, i64 %x2) {
+define i64 @smsubl_ldrsw_shift(ptr %x0, i64 %x1, i64 %x2) {
 ; CHECK-LABEL: smsubl_ldrsw_shift:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -822,7 +822,7 @@ define i64 @smsubl_ldrsw_shift(i32* %x0, i64 %x1, i64 %x2) {
 ; CHECK-NEXT:    smsubl x0, w8, w9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %shl = shl i64 %x1, 32
   %shr = ashr exact i64 %shl, 32
@@ -831,7 +831,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @smsubl_ldrsw_zextb(i32* %x0, i8 %x1, i64 %x2) {
+define i64 @smsubl_ldrsw_zextb(ptr %x0, i8 %x1, i64 %x2) {
 ; CHECK-LABEL: smsubl_ldrsw_zextb:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsw x8, [x0]
@@ -840,7 +840,7 @@ define i64 @smsubl_ldrsw_zextb(i32* %x0, i8 %x1, i64 %x2) {
 ; CHECK-NEXT:    smsubl x0, w8, w9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %sext = sext i32 %ext64 to i64
   %zext = zext i8 %x1 to i64
   %mul = mul i64 %sext, %zext
@@ -905,7 +905,7 @@ entry:
   ret i64 %tmp3
 }
 
-define i64 @umull_ldrb_h(i8* %x0, i16 %x1) {
+define i64 @umull_ldrb_h(ptr %x0, i16 %x1) {
 ; CHECK-LABEL: umull_ldrb_h:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -914,14 +914,14 @@ define i64 @umull_ldrb_h(i8* %x0, i16 %x1) {
 ; CHECK-NEXT:    smull x0, w8, w9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %zext = zext i8 %ext64 to i64
   %zext4 = zext i16 %x1 to i64
   %mul = mul i64 %zext, %zext4
   ret i64 %mul
 }
 
-define i64 @umull_ldrb_h_commuted(i8* %x0, i16 %x1) {
+define i64 @umull_ldrb_h_commuted(ptr %x0, i16 %x1) {
 ; CHECK-LABEL: umull_ldrb_h_commuted:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -930,14 +930,14 @@ define i64 @umull_ldrb_h_commuted(i8* %x0, i16 %x1) {
 ; CHECK-NEXT:    smull x0, w9, w8
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %zext = zext i8 %ext64 to i64
   %zext4 = zext i16 %x1 to i64
   %mul = mul i64 %zext4, %zext
   ret i64 %mul
 }
 
-define i64 @umull_ldrh_w(i16* %x0, i32 %x1) {
+define i64 @umull_ldrh_w(ptr %x0, i32 %x1) {
 ; CHECK-LABEL: umull_ldrh_w:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrh w8, [x0]
@@ -945,14 +945,14 @@ define i64 @umull_ldrh_w(i16* %x0, i32 %x1) {
 ; CHECK-NEXT:    mul x0, x8, x9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i16, i16* %x0
+  %ext64 = load i16, ptr %x0
   %zext = zext i16 %ext64 to i64
   %zext4 = zext i32 %x1 to i64
   %mul = mul i64 %zext, %zext4
   ret i64 %mul
 }
 
-define i64 @umull_ldr_b(i32* %x0, i8 %x1) {
+define i64 @umull_ldr_b(ptr %x0, i8 %x1) {
 ; CHECK-LABEL: umull_ldr_b:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -961,14 +961,14 @@ define i64 @umull_ldr_b(i32* %x0, i8 %x1) {
 ; CHECK-NEXT:    mul x0, x8, x9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %zext = zext i32 %ext64 to i64
   %zext4 = zext i8 %x1 to i64
   %mul = mul i64 %zext, %zext4
   ret i64 %mul
 }
 
-define i64 @umull_ldr2_w(i64* %x0, i32 %x1) {
+define i64 @umull_ldr2_w(ptr %x0, i32 %x1) {
 ; CHECK-LABEL: umull_ldr2_w:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -976,14 +976,14 @@ define i64 @umull_ldr2_w(i64* %x0, i32 %x1) {
 ; CHECK-NEXT:    mul x0, x8, x9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i64, i64* %x0
+  %ext64 = load i64, ptr %x0
   %and = and i64 %ext64, 4294967295
   %zext4 = zext i32 %x1 to i64
   %mul = mul i64 %and, %zext4
   ret i64 %mul
 }
 
-define i64 @umull_ldr2_ldr2(i64* %x0, i64* %x1) {
+define i64 @umull_ldr2_ldr2(ptr %x0, ptr %x1) {
 ; CHECK-LABEL: umull_ldr2_ldr2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -991,15 +991,15 @@ define i64 @umull_ldr2_ldr2(i64* %x0, i64* %x1) {
 ; CHECK-NEXT:    mul x0, x8, x9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i64, i64* %x0
+  %ext64 = load i64, ptr %x0
   %and = and i64 %ext64, 4294967295
-  %ext64_2 = load i64, i64* %x1
+  %ext64_2 = load i64, ptr %x1
   %and2 = and i64 %ext64_2, 4294967295
   %mul = mul i64 %and, %and2
   ret i64 %mul
 }
 
-define i64 @umull_ldr2_d(i64* %x0, i64 %x1) {
+define i64 @umull_ldr2_d(ptr %x0, i64 %x1) {
 ; CHECK-LABEL: umull_ldr2_d:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -1007,14 +1007,14 @@ define i64 @umull_ldr2_d(i64* %x0, i64 %x1) {
 ; CHECK-NEXT:    mul x0, x8, x9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i64, i64* %x0
+  %ext64 = load i64, ptr %x0
   %and = and i64 %ext64, 4294967295
   %and2 = and i64 %x1, 4294967295
   %mul = mul i64 %and, %and2
   ret i64 %mul
 }
 
-define i64 @umaddl_ldrb_h(i8* %x0, i16 %x1, i64 %x2) {
+define i64 @umaddl_ldrb_h(ptr %x0, i16 %x1, i64 %x2) {
 ; CHECK-LABEL: umaddl_ldrb_h:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -1023,7 +1023,7 @@ define i64 @umaddl_ldrb_h(i8* %x0, i16 %x1, i64 %x2) {
 ; CHECK-NEXT:    smaddl x0, w8, w9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %zext = zext i8 %ext64 to i64
   %zext4 = zext i16 %x1 to i64
   %mul = mul i64 %zext, %zext4
@@ -1031,7 +1031,7 @@ entry:
   ret i64 %add
 }
 
-define i64 @umaddl_ldrb_h_commuted(i8* %x0, i16 %x1, i64 %x2) {
+define i64 @umaddl_ldrb_h_commuted(ptr %x0, i16 %x1, i64 %x2) {
 ; CHECK-LABEL: umaddl_ldrb_h_commuted:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -1040,7 +1040,7 @@ define i64 @umaddl_ldrb_h_commuted(i8* %x0, i16 %x1, i64 %x2) {
 ; CHECK-NEXT:    smaddl x0, w9, w8, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %zext = zext i8 %ext64 to i64
   %zext4 = zext i16 %x1 to i64
   %mul = mul i64 %zext4, %zext
@@ -1048,7 +1048,7 @@ entry:
   ret i64 %add
 }
 
-define i64 @umaddl_ldrh_w(i16* %x0, i32 %x1, i64 %x2) {
+define i64 @umaddl_ldrh_w(ptr %x0, i32 %x1, i64 %x2) {
 ; CHECK-LABEL: umaddl_ldrh_w:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrh w8, [x0]
@@ -1056,7 +1056,7 @@ define i64 @umaddl_ldrh_w(i16* %x0, i32 %x1, i64 %x2) {
 ; CHECK-NEXT:    madd x0, x8, x9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i16, i16* %x0
+  %ext64 = load i16, ptr %x0
   %zext = zext i16 %ext64 to i64
   %zext4 = zext i32 %x1 to i64
   %mul = mul i64 %zext, %zext4
@@ -1064,7 +1064,7 @@ entry:
   ret i64 %add
 }
 
-define i64 @umaddl_ldr_b(i32* %x0, i8 %x1, i64 %x2) {
+define i64 @umaddl_ldr_b(ptr %x0, i8 %x1, i64 %x2) {
 ; CHECK-LABEL: umaddl_ldr_b:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -1073,7 +1073,7 @@ define i64 @umaddl_ldr_b(i32* %x0, i8 %x1, i64 %x2) {
 ; CHECK-NEXT:    madd x0, x8, x9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %zext = zext i32 %ext64 to i64
   %zext4 = zext i8 %x1 to i64
   %mul = mul i64 %zext, %zext4
@@ -1081,7 +1081,7 @@ entry:
   ret i64 %add
 }
 
-define i64 @umaddl_ldr2_w(i64* %x0, i32 %x1, i64 %x2) {
+define i64 @umaddl_ldr2_w(ptr %x0, i32 %x1, i64 %x2) {
 ; CHECK-LABEL: umaddl_ldr2_w:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -1089,7 +1089,7 @@ define i64 @umaddl_ldr2_w(i64* %x0, i32 %x1, i64 %x2) {
 ; CHECK-NEXT:    madd x0, x8, x9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i64, i64* %x0
+  %ext64 = load i64, ptr %x0
   %and = and i64 %ext64, 4294967295
   %zext4 = zext i32 %x1 to i64
   %mul = mul i64 %and, %zext4
@@ -1097,7 +1097,7 @@ entry:
   ret i64 %add
 }
 
-define i64 @umaddl_ldr2_ldr2(i64* %x0, i64* %x1, i64 %x2) {
+define i64 @umaddl_ldr2_ldr2(ptr %x0, ptr %x1, i64 %x2) {
 ; CHECK-LABEL: umaddl_ldr2_ldr2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -1105,16 +1105,16 @@ define i64 @umaddl_ldr2_ldr2(i64* %x0, i64* %x1, i64 %x2) {
 ; CHECK-NEXT:    madd x0, x8, x9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i64, i64* %x0
+  %ext64 = load i64, ptr %x0
   %and = and i64 %ext64, 4294967295
-  %ext64_2 = load i64, i64* %x1
+  %ext64_2 = load i64, ptr %x1
   %and2 = and i64 %ext64_2, 4294967295
   %mul = mul i64 %and, %and2
   %add = add i64 %mul, %x2
   ret i64 %add
 }
 
-define i64 @umaddl_ldr2_d(i64* %x0, i64 %x1, i64 %x2) {
+define i64 @umaddl_ldr2_d(ptr %x0, i64 %x1, i64 %x2) {
 ; CHECK-LABEL: umaddl_ldr2_d:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -1122,7 +1122,7 @@ define i64 @umaddl_ldr2_d(i64* %x0, i64 %x1, i64 %x2) {
 ; CHECK-NEXT:    madd x0, x8, x9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i64, i64* %x0
+  %ext64 = load i64, ptr %x0
   %and = and i64 %ext64, 4294967295
   %and2 = and i64 %x1, 4294967295
   %mul = mul i64 %and, %and2
@@ -1130,7 +1130,7 @@ entry:
   ret i64 %add
 }
 
-define i64 @umnegl_ldrb_h(i8* %x0, i16 %x1) {
+define i64 @umnegl_ldrb_h(ptr %x0, i16 %x1) {
 ; CHECK-LABEL: umnegl_ldrb_h:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -1139,7 +1139,7 @@ define i64 @umnegl_ldrb_h(i8* %x0, i16 %x1) {
 ; CHECK-NEXT:    smnegl x0, w8, w9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %zext = zext i8 %ext64 to i64
   %zext4 = zext i16 %x1 to i64
   %mul = mul i64 %zext, %zext4
@@ -1147,7 +1147,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @umnegl_ldrb_h_commuted(i8* %x0, i16 %x1) {
+define i64 @umnegl_ldrb_h_commuted(ptr %x0, i16 %x1) {
 ; CHECK-LABEL: umnegl_ldrb_h_commuted:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -1156,7 +1156,7 @@ define i64 @umnegl_ldrb_h_commuted(i8* %x0, i16 %x1) {
 ; CHECK-NEXT:    smnegl x0, w9, w8
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %zext = zext i8 %ext64 to i64
   %zext4 = zext i16 %x1 to i64
   %mul = mul i64 %zext4, %zext
@@ -1164,7 +1164,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @umnegl_ldrh_w(i16* %x0, i32 %x1) {
+define i64 @umnegl_ldrh_w(ptr %x0, i32 %x1) {
 ; CHECK-LABEL: umnegl_ldrh_w:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrh w8, [x0]
@@ -1172,7 +1172,7 @@ define i64 @umnegl_ldrh_w(i16* %x0, i32 %x1) {
 ; CHECK-NEXT:    mneg x0, x8, x9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i16, i16* %x0
+  %ext64 = load i16, ptr %x0
   %zext = zext i16 %ext64 to i64
   %zext4 = zext i32 %x1 to i64
   %mul = mul i64 %zext, %zext4
@@ -1180,7 +1180,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @umnegl_ldr_b(i32* %x0, i8 %x1) {
+define i64 @umnegl_ldr_b(ptr %x0, i8 %x1) {
 ; CHECK-LABEL: umnegl_ldr_b:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -1189,7 +1189,7 @@ define i64 @umnegl_ldr_b(i32* %x0, i8 %x1) {
 ; CHECK-NEXT:    mneg x0, x8, x9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %zext = zext i32 %ext64 to i64
   %zext4 = zext i8 %x1 to i64
   %mul = mul i64 %zext, %zext4
@@ -1197,7 +1197,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @umnegl_ldr2_w(i64* %x0, i32 %x1) {
+define i64 @umnegl_ldr2_w(ptr %x0, i32 %x1) {
 ; CHECK-LABEL: umnegl_ldr2_w:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -1205,7 +1205,7 @@ define i64 @umnegl_ldr2_w(i64* %x0, i32 %x1) {
 ; CHECK-NEXT:    mneg x0, x8, x9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i64, i64* %x0
+  %ext64 = load i64, ptr %x0
   %and = and i64 %ext64, 4294967295
   %zext4 = zext i32 %x1 to i64
   %mul = mul i64 %and, %zext4
@@ -1213,7 +1213,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @umnegl_ldr2_ldr2(i64* %x0, i64* %x1) {
+define i64 @umnegl_ldr2_ldr2(ptr %x0, ptr %x1) {
 ; CHECK-LABEL: umnegl_ldr2_ldr2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -1221,16 +1221,16 @@ define i64 @umnegl_ldr2_ldr2(i64* %x0, i64* %x1) {
 ; CHECK-NEXT:    mneg x0, x8, x9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i64, i64* %x0
+  %ext64 = load i64, ptr %x0
   %and = and i64 %ext64, 4294967295
-  %ext64_2 = load i64, i64* %x1
+  %ext64_2 = load i64, ptr %x1
   %and2 = and i64 %ext64_2, 4294967295
   %mul = mul i64 %and, %and2
   %sub = sub i64 0, %mul
   ret i64 %sub
 }
 
-define i64 @umnegl_ldr2_d(i64* %x0, i64 %x1) {
+define i64 @umnegl_ldr2_d(ptr %x0, i64 %x1) {
 ; CHECK-LABEL: umnegl_ldr2_d:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -1238,7 +1238,7 @@ define i64 @umnegl_ldr2_d(i64* %x0, i64 %x1) {
 ; CHECK-NEXT:    mneg x0, x8, x9
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i64, i64* %x0
+  %ext64 = load i64, ptr %x0
   %and = and i64 %ext64, 4294967295
   %and2 = and i64 %x1, 4294967295
   %mul = mul i64 %and, %and2
@@ -1246,7 +1246,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @umsubl_ldrb_h(i8* %x0, i16 %x1, i64 %x2) {
+define i64 @umsubl_ldrb_h(ptr %x0, i16 %x1, i64 %x2) {
 ; CHECK-LABEL: umsubl_ldrb_h:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -1255,7 +1255,7 @@ define i64 @umsubl_ldrb_h(i8* %x0, i16 %x1, i64 %x2) {
 ; CHECK-NEXT:    smsubl x0, w8, w9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %zext = zext i8 %ext64 to i64
   %zext4 = zext i16 %x1 to i64
   %mul = mul i64 %zext, %zext4
@@ -1263,7 +1263,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @umsubl_ldrb_h_commuted(i8* %x0, i16 %x1, i64 %x2) {
+define i64 @umsubl_ldrb_h_commuted(ptr %x0, i16 %x1, i64 %x2) {
 ; CHECK-LABEL: umsubl_ldrb_h_commuted:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -1272,7 +1272,7 @@ define i64 @umsubl_ldrb_h_commuted(i8* %x0, i16 %x1, i64 %x2) {
 ; CHECK-NEXT:    smsubl x0, w9, w8, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i8, i8* %x0
+  %ext64 = load i8, ptr %x0
   %zext = zext i8 %ext64 to i64
   %zext4 = zext i16 %x1 to i64
   %mul = mul i64 %zext4, %zext
@@ -1280,7 +1280,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @umsubl_ldrh_w(i16* %x0, i32 %x1, i64 %x2) {
+define i64 @umsubl_ldrh_w(ptr %x0, i32 %x1, i64 %x2) {
 ; CHECK-LABEL: umsubl_ldrh_w:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrh w8, [x0]
@@ -1288,7 +1288,7 @@ define i64 @umsubl_ldrh_w(i16* %x0, i32 %x1, i64 %x2) {
 ; CHECK-NEXT:    msub x0, x8, x9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i16, i16* %x0
+  %ext64 = load i16, ptr %x0
   %zext = zext i16 %ext64 to i64
   %zext4 = zext i32 %x1 to i64
   %mul = mul i64 %zext, %zext4
@@ -1296,7 +1296,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @umsubl_ldr_b(i32* %x0, i8 %x1, i64 %x2) {
+define i64 @umsubl_ldr_b(ptr %x0, i8 %x1, i64 %x2) {
 ; CHECK-LABEL: umsubl_ldr_b:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -1305,7 +1305,7 @@ define i64 @umsubl_ldr_b(i32* %x0, i8 %x1, i64 %x2) {
 ; CHECK-NEXT:    msub x0, x8, x9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i32, i32* %x0
+  %ext64 = load i32, ptr %x0
   %zext = zext i32 %ext64 to i64
   %zext4 = zext i8 %x1 to i64
   %mul = mul i64 %zext, %zext4
@@ -1313,7 +1313,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @umsubl_ldr2_w(i64* %x0, i32 %x1, i64 %x2) {
+define i64 @umsubl_ldr2_w(ptr %x0, i32 %x1, i64 %x2) {
 ; CHECK-LABEL: umsubl_ldr2_w:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -1321,7 +1321,7 @@ define i64 @umsubl_ldr2_w(i64* %x0, i32 %x1, i64 %x2) {
 ; CHECK-NEXT:    msub x0, x8, x9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i64, i64* %x0
+  %ext64 = load i64, ptr %x0
   %and = and i64 %ext64, 4294967295
   %zext4 = zext i32 %x1 to i64
   %mul = mul i64 %and, %zext4
@@ -1329,7 +1329,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @umsubl_ldr2_ldr2(i64* %x0, i64* %x1, i64 %x2) {
+define i64 @umsubl_ldr2_ldr2(ptr %x0, ptr %x1, i64 %x2) {
 ; CHECK-LABEL: umsubl_ldr2_ldr2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -1337,16 +1337,16 @@ define i64 @umsubl_ldr2_ldr2(i64* %x0, i64* %x1, i64 %x2) {
 ; CHECK-NEXT:    msub x0, x8, x9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i64, i64* %x0
+  %ext64 = load i64, ptr %x0
   %and = and i64 %ext64, 4294967295
-  %ext64_2 = load i64, i64* %x1
+  %ext64_2 = load i64, ptr %x1
   %and2 = and i64 %ext64_2, 4294967295
   %mul = mul i64 %and, %and2
   %sub = sub i64 %x2, %mul
   ret i64 %sub
 }
 
-define i64 @umsubl_ldr2_d(i64* %x0, i64 %x1, i64 %x2) {
+define i64 @umsubl_ldr2_d(ptr %x0, i64 %x1, i64 %x2) {
 ; CHECK-LABEL: umsubl_ldr2_d:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -1354,7 +1354,7 @@ define i64 @umsubl_ldr2_d(i64* %x0, i64 %x1, i64 %x2) {
 ; CHECK-NEXT:    msub x0, x8, x9, x2
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i64, i64* %x0
+  %ext64 = load i64, ptr %x0
   %and = and i64 %ext64, 4294967295
   %and2 = and i64 %x1, 4294967295
   %mul = mul i64 %and, %and2
@@ -1362,7 +1362,7 @@ entry:
   ret i64 %sub
 }
 
-define i64 @umull_ldr2_w_cc1(i64* %x0, i32 %x1) {
+define i64 @umull_ldr2_w_cc1(ptr %x0, i32 %x1) {
 ; CHECK-LABEL: umull_ldr2_w_cc1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x9, [x0]
@@ -1371,14 +1371,14 @@ define i64 @umull_ldr2_w_cc1(i64* %x0, i32 %x1) {
 ; CHECK-NEXT:    mul x0, x9, x8
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i64, i64* %x0
+  %ext64 = load i64, ptr %x0
   %and = and i64 %ext64, 2147483647
   %zext4 = zext i32 %x1 to i64
   %mul = mul i64 %and, %zext4
   ret i64 %mul
 }
 
-define i64 @umull_ldr2_w_cc2(i64* %x0, i32 %x1) {
+define i64 @umull_ldr2_w_cc2(ptr %x0, i32 %x1) {
 ; CHECK-LABEL: umull_ldr2_w_cc2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x9, [x0]
@@ -1387,7 +1387,7 @@ define i64 @umull_ldr2_w_cc2(i64* %x0, i32 %x1) {
 ; CHECK-NEXT:    mul x0, x9, x8
 ; CHECK-NEXT:    ret
 entry:
-  %ext64 = load i64, i64* %x0
+  %ext64 = load i64, ptr %x0
   %and = and i64 %ext64, 8589934591
   %zext4 = zext i32 %x1 to i64
   %mul = mul i64 %and, %zext4

diff --git a/llvm/test/CodeGen/AArch64/aarch64-sched-store.ll b/llvm/test/CodeGen/AArch64/aarch64-sched-store.ll
index e01ef7b9eb348..7bf444344f286 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-sched-store.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-sched-store.ll
@@ -4,7 +4,7 @@
 
 target triple = "aarch64-unknown-linux-gnu"
 
-define dso_local void @memset_unroll2(double* nocapture %array, i64 %size) {
+define dso_local void @memset_unroll2(ptr nocapture %array, i64 %size) {
 ; DEFAULT-LABEL: memset_unroll2:
 ; DEFAULT:       // %bb.0: // %entry
 ; DEFAULT-NEXT:    fmov v0.2d, #2.00000000
@@ -52,33 +52,25 @@ entry:
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index16, %vector.body ]
   %niter = phi i64 [ %size, %entry ], [ %niter.nsub.3, %vector.body ]
-  %array0 = getelementptr inbounds double, double* %array, i64 %index
-  %array0.cast = bitcast double* %array0 to <2 x double>*
-  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, <2 x double>* %array0.cast, align 8
-  %array2 = getelementptr inbounds double, double* %array0, i64 2
-  %array2.cast = bitcast double* %array2 to <2 x double>*
-  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, <2 x double>* %array2.cast, align 8
+  %array0 = getelementptr inbounds double, ptr %array, i64 %index
+  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array0, align 8
+  %array2 = getelementptr inbounds double, ptr %array0, i64 2
+  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array2, align 8
   %index4 = or i64 %index, 4
-  %array4 = getelementptr inbounds double, double* %array, i64 %index4
-  %array4.cast = bitcast double* %array4 to <2 x double>*
-  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, <2 x double>* %array4.cast, align 8
-  %array6 = getelementptr inbounds double, double* %array4, i64 2
-  %array6.cast = bitcast double* %array6 to <2 x double>*
-  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, <2 x double>* %array6.cast, align 8
+  %array4 = getelementptr inbounds double, ptr %array, i64 %index4
+  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array4, align 8
+  %array6 = getelementptr inbounds double, ptr %array4, i64 2
+  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array6, align 8
   %index8 = or i64 %index, 8
-  %array8 = getelementptr inbounds double, double* %array, i64 %index8
-  %array8.cast = bitcast double* %array8 to <2 x double>*
-  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, <2 x double>* %array8.cast, align 8
-  %array10 = getelementptr inbounds double, double* %array8, i64 2
-  %array10.cast = bitcast double* %array10 to <2 x double>*
-  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, <2 x double>* %array10.cast, align 8
+  %array8 = getelementptr inbounds double, ptr %array, i64 %index8
+  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array8, align 8
+  %array10 = getelementptr inbounds double, ptr %array8, i64 2
+  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array10, align 8
   %index12 = or i64 %index, 12
-  %array12 = getelementptr inbounds double, double* %array, i64 %index12
-  %array12.cast = bitcast double* %array12 to <2 x double>*
-  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, <2 x double>* %array12.cast, align 8
-  %array14 = getelementptr inbounds double, double* %array12, i64 2
-  %array14.cast = bitcast double* %array14 to <2 x double>*
-  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, <2 x double>* %array14.cast, align 8
+  %array12 = getelementptr inbounds double, ptr %array, i64 %index12
+  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array12, align 8
+  %array14 = getelementptr inbounds double, ptr %array12, i64 2
+  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array14, align 8
   %index16 = add i64 %index, 16
   %niter.nsub.3 = add i64 %niter, -4
   %niter.ncmp.3 = icmp eq i64 %niter.nsub.3, 0

diff --git a/llvm/test/CodeGen/AArch64/aarch64-signedreturnaddress.ll b/llvm/test/CodeGen/AArch64/aarch64-signedreturnaddress.ll
index 12a4939e9e520..05ece948b4828 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-signedreturnaddress.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-signedreturnaddress.ll
@@ -7,7 +7,7 @@
 ; therefore this instruction can be safely used for any pre Armv8.3-A architectures.
 ; On Armv8.3-A and onwards XPACI is available so use that instead.
 
-define i8* @ra0() nounwind readnone {
+define ptr @ra0() nounwind readnone {
 entry:
 ; CHECK-LABEL: ra0:
 ; CHECK-NEXT:     str     x30, [sp, #-16]!
@@ -20,11 +20,11 @@ entry:
 ; CHECKV83-NEXT:  mov     x0, x30
 ; CHECKV83-NEXT:  ldr     x30, [sp], #16
 ; CHECKV83-NEXT:  ret
-  %0 = tail call i8* @llvm.returnaddress(i32 0)
-  ret i8* %0
+  %0 = tail call ptr @llvm.returnaddress(i32 0)
+  ret ptr %0
 }
 
-define i8* @ra1() nounwind readnone #0 {
+define ptr @ra1() nounwind readnone #0 {
 entry:
 ; CHECK-LABEL: ra1:
 ; CHECK:          hint    #25
@@ -40,10 +40,10 @@ entry:
 ; CHECKV83-NEXT:  mov     x0, x30
 ; CHECKV83-NEXT:  ldr     x30, [sp], #16
 ; CHECKV83-NEXT:  retaa
-  %0 = tail call i8* @llvm.returnaddress(i32 0)
-  ret i8* %0
+  %0 = tail call ptr @llvm.returnaddress(i32 0)
+  ret ptr %0
 }
 
 attributes #0 = { "sign-return-address"="all" }
 
-declare i8* @llvm.returnaddress(i32) nounwind readnone
+declare ptr @llvm.returnaddress(i32) nounwind readnone

diff --git a/llvm/test/CodeGen/AArch64/aarch64-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
index 9ebbe18dc1ddb..50a0f61445736 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
@@ -1,52 +1,52 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s -o -| FileCheck %s
 
-define <8 x i16> @smull_v8i8_v8i16(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i16> @smull_v8i8_v8i16(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull_v8i8_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    smull v0.8h, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
   %tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
   %tmp5 = mul <8 x i16> %tmp3, %tmp4
   ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @smull_v4i16_v4i32(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @smull_v4i16_v4i32(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull_v4i16_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    smull v0.4s, v0.4h, v1.4h
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = sext <4 x i16> %tmp1 to <4 x i32>
   %tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
   %tmp5 = mul <4 x i32> %tmp3, %tmp4
   ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @smull_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @smull_v2i32_v2i64(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull_v2i32_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    smull v0.2d, v0.2s, v1.2s
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = sext <2 x i32> %tmp1 to <2 x i64>
   %tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
   %tmp5 = mul <2 x i64> %tmp3, %tmp4
   ret <2 x i64> %tmp5
 }
 
-define <8 x i32> @smull_zext_v8i8_v8i32(<8 x i8>* %A, <8 x i16>* %B) nounwind {
+define <8 x i32> @smull_zext_v8i8_v8i32(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull_zext_v8i8_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -55,15 +55,15 @@ define <8 x i32> @smull_zext_v8i8_v8i32(<8 x i8>* %A, <8 x i16>* %B) nounwind {
 ; CHECK-NEXT:    smull2 v1.4s, v0.8h, v2.8h
 ; CHECK-NEXT:    smull v0.4s, v0.4h, v2.4h
 ; CHECK-NEXT:    ret
-  %load.A = load <8 x i8>, <8 x i8>* %A
-  %load.B = load <8 x i16>, <8 x i16>* %B
+  %load.A = load <8 x i8>, ptr %A
+  %load.B = load <8 x i16>, ptr %B
   %zext.A = zext <8 x i8> %load.A to <8 x i32>
   %sext.B = sext <8 x i16> %load.B to <8 x i32>
   %res = mul <8 x i32> %zext.A, %sext.B
   ret <8 x i32> %res
 }
 
-define <8 x i32> @smull_zext_v8i8_v8i32_sext_first_operand(<8 x i16>* %A, <8 x i8>* %B) nounwind {
+define <8 x i32> @smull_zext_v8i8_v8i32_sext_first_operand(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull_zext_v8i8_v8i32_sext_first_operand:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x1]
@@ -72,15 +72,15 @@ define <8 x i32> @smull_zext_v8i8_v8i32_sext_first_operand(<8 x i16>* %A, <8 x i
 ; CHECK-NEXT:    smull2 v1.4s, v2.8h, v0.8h
 ; CHECK-NEXT:    smull v0.4s, v2.4h, v0.4h
 ; CHECK-NEXT:    ret
-  %load.A = load <8 x i16>, <8 x i16>* %A
-  %load.B = load <8 x i8>, <8 x i8>* %B
+  %load.A = load <8 x i16>, ptr %A
+  %load.B = load <8 x i8>, ptr %B
   %sext.A = sext <8 x i16> %load.A to <8 x i32>
   %zext.B = zext <8 x i8> %load.B to <8 x i32>
   %res = mul <8 x i32> %sext.A, %zext.B
   ret <8 x i32> %res
 }
 
-define <8 x i32> @smull_zext_v8i8_v8i32_top_bit_is_1(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i32> @smull_zext_v8i8_v8i32_top_bit_is_1(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull_zext_v8i8_v8i32_top_bit_is_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -93,16 +93,16 @@ define <8 x i32> @smull_zext_v8i8_v8i32_top_bit_is_1(<8 x i16>* %A, <8 x i16>* %
 ; CHECK-NEXT:    mul v1.4s, v3.4s, v1.4s
 ; CHECK-NEXT:    mul v0.4s, v0.4s, v2.4s
 ; CHECK-NEXT:    ret
-  %load.A = load <8 x i16>, <8 x i16>* %A
+  %load.A = load <8 x i16>, ptr %A
   %or.A = or <8 x i16> %load.A, <i16 u0x8000, i16 u0x8000, i16 u0x8000, i16 u0x8000, i16 u0x8000, i16 u0x8000, i16 u0x8000, i16 u0x8000>
-  %load.B = load <8 x i16>, <8 x i16>* %B
+  %load.B = load <8 x i16>, ptr %B
   %zext.A = zext <8 x i16> %or.A  to <8 x i32>
   %sext.B = sext <8 x i16> %load.B to <8 x i32>
   %res = mul <8 x i32> %zext.A, %sext.B
   ret <8 x i32> %res
 }
 
-define <4 x i32> @smull_zext_v4i16_v4i32(<4 x i8>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @smull_zext_v4i16_v4i32(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull_zext_v4i16_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
@@ -110,15 +110,15 @@ define <4 x i32> @smull_zext_v4i16_v4i32(<4 x i8>* %A, <4 x i16>* %B) nounwind {
 ; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
 ; CHECK-NEXT:    smull v0.4s, v0.4h, v1.4h
 ; CHECK-NEXT:    ret
-  %load.A = load <4 x i8>, <4 x i8>* %A
-  %load.B = load <4 x i16>, <4 x i16>* %B
+  %load.A = load <4 x i8>, ptr %A
+  %load.B = load <4 x i16>, ptr %B
   %zext.A = zext <4 x i8> %load.A to <4 x i32>
   %sext.B = sext <4 x i16> %load.B to <4 x i32>
   %res = mul <4 x i32> %zext.A, %sext.B
   ret <4 x i32> %res
 }
 
-define <2 x i64> @smull_zext_v2i32_v2i64(<2 x i16>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @smull_zext_v2i32_v2i64(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull_zext_v2i32_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x1]
@@ -132,15 +132,15 @@ define <2 x i64> @smull_zext_v2i32_v2i64(<2 x i16>* %A, <2 x i32>* %B) nounwind
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    mov v0.d[1], x9
 ; CHECK-NEXT:    ret
-  %load.A = load <2 x i16>, <2 x i16>* %A
-  %load.B = load <2 x i32>, <2 x i32>* %B
+  %load.A = load <2 x i16>, ptr %A
+  %load.B = load <2 x i32>, ptr %B
   %zext.A = zext <2 x i16> %load.A to <2 x i64>
   %sext.B = sext <2 x i32> %load.B to <2 x i64>
   %res = mul <2 x i64> %zext.A, %sext.B
   ret <2 x i64> %res
 }
 
-define <2 x i64> @smull_zext_and_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @smull_zext_and_v2i32_v2i64(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull_zext_and_v2i32_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -148,61 +148,61 @@ define <2 x i64> @smull_zext_and_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounw
 ; CHECK-NEXT:    bic v0.2s, #128, lsl #24
 ; CHECK-NEXT:    smull v0.2d, v0.2s, v1.2s
 ; CHECK-NEXT:    ret
-  %load.A = load <2 x i32>, <2 x i32>* %A
+  %load.A = load <2 x i32>, ptr %A
   %and.A = and <2 x i32> %load.A, <i32 u0x7FFFFFFF, i32 u0x7FFFFFFF>
-  %load.B = load <2 x i32>, <2 x i32>* %B
+  %load.B = load <2 x i32>, ptr %B
   %zext.A = zext <2 x i32> %and.A to <2 x i64>
   %sext.B = sext <2 x i32> %load.B to <2 x i64>
   %res = mul <2 x i64> %zext.A, %sext.B
   ret <2 x i64> %res
 }
 
-define <8 x i16> @umull_v8i8_v8i16(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i16> @umull_v8i8_v8i16(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umull_v8i8_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    umull v0.8h, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
   %tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
   %tmp5 = mul <8 x i16> %tmp3, %tmp4
   ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @umull_v4i16_v4i32(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @umull_v4i16_v4i32(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umull_v4i16_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    umull v0.4s, v0.4h, v1.4h
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
   %tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
   %tmp5 = mul <4 x i32> %tmp3, %tmp4
   ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @umull_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @umull_v2i32_v2i64(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umull_v2i32_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    umull v0.2d, v0.2s, v1.2s
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = zext <2 x i32> %tmp1 to <2 x i64>
   %tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
   %tmp5 = mul <2 x i64> %tmp3, %tmp4
   ret <2 x i64> %tmp5
 }
 
-define <8 x i16> @amull_v8i8_v8i16(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i16> @amull_v8i8_v8i16(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: amull_v8i8_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -210,8 +210,8 @@ define <8 x i16> @amull_v8i8_v8i16(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ; CHECK-NEXT:    smull v0.8h, v0.8b, v1.8b
 ; CHECK-NEXT:    bic v0.8h, #255, lsl #8
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
   %tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
   %tmp5 = mul <8 x i16> %tmp3, %tmp4
@@ -219,7 +219,7 @@ define <8 x i16> @amull_v8i8_v8i16(<8 x i8>* %A, <8 x i8>* %B) nounwind {
   ret <8 x i16> %and
 }
 
-define <4 x i32> @amull_v4i16_v4i32(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @amull_v4i16_v4i32(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: amull_v4i16_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x0]
@@ -228,8 +228,8 @@ define <4 x i32> @amull_v4i16_v4i32(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ; CHECK-NEXT:    smull v1.4s, v1.4h, v2.4h
 ; CHECK-NEXT:    and v0.16b, v1.16b, v0.16b
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
   %tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
   %tmp5 = mul <4 x i32> %tmp3, %tmp4
@@ -237,7 +237,7 @@ define <4 x i32> @amull_v4i16_v4i32(<4 x i16>* %A, <4 x i16>* %B) nounwind {
   ret <4 x i32> %and
 }
 
-define <2 x i64> @amull_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @amull_v2i32_v2i64(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: amull_v2i32_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x0]
@@ -246,8 +246,8 @@ define <2 x i64> @amull_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ; CHECK-NEXT:    smull v1.2d, v1.2s, v2.2s
 ; CHECK-NEXT:    and v0.16b, v1.16b, v0.16b
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = zext <2 x i32> %tmp1 to <2 x i64>
   %tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
   %tmp5 = mul <2 x i64> %tmp3, %tmp4
@@ -255,7 +255,7 @@ define <2 x i64> @amull_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind {
   ret <2 x i64> %and
 }
 
-define <8 x i16> @smlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
+define <8 x i16> @smlal_v8i8_v8i16(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: smlal_v8i8_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -263,9 +263,9 @@ define <8 x i16> @smlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no
 ; CHECK-NEXT:    ldr d2, [x2]
 ; CHECK-NEXT:    smlal v0.8h, v1.8b, v2.8b
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
-  %tmp3 = load <8 x i8>, <8 x i8>* %C
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = load <8 x i8>, ptr %C
   %tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
   %tmp5 = sext <8 x i8> %tmp3 to <8 x i16>
   %tmp6 = mul <8 x i16> %tmp4, %tmp5
@@ -273,7 +273,7 @@ define <8 x i16> @smlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no
   ret <8 x i16> %tmp7
 }
 
-define <4 x i32> @smlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
+define <4 x i32> @smlal_v4i16_v4i32(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: smlal_v4i16_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -281,9 +281,9 @@ define <4 x i32> @smlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C)
 ; CHECK-NEXT:    ldr d2, [x2]
 ; CHECK-NEXT:    smlal v0.4s, v1.4h, v2.4h
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i16>, <4 x i16>* %C
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i16>, ptr %C
   %tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
   %tmp5 = sext <4 x i16> %tmp3 to <4 x i32>
   %tmp6 = mul <4 x i32> %tmp4, %tmp5
@@ -291,7 +291,7 @@ define <4 x i32> @smlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C)
   ret <4 x i32> %tmp7
 }
 
-define <2 x i64> @smlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
+define <2 x i64> @smlal_v2i32_v2i64(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: smlal_v2i32_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -299,9 +299,9 @@ define <2 x i64> @smlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C)
 ; CHECK-NEXT:    ldr d2, [x2]
 ; CHECK-NEXT:    smlal v0.2d, v1.2s, v2.2s
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i64>, <2 x i64>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i32>, <2 x i32>* %C
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i32>, ptr %C
   %tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
   %tmp5 = sext <2 x i32> %tmp3 to <2 x i64>
   %tmp6 = mul <2 x i64> %tmp4, %tmp5
@@ -309,7 +309,7 @@ define <2 x i64> @smlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C)
   ret <2 x i64> %tmp7
 }
 
-define <8 x i16> @umlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
+define <8 x i16> @umlal_v8i8_v8i16(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: umlal_v8i8_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -317,9 +317,9 @@ define <8 x i16> @umlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no
 ; CHECK-NEXT:    ldr d2, [x2]
 ; CHECK-NEXT:    umlal v0.8h, v1.8b, v2.8b
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
-  %tmp3 = load <8 x i8>, <8 x i8>* %C
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = load <8 x i8>, ptr %C
   %tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
   %tmp5 = zext <8 x i8> %tmp3 to <8 x i16>
   %tmp6 = mul <8 x i16> %tmp4, %tmp5
@@ -327,7 +327,7 @@ define <8 x i16> @umlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no
   ret <8 x i16> %tmp7
 }
 
-define <4 x i32> @umlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
+define <4 x i32> @umlal_v4i16_v4i32(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: umlal_v4i16_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -335,9 +335,9 @@ define <4 x i32> @umlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C)
 ; CHECK-NEXT:    ldr d2, [x2]
 ; CHECK-NEXT:    umlal v0.4s, v1.4h, v2.4h
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i16>, <4 x i16>* %C
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i16>, ptr %C
   %tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
   %tmp5 = zext <4 x i16> %tmp3 to <4 x i32>
   %tmp6 = mul <4 x i32> %tmp4, %tmp5
@@ -345,7 +345,7 @@ define <4 x i32> @umlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C)
   ret <4 x i32> %tmp7
 }
 
-define <2 x i64> @umlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
+define <2 x i64> @umlal_v2i32_v2i64(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: umlal_v2i32_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -353,9 +353,9 @@ define <2 x i64> @umlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C)
 ; CHECK-NEXT:    ldr d2, [x2]
 ; CHECK-NEXT:    umlal v0.2d, v1.2s, v2.2s
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i64>, <2 x i64>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i32>, <2 x i32>* %C
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i32>, ptr %C
   %tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
   %tmp5 = zext <2 x i32> %tmp3 to <2 x i64>
   %tmp6 = mul <2 x i64> %tmp4, %tmp5
@@ -363,7 +363,7 @@ define <2 x i64> @umlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C)
   ret <2 x i64> %tmp7
 }
 
-define <8 x i16> @amlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
+define <8 x i16> @amlal_v8i8_v8i16(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: amlal_v8i8_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -372,9 +372,9 @@ define <8 x i16> @amlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no
 ; CHECK-NEXT:    smlal v0.8h, v1.8b, v2.8b
 ; CHECK-NEXT:    bic v0.8h, #255, lsl #8
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
-  %tmp3 = load <8 x i8>, <8 x i8>* %C
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = load <8 x i8>, ptr %C
   %tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
   %tmp5 = zext <8 x i8> %tmp3 to <8 x i16>
   %tmp6 = mul <8 x i16> %tmp4, %tmp5
@@ -383,7 +383,7 @@ define <8 x i16> @amlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no
   ret <8 x i16> %and
 }
 
-define <4 x i32> @amlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
+define <4 x i32> @amlal_v4i16_v4i32(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: amlal_v4i16_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -393,9 +393,9 @@ define <4 x i32> @amlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C)
 ; CHECK-NEXT:    smlal v2.4s, v1.4h, v3.4h
 ; CHECK-NEXT:    and v0.16b, v2.16b, v0.16b
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i16>, <4 x i16>* %C
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i16>, ptr %C
   %tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
   %tmp5 = zext <4 x i16> %tmp3 to <4 x i32>
   %tmp6 = mul <4 x i32> %tmp4, %tmp5
@@ -404,7 +404,7 @@ define <4 x i32> @amlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C)
   ret <4 x i32> %and
 }
 
-define <2 x i64> @amlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
+define <2 x i64> @amlal_v2i32_v2i64(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: amlal_v2i32_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -414,9 +414,9 @@ define <2 x i64> @amlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C)
 ; CHECK-NEXT:    smlal v2.2d, v1.2s, v3.2s
 ; CHECK-NEXT:    and v0.16b, v2.16b, v0.16b
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i64>, <2 x i64>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i32>, <2 x i32>* %C
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i32>, ptr %C
   %tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
   %tmp5 = zext <2 x i32> %tmp3 to <2 x i64>
   %tmp6 = mul <2 x i64> %tmp4, %tmp5
@@ -425,7 +425,7 @@ define <2 x i64> @amlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C)
   ret <2 x i64> %and
 }
 
-define <8 x i16> @smlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
+define <8 x i16> @smlsl_v8i8_v8i16(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: smlsl_v8i8_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -433,9 +433,9 @@ define <8 x i16> @smlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no
 ; CHECK-NEXT:    ldr d2, [x2]
 ; CHECK-NEXT:    smlsl v0.8h, v1.8b, v2.8b
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
-  %tmp3 = load <8 x i8>, <8 x i8>* %C
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = load <8 x i8>, ptr %C
   %tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
   %tmp5 = sext <8 x i8> %tmp3 to <8 x i16>
   %tmp6 = mul <8 x i16> %tmp4, %tmp5
@@ -443,7 +443,7 @@ define <8 x i16> @smlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no
   ret <8 x i16> %tmp7
 }
 
-define <4 x i32> @smlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
+define <4 x i32> @smlsl_v4i16_v4i32(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: smlsl_v4i16_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -451,9 +451,9 @@ define <4 x i32> @smlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C)
 ; CHECK-NEXT:    ldr d2, [x2]
 ; CHECK-NEXT:    smlsl v0.4s, v1.4h, v2.4h
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i16>, <4 x i16>* %C
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i16>, ptr %C
   %tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
   %tmp5 = sext <4 x i16> %tmp3 to <4 x i32>
   %tmp6 = mul <4 x i32> %tmp4, %tmp5
@@ -461,7 +461,7 @@ define <4 x i32> @smlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C)
   ret <4 x i32> %tmp7
 }
 
-define <2 x i64> @smlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
+define <2 x i64> @smlsl_v2i32_v2i64(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: smlsl_v2i32_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -469,9 +469,9 @@ define <2 x i64> @smlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C)
 ; CHECK-NEXT:    ldr d2, [x2]
 ; CHECK-NEXT:    smlsl v0.2d, v1.2s, v2.2s
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i64>, <2 x i64>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i32>, <2 x i32>* %C
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i32>, ptr %C
   %tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
   %tmp5 = sext <2 x i32> %tmp3 to <2 x i64>
   %tmp6 = mul <2 x i64> %tmp4, %tmp5
@@ -479,7 +479,7 @@ define <2 x i64> @smlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C)
   ret <2 x i64> %tmp7
 }
 
-define <8 x i16> @umlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
+define <8 x i16> @umlsl_v8i8_v8i16(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: umlsl_v8i8_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -487,9 +487,9 @@ define <8 x i16> @umlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no
 ; CHECK-NEXT:    ldr d2, [x2]
 ; CHECK-NEXT:    umlsl v0.8h, v1.8b, v2.8b
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
-  %tmp3 = load <8 x i8>, <8 x i8>* %C
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = load <8 x i8>, ptr %C
   %tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
   %tmp5 = zext <8 x i8> %tmp3 to <8 x i16>
   %tmp6 = mul <8 x i16> %tmp4, %tmp5
@@ -497,7 +497,7 @@ define <8 x i16> @umlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no
   ret <8 x i16> %tmp7
 }
 
-define <4 x i32> @umlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
+define <4 x i32> @umlsl_v4i16_v4i32(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: umlsl_v4i16_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -505,9 +505,9 @@ define <4 x i32> @umlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C)
 ; CHECK-NEXT:    ldr d2, [x2]
 ; CHECK-NEXT:    umlsl v0.4s, v1.4h, v2.4h
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i16>, <4 x i16>* %C
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i16>, ptr %C
   %tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
   %tmp5 = zext <4 x i16> %tmp3 to <4 x i32>
   %tmp6 = mul <4 x i32> %tmp4, %tmp5
@@ -515,7 +515,7 @@ define <4 x i32> @umlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C)
   ret <4 x i32> %tmp7
 }
 
-define <2 x i64> @umlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
+define <2 x i64> @umlsl_v2i32_v2i64(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: umlsl_v2i32_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -523,9 +523,9 @@ define <2 x i64> @umlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C)
 ; CHECK-NEXT:    ldr d2, [x2]
 ; CHECK-NEXT:    umlsl v0.2d, v1.2s, v2.2s
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i64>, <2 x i64>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i32>, <2 x i32>* %C
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i32>, ptr %C
   %tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
   %tmp5 = zext <2 x i32> %tmp3 to <2 x i64>
   %tmp6 = mul <2 x i64> %tmp4, %tmp5
@@ -533,7 +533,7 @@ define <2 x i64> @umlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C)
   ret <2 x i64> %tmp7
 }
 
-define <8 x i16> @amlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
+define <8 x i16> @amlsl_v8i8_v8i16(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: amlsl_v8i8_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -542,9 +542,9 @@ define <8 x i16> @amlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no
 ; CHECK-NEXT:    smlsl v0.8h, v1.8b, v2.8b
 ; CHECK-NEXT:    bic v0.8h, #255, lsl #8
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
-  %tmp3 = load <8 x i8>, <8 x i8>* %C
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = load <8 x i8>, ptr %C
   %tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
   %tmp5 = zext <8 x i8> %tmp3 to <8 x i16>
   %tmp6 = mul <8 x i16> %tmp4, %tmp5
@@ -553,7 +553,7 @@ define <8 x i16> @amlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no
   ret <8 x i16> %and
 }
 
-define <4 x i32> @amlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
+define <4 x i32> @amlsl_v4i16_v4i32(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: amlsl_v4i16_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -563,9 +563,9 @@ define <4 x i32> @amlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C)
 ; CHECK-NEXT:    smlsl v2.4s, v1.4h, v3.4h
 ; CHECK-NEXT:    and v0.16b, v2.16b, v0.16b
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i16>, <4 x i16>* %C
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i16>, ptr %C
   %tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
   %tmp5 = zext <4 x i16> %tmp3 to <4 x i32>
   %tmp6 = mul <4 x i32> %tmp4, %tmp5
@@ -574,7 +574,7 @@ define <4 x i32> @amlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C)
   ret <4 x i32> %and
 }
 
-define <2 x i64> @amlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
+define <2 x i64> @amlsl_v2i32_v2i64(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: amlsl_v2i32_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -584,9 +584,9 @@ define <2 x i64> @amlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C)
 ; CHECK-NEXT:    smlsl v2.2d, v1.2s, v3.2s
 ; CHECK-NEXT:    and v0.16b, v2.16b, v0.16b
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i64>, <2 x i64>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i32>, <2 x i32>* %C
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i32>, ptr %C
   %tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
   %tmp5 = zext <2 x i32> %tmp3 to <2 x i64>
   %tmp6 = mul <2 x i64> %tmp4, %tmp5
@@ -773,7 +773,7 @@ ret <8 x i16> %3
 
 }
 
-define void @distribute(<8 x i16>* %dst, <16 x i8>* %src, i32 %mul) nounwind {
+define void @distribute(ptr %dst, ptr %src, i32 %mul) nounwind {
 ; CHECK-LABEL: distribute:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr q0, [x1]
@@ -787,7 +787,7 @@ entry:
   %0 = trunc i32 %mul to i8
   %1 = insertelement <8 x i8> undef, i8 %0, i32 0
   %2 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
-  %3 = load <16 x i8>, <16 x i8>* %src, align 1
+  %3 = load <16 x i8>, ptr %src, align 1
   %4 = bitcast <16 x i8> %3 to <2 x double>
   %5 = extractelement <2 x double> %4, i32 1
   %6 = bitcast double %5 to <8 x i8>
@@ -798,7 +798,7 @@ entry:
   %11 = zext <8 x i8> %10 to <8 x i16>
   %12 = add <8 x i16> %7, %11
   %13 = mul <8 x i16> %12, %8
-  store <8 x i16> %13, <8 x i16>* %dst, align 2
+  store <8 x i16> %13, ptr %dst, align 2
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
index 24b528fe7df01..cf9ed4d5f0e16 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
@@ -116,7 +116,7 @@ entry:
 
 ; The split bitmask immediates should be hoisted outside loop because they are
 ; loop invariant.
-define void @test8(i64 %a, i64* noalias %src, i64* noalias %dst, i64 %n) {
+define void @test8(i64 %a, ptr noalias %src, ptr noalias %dst, i64 %n) {
 ; CHECK-LABEL: test8:
 ; CHECK:       // %bb.0: // %loop.ph
 ; CHECK-NEXT:    and x9, x0, #0x3ffc00
@@ -150,10 +150,10 @@ loop:
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:
-  %src.arrayidx = getelementptr inbounds i64, i64* %src, i64 %iv
-  %val = load i64, i64* %src.arrayidx
-  %dst.arrayidx = getelementptr inbounds i64, i64* %dst, i64 %iv
-  store i64 %val, i64* %dst.arrayidx
+  %src.arrayidx = getelementptr inbounds i64, ptr %src, i64 %iv
+  %val = load i64, ptr %src.arrayidx
+  %dst.arrayidx = getelementptr inbounds i64, ptr %dst, i64 %iv
+  store i64 %val, ptr %dst.arrayidx
   br label %for.inc
 
 if.else:
@@ -169,7 +169,7 @@ exit:
 }
 
 ; This constant should not be split because the `and` is not loop invariant.
-define i32 @test9(i32* nocapture %x, i32* nocapture readonly %y, i32 %n) {
+define i32 @test9(ptr nocapture %x, ptr nocapture readonly %y, i32 %n) {
 ; CHECK-LABEL: test9:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cmp w2, #1
@@ -201,11 +201,11 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
 
 for.body:                                         ; preds = %for.body.preheader, %for.body
   %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
-  %arrayidx = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %y, i64 %indvars.iv
+  %0 = load i32, ptr %arrayidx, align 4
   %and = and i32 %0, 2098176
-  %arrayidx2 = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
-  store i32 %and, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %x, i64 %indvars.iv
+  store i32 %and, ptr %arrayidx2, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
   br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
@@ -222,7 +222,7 @@ for.body:                                         ; preds = %for.body.preheader,
 ;
 ; In this case, the constant should not be split because it causes more
 ; instructions.
-define void @test10(i32* nocapture %x, i32* nocapture readonly %y, i32* nocapture %z) {
+define void @test10(ptr nocapture %x, ptr nocapture readonly %y, ptr nocapture %z) {
 ; CHECK-LABEL: test10:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x1]
@@ -235,12 +235,12 @@ define void @test10(i32* nocapture %x, i32* nocapture readonly %y, i32* nocaptur
 ; CHECK-NEXT:    str w8, [x2]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* %y, align 4
+  %0 = load i32, ptr %y, align 4
   %and = and i32 %0, 2098176
-  store i32 %and, i32* %x, align 4
-  %1 = load i32, i32* %y, align 4
+  store i32 %and, ptr %x, align 4
+  %1 = load i32, ptr %y, align 4
   %or = or i32 %1, 2098176
-  store i32 %or, i32* %z, align 4
+  store i32 %or, ptr %z, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll b/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll
index 1c093989c3951..d97d9d1e0e091 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll
@@ -10,16 +10,16 @@
 ; CHECK:SU(3):   STRXui %1:gpr64, %0:gpr64common, 2
 ; CHECK:SU(2):   STRXui %1:gpr64, %0:gpr64common, 3
 ; CHECK:SU(5):   STRXui %1:gpr64, %0:gpr64common, 4
-define i64 @stp_i64_scale(i64* nocapture %P, i64 %v) {
+define i64 @stp_i64_scale(ptr nocapture %P, i64 %v) {
 entry:
-  %arrayidx = getelementptr inbounds i64, i64* %P, i64 3
-  store i64 %v, i64* %arrayidx
-  %arrayidx1 = getelementptr inbounds i64, i64* %P, i64 2
-  store i64 %v, i64* %arrayidx1
-  %arrayidx2 = getelementptr inbounds i64, i64* %P, i64 1
-  store i64 %v, i64* %arrayidx2
-  %arrayidx3 = getelementptr inbounds i64, i64* %P, i64 4
-  store i64 %v, i64* %arrayidx3
+  %arrayidx = getelementptr inbounds i64, ptr %P, i64 3
+  store i64 %v, ptr %arrayidx
+  %arrayidx1 = getelementptr inbounds i64, ptr %P, i64 2
+  store i64 %v, ptr %arrayidx1
+  %arrayidx2 = getelementptr inbounds i64, ptr %P, i64 1
+  store i64 %v, ptr %arrayidx2
+  %arrayidx3 = getelementptr inbounds i64, ptr %P, i64 4
+  store i64 %v, ptr %arrayidx3
   ret i64 %v
 }
 
@@ -31,16 +31,16 @@ entry:
 ; CHECK:SU(3):   STRWui %1:gpr32, %0:gpr64common, 2
 ; CHECK:SU(2):   STRWui %1:gpr32, %0:gpr64common, 3
 ; CHECK:SU(5):   STRWui %1:gpr32, %0:gpr64common, 4
-define i32 @stp_i32_scale(i32* nocapture %P, i32 %v) {
+define i32 @stp_i32_scale(ptr nocapture %P, i32 %v) {
 entry:
-  %arrayidx = getelementptr inbounds i32, i32* %P, i32 3
-  store i32 %v, i32* %arrayidx
-  %arrayidx1 = getelementptr inbounds i32, i32* %P, i32 2
-  store i32 %v, i32* %arrayidx1
-  %arrayidx2 = getelementptr inbounds i32, i32* %P, i32 1
-  store i32 %v, i32* %arrayidx2
-  %arrayidx3 = getelementptr inbounds i32, i32* %P, i32 4
-  store i32 %v, i32* %arrayidx3
+  %arrayidx = getelementptr inbounds i32, ptr %P, i32 3
+  store i32 %v, ptr %arrayidx
+  %arrayidx1 = getelementptr inbounds i32, ptr %P, i32 2
+  store i32 %v, ptr %arrayidx1
+  %arrayidx2 = getelementptr inbounds i32, ptr %P, i32 1
+  store i32 %v, ptr %arrayidx2
+  %arrayidx3 = getelementptr inbounds i32, ptr %P, i32 4
+  store i32 %v, ptr %arrayidx3
   ret i32 %v
 }
 
@@ -52,16 +52,16 @@ entry:
 ; CHECK:SU(3):   STURXi %1:gpr64, %0:gpr64common, -8
 ; CHECK:SU(4):   STURXi %1:gpr64, %0:gpr64common, -16
 ; CHECK:SU(5):   STURXi %1:gpr64, %0:gpr64common, -32
-define void @stp_i64_unscale(i64* nocapture %P, i64 %v) #0 {
+define void @stp_i64_unscale(ptr nocapture %P, i64 %v) #0 {
 entry:
-  %arrayidx = getelementptr inbounds i64, i64* %P, i64 -3
-  store i64 %v, i64* %arrayidx
-  %arrayidx1 = getelementptr inbounds i64, i64* %P, i64 -1
-  store i64 %v, i64* %arrayidx1
-  %arrayidx2 = getelementptr inbounds i64, i64* %P, i64 -2
-  store i64 %v, i64* %arrayidx2
-  %arrayidx3 = getelementptr inbounds i64, i64* %P, i64 -4
-  store i64 %v, i64* %arrayidx3
+  %arrayidx = getelementptr inbounds i64, ptr %P, i64 -3
+  store i64 %v, ptr %arrayidx
+  %arrayidx1 = getelementptr inbounds i64, ptr %P, i64 -1
+  store i64 %v, ptr %arrayidx1
+  %arrayidx2 = getelementptr inbounds i64, ptr %P, i64 -2
+  store i64 %v, ptr %arrayidx2
+  %arrayidx3 = getelementptr inbounds i64, ptr %P, i64 -4
+  store i64 %v, ptr %arrayidx3
   ret void
 }
 
@@ -73,16 +73,16 @@ entry:
 ; CHECK:SU(3):   STURWi %1:gpr32, %0:gpr64common, -4
 ; CHECK:SU(4):   STURWi %1:gpr32, %0:gpr64common, -8
 ; CHECK:SU(5):   STURWi %1:gpr32, %0:gpr64common, -16
-define void @stp_i32_unscale(i32* nocapture %P, i32 %v) #0 {
+define void @stp_i32_unscale(ptr nocapture %P, i32 %v) #0 {
 entry:
-  %arrayidx = getelementptr inbounds i32, i32* %P, i32 -3
-  store i32 %v, i32* %arrayidx
-  %arrayidx1 = getelementptr inbounds i32, i32* %P, i32 -1
-  store i32 %v, i32* %arrayidx1
-  %arrayidx2 = getelementptr inbounds i32, i32* %P, i32 -2
-  store i32 %v, i32* %arrayidx2
-  %arrayidx3 = getelementptr inbounds i32, i32* %P, i32 -4
-  store i32 %v, i32* %arrayidx3
+  %arrayidx = getelementptr inbounds i32, ptr %P, i32 -3
+  store i32 %v, ptr %arrayidx
+  %arrayidx1 = getelementptr inbounds i32, ptr %P, i32 -1
+  store i32 %v, ptr %arrayidx1
+  %arrayidx2 = getelementptr inbounds i32, ptr %P, i32 -2
+  store i32 %v, ptr %arrayidx2
+  %arrayidx3 = getelementptr inbounds i32, ptr %P, i32 -4
+  store i32 %v, ptr %arrayidx3
   ret void
 }
 
@@ -94,16 +94,16 @@ entry:
 ; CHECK:SU(4):   STRDui %1:fpr64, %0:gpr64common, 2
 ; CHECK:SU(2):   STRDui %1:fpr64, %0:gpr64common, 3
 ; CHECK:SU(5):   STRDui %1:fpr64, %0:gpr64common, 4
-define void @stp_double(double* nocapture %P, double %v)  {
+define void @stp_double(ptr nocapture %P, double %v)  {
 entry:
-  %arrayidx = getelementptr inbounds double, double* %P, i64 3
-  store double %v, double* %arrayidx
-  %arrayidx1 = getelementptr inbounds double, double* %P, i64 1
-  store double %v, double* %arrayidx1
-  %arrayidx2 = getelementptr inbounds double, double* %P, i64 2
-  store double %v, double* %arrayidx2
-  %arrayidx3 = getelementptr inbounds double, double* %P, i64 4
-  store double %v, double* %arrayidx3
+  %arrayidx = getelementptr inbounds double, ptr %P, i64 3
+  store double %v, ptr %arrayidx
+  %arrayidx1 = getelementptr inbounds double, ptr %P, i64 1
+  store double %v, ptr %arrayidx1
+  %arrayidx2 = getelementptr inbounds double, ptr %P, i64 2
+  store double %v, ptr %arrayidx2
+  %arrayidx3 = getelementptr inbounds double, ptr %P, i64 4
+  store double %v, ptr %arrayidx3
   ret void
 }
 
@@ -115,16 +115,16 @@ entry:
 ; CHECK:SU(4):   STRSui %1:fpr32, %0:gpr64common, 2
 ; CHECK:SU(2):   STRSui %1:fpr32, %0:gpr64common, 3
 ; CHECK:SU(5):   STRSui %1:fpr32, %0:gpr64common, 4
-define void @stp_float(float* nocapture %P, float %v)  {
+define void @stp_float(ptr nocapture %P, float %v)  {
 entry:
-  %arrayidx = getelementptr inbounds float, float* %P, i64 3
-  store float %v, float* %arrayidx
-  %arrayidx1 = getelementptr inbounds float, float* %P, i64 1
-  store float %v, float* %arrayidx1
-  %arrayidx2 = getelementptr inbounds float, float* %P, i64 2
-  store float %v, float* %arrayidx2
-  %arrayidx3 = getelementptr inbounds float, float* %P, i64 4
-  store float %v, float* %arrayidx3
+  %arrayidx = getelementptr inbounds float, ptr %P, i64 3
+  store float %v, ptr %arrayidx
+  %arrayidx1 = getelementptr inbounds float, ptr %P, i64 1
+  store float %v, ptr %arrayidx1
+  %arrayidx2 = getelementptr inbounds float, ptr %P, i64 2
+  store float %v, ptr %arrayidx2
+  %arrayidx3 = getelementptr inbounds float, ptr %P, i64 4
+  store float %v, ptr %arrayidx3
   ret void
 }
 
@@ -135,16 +135,16 @@ entry:
 ; CHECK:SU(3):   STRXui %1:gpr64, %0:gpr64common, 2 :: (volatile
 ; CHECK:SU(4):   STRXui %1:gpr64, %0:gpr64common, 1 :: (volatile
 ; CHECK:SU(5):   STRXui %1:gpr64, %0:gpr64common, 4 :: (volatile
-define i64 @stp_volatile(i64* nocapture %P, i64 %v) {
+define i64 @stp_volatile(ptr nocapture %P, i64 %v) {
 entry:
-  %arrayidx = getelementptr inbounds i64, i64* %P, i64 3
-  store volatile i64 %v, i64* %arrayidx
-  %arrayidx1 = getelementptr inbounds i64, i64* %P, i64 2
-  store volatile i64 %v, i64* %arrayidx1
-  %arrayidx2 = getelementptr inbounds i64, i64* %P, i64 1
-  store volatile i64 %v, i64* %arrayidx2
-  %arrayidx3 = getelementptr inbounds i64, i64* %P, i64 4
-  store volatile i64 %v, i64* %arrayidx3
+  %arrayidx = getelementptr inbounds i64, ptr %P, i64 3
+  store volatile i64 %v, ptr %arrayidx
+  %arrayidx1 = getelementptr inbounds i64, ptr %P, i64 2
+  store volatile i64 %v, ptr %arrayidx1
+  %arrayidx2 = getelementptr inbounds i64, ptr %P, i64 1
+  store volatile i64 %v, ptr %arrayidx2
+  %arrayidx3 = getelementptr inbounds i64, ptr %P, i64 4
+  store volatile i64 %v, ptr %arrayidx3
   ret i64 %v
 }
 
@@ -156,43 +156,43 @@ entry:
 ; CHECK:SU(10):   STRXui %12:gpr64, %0:gpr64common, 1 ::
 ; CHECK:SU(15):   STRXui %17:gpr64, %0:gpr64common, 2 ::
 ; CHECK:SU(20):   STRXui %22:gpr64, %0:gpr64common, 3 ::
-define void @stp_i64_with_ld(i64* noalias nocapture %a, i64* noalias nocapture readnone %b, i64* noalias nocapture readnone %c) {
+define void @stp_i64_with_ld(ptr noalias nocapture %a, ptr noalias nocapture readnone %b, ptr noalias nocapture readnone %c) {
 entry:
-  %arrayidx = getelementptr inbounds i64, i64* %a, i64 8
-  %0 = load i64, i64* %arrayidx, align 8
-  %arrayidx3 = getelementptr inbounds i64, i64* %a, i64 16
-  %1 = load i64, i64* %arrayidx3, align 8
+  %arrayidx = getelementptr inbounds i64, ptr %a, i64 8
+  %0 = load i64, ptr %arrayidx, align 8
+  %arrayidx3 = getelementptr inbounds i64, ptr %a, i64 16
+  %1 = load i64, ptr %arrayidx3, align 8
   %mul = mul nsw i64 %1, %0
-  %2 = load i64, i64* %a, align 8
+  %2 = load i64, ptr %a, align 8
   %add6 = add nsw i64 %2, %mul
-  store i64 %add6, i64* %a, align 8
-  %arrayidx.1 = getelementptr inbounds i64, i64* %a, i64 9
-  %3 = load i64, i64* %arrayidx.1, align 8
-  %arrayidx3.1 = getelementptr inbounds i64, i64* %a, i64 17
-  %4 = load i64, i64* %arrayidx3.1, align 8
+  store i64 %add6, ptr %a, align 8
+  %arrayidx.1 = getelementptr inbounds i64, ptr %a, i64 9
+  %3 = load i64, ptr %arrayidx.1, align 8
+  %arrayidx3.1 = getelementptr inbounds i64, ptr %a, i64 17
+  %4 = load i64, ptr %arrayidx3.1, align 8
   %mul.1 = mul nsw i64 %4, %3
-  %arrayidx5.1 = getelementptr inbounds i64, i64* %a, i64 1
-  %5 = load i64, i64* %arrayidx5.1, align 8
+  %arrayidx5.1 = getelementptr inbounds i64, ptr %a, i64 1
+  %5 = load i64, ptr %arrayidx5.1, align 8
   %add6.1 = add nsw i64 %5, %mul.1
-  store i64 %add6.1, i64* %arrayidx5.1, align 8
-  %arrayidx.2 = getelementptr inbounds i64, i64* %a, i64 10
-  %6 = load i64, i64* %arrayidx.2, align 8
-  %arrayidx3.2 = getelementptr inbounds i64, i64* %a, i64 18
-  %7 = load i64, i64* %arrayidx3.2, align 8
+  store i64 %add6.1, ptr %arrayidx5.1, align 8
+  %arrayidx.2 = getelementptr inbounds i64, ptr %a, i64 10
+  %6 = load i64, ptr %arrayidx.2, align 8
+  %arrayidx3.2 = getelementptr inbounds i64, ptr %a, i64 18
+  %7 = load i64, ptr %arrayidx3.2, align 8
   %mul.2 = mul nsw i64 %7, %6
-  %arrayidx5.2 = getelementptr inbounds i64, i64* %a, i64 2
-  %8 = load i64, i64* %arrayidx5.2, align 8
+  %arrayidx5.2 = getelementptr inbounds i64, ptr %a, i64 2
+  %8 = load i64, ptr %arrayidx5.2, align 8
   %add6.2 = add nsw i64 %8, %mul.2
-  store i64 %add6.2, i64* %arrayidx5.2, align 8
-  %arrayidx.3 = getelementptr inbounds i64, i64* %a, i64 11
-  %9 = load i64, i64* %arrayidx.3, align 8
-  %arrayidx3.3 = getelementptr inbounds i64, i64* %a, i64 19
-  %10 = load i64, i64* %arrayidx3.3, align 8
+  store i64 %add6.2, ptr %arrayidx5.2, align 8
+  %arrayidx.3 = getelementptr inbounds i64, ptr %a, i64 11
+  %9 = load i64, ptr %arrayidx.3, align 8
+  %arrayidx3.3 = getelementptr inbounds i64, ptr %a, i64 19
+  %10 = load i64, ptr %arrayidx3.3, align 8
   %mul.3 = mul nsw i64 %10, %9
-  %arrayidx5.3 = getelementptr inbounds i64, i64* %a, i64 3
-  %11 = load i64, i64* %arrayidx5.3, align 8
+  %arrayidx5.3 = getelementptr inbounds i64, ptr %a, i64 3
+  %11 = load i64, ptr %arrayidx5.3, align 8
   %add6.3 = add nsw i64 %11, %mul.3
-  store i64 %add6.3, i64* %arrayidx5.3, align 8
+  store i64 %add6.3, ptr %arrayidx5.3, align 8
   ret void
 }
 
@@ -206,12 +206,12 @@ entry:
 ; CHECK:SU(3):   STRWui %1:gpr32, %0:gpr64common, 0
 ; CHECK:SU(4):   %3:gpr32common = nsw ADDWri %2:gpr32common, 5, 0
 ; CHECK:SU(5):   STRWui %3:gpr32common, %0:gpr64common, 1
-define void @stp_missing_preds_edges(i32* %p, i32 %m, i32 %n) {
+define void @stp_missing_preds_edges(ptr %p, i32 %m, i32 %n) {
 entry:
-  store i32 %m, i32* %p, align 4
+  store i32 %m, ptr %p, align 4
   %add = add nsw i32 %n, 5
-  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 1
-  store i32 %add, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %p, i64 1
+  store i32 %add, ptr %arrayidx1, align 4
   ret void
 }
 
@@ -232,14 +232,13 @@ entry:
 ; CHECK-FAST-NOT: Cluster ld/st
 ; CHECK-FAST:SU(3):   STRWui %2:gpr32, %0:gpr64common, 0 ::
 ; CHECK-FAST:SU(4):   %3:gpr32 = LDRWui %1:gpr64common, 0 ::
-define i32 @cluster_with_different_preds(i32* %p, i32* %q) {
+define i32 @cluster_with_different_preds(ptr %p, ptr %q) {
 entry:
-  store i32 3, i32* %p, align 4
-  %0 = load i32, i32* %q, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %q, i64 1
-  %1 = bitcast i32* %add.ptr to i8*
-  store i8 5, i8* %1, align 1
-  %2 = load i32, i32* %add.ptr, align 4
-  %add = add nsw i32 %2, %0
+  store i32 3, ptr %p, align 4
+  %0 = load i32, ptr %q, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %q, i64 1
+  store i8 5, ptr %add.ptr, align 1
+  %1 = load i32, ptr %add.ptr, align 4
+  %add = add nsw i32 %1, %0
   ret i32 %add
 }

diff --git a/llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll b/llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll
index a150d0383698e..be07404f4b2fc 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll
@@ -6,14 +6,14 @@
 ; RUN: llc -mtriple=aarch64-none-linux -tail-dup-placement-threshold=4 < %s | FileCheck %s --check-prefix=CHECK-O2
 ; RUN: llc -mtriple=aarch64-none-linux -tail-dup-placement-threshold=6 < %s | FileCheck %s --check-prefix=CHECK-O3
 
-%a = type { %a*, i32, %b }
+%a = type { ptr, i32, %b }
 %b = type { %c }
 %c = type { i32, i32, [31 x i8] }
 
-@global_ptr = dso_local local_unnamed_addr global %a* null, align 8
+@global_ptr = dso_local local_unnamed_addr global ptr null, align 8
 @global_int = dso_local local_unnamed_addr global i32 0, align 4
 
-define dso_local void @testcase(%a** nocapture %arg){
+define dso_local void @testcase(ptr nocapture %arg){
 ; CHECK-O2-LABEL: testcase:
 ; CHECK-O2:       // %bb.0: // %entry
 ; CHECK-O2-NEXT:    adrp x8, global_ptr
@@ -55,23 +55,22 @@ define dso_local void @testcase(%a** nocapture %arg){
 ; CHECK-O3-NEXT:    ldr w1, [x9, :lo12:global_int]
 ; CHECK-O3-NEXT:    b externalfunc
 entry:
-  %0 = load %a*, %a** @global_ptr, align 8
-  %cmp.not = icmp eq %a* %0, null
+  %0 = load ptr, ptr @global_ptr, align 8
+  %cmp.not = icmp eq ptr %0, null
   br i1 %cmp.not, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  %1 = getelementptr inbounds %a, %a* %0, i64 0, i32 0
-  %2 = load %a*, %a** %1, align 8
-  store %a* %2, %a** %arg, align 8
-  %.pre = load %a*, %a** @global_ptr, align 8
+  %1 = load ptr, ptr %0, align 8
+  store ptr %1, ptr %arg, align 8
+  %.pre = load ptr, ptr @global_ptr, align 8
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
-  %3 = phi %a* [ %.pre, %if.then ], [ null, %entry ]
-  %4 = load i32, i32* @global_int, align 4
-  %5 = getelementptr inbounds %a, %a* %3, i64 0, i32 2, i32 0, i32 1
-  tail call void @externalfunc(i32 10, i32 %4, i32* nonnull %5)
+  %2 = phi ptr [ %.pre, %if.then ], [ null, %entry ]
+  %3 = load i32, ptr @global_int, align 4
+  %4 = getelementptr inbounds %a, ptr %2, i64 0, i32 2, i32 0, i32 1
+  tail call void @externalfunc(i32 10, i32 %3, ptr nonnull %4)
   ret void
 }
 
-declare dso_local void @externalfunc(i32, i32, i32*)
+declare dso_local void @externalfunc(i32, i32, ptr)

diff --git a/llvm/test/CodeGen/AArch64/aarch64-tbz.ll b/llvm/test/CodeGen/AArch64/aarch64-tbz.ll
index 8a57f9f9dc2d6..28629a8c2f0dd 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-tbz.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-tbz.ll
@@ -31,16 +31,16 @@ if.end3:                                          ; preds = %if.then2, %entry
 ; CHECK-NOT: and x{{[0-9]+}}, x[[REG1]], #0x08
 ; CHECK-NOT: cbz x{{[0-9]+}}, .LBB1_3
 
-define void @test2(i64 %A, i64* readonly %B) #0 {
+define void @test2(i64 %A, ptr readonly %B) #0 {
 entry:
-  %tobool = icmp eq i64* %B, null
+  %tobool = icmp eq ptr %B, null
   %and = and i64 %A, 8
   %tobool1 = icmp eq i64 %and, 0
   %or.cond = or i1 %tobool, %tobool1
   br i1 %or.cond, label %if.end3, label %if.then2
 
 if.then2:                                         ; preds = %entry
-  %0 = load i64, i64* %B, align 4
+  %0 = load i64, ptr %B, align 4
   tail call void @foo(i64 %A, i64 %0)
   br label %if.end3
 

diff --git a/llvm/test/CodeGen/AArch64/aarch64-tryBitfieldInsertOpFromOr-crash.ll b/llvm/test/CodeGen/AArch64/aarch64-tryBitfieldInsertOpFromOr-crash.ll
index 3c986ba2e5139..8d226398a4f48 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-tryBitfieldInsertOpFromOr-crash.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-tryBitfieldInsertOpFromOr-crash.ll
@@ -3,34 +3,34 @@ target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--linux-gnu"
 
 ; Function Attrs: noreturn nounwind
-define void @foo(i32* %d) {
+define void @foo(ptr %d) {
 entry:
-  %0 = ptrtoint i32* %d to i64
+  %0 = ptrtoint ptr %d to i64
   %1 = and i64 %0, -36028797018963969
-  %2 = inttoptr i64 %1 to i32*
-  %arrayidx5 = getelementptr inbounds i32, i32* %2, i64 1
-  %arrayidx6 = getelementptr inbounds i32, i32* %2, i64 2
-  %arrayidx7 = getelementptr inbounds i32, i32* %2, i64 3
+  %2 = inttoptr i64 %1 to ptr
+  %arrayidx5 = getelementptr inbounds i32, ptr %2, i64 1
+  %arrayidx6 = getelementptr inbounds i32, ptr %2, i64 2
+  %arrayidx7 = getelementptr inbounds i32, ptr %2, i64 3
   br label %for.cond
 
 for.cond:                                         ; preds = %for.cond, %entry
-  %B.0 = phi i32* [ %d, %entry ], [ %12, %for.cond ]
-  %3 = ptrtoint i32* %B.0 to i64
+  %B.0 = phi ptr [ %d, %entry ], [ %12, %for.cond ]
+  %3 = ptrtoint ptr %B.0 to i64
   %4 = and i64 %3, -36028797018963969
-  %5 = inttoptr i64 %4 to i32*
-  %6 = load i32, i32* %5, align 4
-  %arrayidx1 = getelementptr inbounds i32, i32* %5, i64 1
-  %7 = load i32, i32* %arrayidx1, align 4
-  %arrayidx2 = getelementptr inbounds i32, i32* %5, i64 2
-  %8 = load i32, i32* %arrayidx2, align 4
-  %arrayidx3 = getelementptr inbounds i32, i32* %5, i64 3
-  %9 = load i32, i32* %arrayidx3, align 4
-  store i32 %6, i32* %2, align 4
-  store i32 %7, i32* %arrayidx5, align 4
-  store i32 %8, i32* %arrayidx6, align 4
-  store i32 %9, i32* %arrayidx7, align 4
-  %10 = ptrtoint i32* %arrayidx1 to i64
+  %5 = inttoptr i64 %4 to ptr
+  %6 = load i32, ptr %5, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %5, i64 1
+  %7 = load i32, ptr %arrayidx1, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %5, i64 2
+  %8 = load i32, ptr %arrayidx2, align 4
+  %arrayidx3 = getelementptr inbounds i32, ptr %5, i64 3
+  %9 = load i32, ptr %arrayidx3, align 4
+  store i32 %6, ptr %2, align 4
+  store i32 %7, ptr %arrayidx5, align 4
+  store i32 %8, ptr %arrayidx6, align 4
+  store i32 %9, ptr %arrayidx7, align 4
+  %10 = ptrtoint ptr %arrayidx1 to i64
   %11 = or i64 %10, 36028797018963968
-  %12 = inttoptr i64 %11 to i32*
+  %12 = inttoptr i64 %11 to ptr
   br label %for.cond
 }

diff --git a/llvm/test/CodeGen/AArch64/aarch64-unroll-and-jam.ll b/llvm/test/CodeGen/AArch64/aarch64-unroll-and-jam.ll
index 89122d080e4b9..af5f6a9d6924b 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-unroll-and-jam.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-unroll-and-jam.ll
@@ -3,7 +3,7 @@
 
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 
-define void @unj(i32 %I, i32 %argj, i32* noalias nocapture %A, i32* noalias nocapture readonly %B) #0 {
+define void @unj(i32 %I, i32 %argj, ptr noalias nocapture %A, ptr noalias nocapture readonly %B) #0 {
 ; CHECK-LABEL: @unj(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[ARGJ:%.*]], 0
@@ -21,23 +21,23 @@ define void @unj(i32 %I, i32 %argj, i32* noalias nocapture %A, i32* noalias noca
 ; CHECK-NEXT:    [[SUM_2:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[ADD_2:%.*]], [[FOR_INNER]] ]
 ; CHECK-NEXT:    [[J_3:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[INC_3:%.*]], [[FOR_INNER]] ]
 ; CHECK-NEXT:    [[SUM_3:%.*]] = phi i32 [ 0, [[FOR_OUTER]] ], [ [[ADD_3:%.*]], [[FOR_INNER]] ]
-; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[J]]
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[J]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[SUB:%.*]] = add i32 [[SUM]], 10
 ; CHECK-NEXT:    [[ADD]] = sub i32 [[SUB]], [[TMP0]]
 ; CHECK-NEXT:    [[INC]] = add nuw i32 [[J]], 1
-; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_1]]
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX_1]], align 4
 ; CHECK-NEXT:    [[SUB_1:%.*]] = add i32 [[SUM_1]], 10
 ; CHECK-NEXT:    [[ADD_1]] = sub i32 [[SUB_1]], [[TMP1]]
 ; CHECK-NEXT:    [[INC_1]] = add nuw i32 [[J_1]], 1
-; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_2]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_2]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX_2]], align 4
 ; CHECK-NEXT:    [[SUB_2:%.*]] = add i32 [[SUM_2]], 10
 ; CHECK-NEXT:    [[ADD_2]] = sub i32 [[SUB_2]], [[TMP2]]
 ; CHECK-NEXT:    [[INC_2]] = add nuw i32 [[J_2]], 1
-; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 [[J_3]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[J_3]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX_3]], align 4
 ; CHECK-NEXT:    [[SUB_3:%.*]] = add i32 [[SUM_3]], 10
 ; CHECK-NEXT:    [[ADD_3]] = sub i32 [[SUB_3]], [[TMP3]]
 ; CHECK-NEXT:    [[INC_3]] = add nuw i32 [[J_3]], 1
@@ -48,13 +48,13 @@ define void @unj(i32 %I, i32 %argj, i32* noalias nocapture %A, i32* noalias noca
 ; CHECK-NEXT:    [[ADD_LCSSA_1:%.*]] = phi i32 [ [[ADD_1]], [[FOR_INNER]] ]
 ; CHECK-NEXT:    [[ADD_LCSSA_2:%.*]] = phi i32 [ [[ADD_2]], [[FOR_INNER]] ]
 ; CHECK-NEXT:    [[ADD_LCSSA_3:%.*]] = phi i32 [ [[ADD_3]], [[FOR_INNER]] ]
-; CHECK-NEXT:    store i32 [[ADD_LCSSA]], i32* [[A:%.*]], align 4
-; CHECK-NEXT:    [[ARRAYIDX6_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 1
-; CHECK-NEXT:    store i32 [[ADD_LCSSA_1]], i32* [[ARRAYIDX6_1]], align 4
-; CHECK-NEXT:    [[ARRAYIDX6_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 2
-; CHECK-NEXT:    store i32 [[ADD_LCSSA_2]], i32* [[ARRAYIDX6_2]], align 4
-; CHECK-NEXT:    [[ARRAYIDX6_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 3
-; CHECK-NEXT:    store i32 [[ADD_LCSSA_3]], i32* [[ARRAYIDX6_3]], align 4
+; CHECK-NEXT:    store i32 [[ADD_LCSSA]], ptr [[A:%.*]], align 4
+; CHECK-NEXT:    [[ARRAYIDX6_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 1
+; CHECK-NEXT:    store i32 [[ADD_LCSSA_1]], ptr [[ARRAYIDX6_1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX6_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 2
+; CHECK-NEXT:    store i32 [[ADD_LCSSA_2]], ptr [[ARRAYIDX6_2]], align 4
+; CHECK-NEXT:    [[ARRAYIDX6_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 3
+; CHECK-NEXT:    store i32 [[ADD_LCSSA_3]], ptr [[ARRAYIDX6_3]], align 4
 ; CHECK-NEXT:    br label [[FOR_END_LOOPEXIT:%.*]]
 ; CHECK:       for.end.loopexit:
 ; CHECK-NEXT:    br label [[FOR_END]]
@@ -75,8 +75,8 @@ for.outer:
 for.inner:
   %j = phi i32 [ 0, %for.outer ], [ %inc, %for.inner ]
   %sum = phi i32 [ 0, %for.outer ], [ %add, %for.inner ]
-  %arrayidx = getelementptr inbounds i32, i32* %B, i32 %j
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %B, i32 %j
+  %0 = load i32, ptr %arrayidx, align 4
   %sub = add i32 %sum, 10
   %add = sub i32 %sub, %0
   %inc = add nuw i32 %j, 1
@@ -84,8 +84,8 @@ for.inner:
   br i1 %exitcond, label %for.latch, label %for.inner
 
 for.latch:
-  %arrayidx6 = getelementptr inbounds i32, i32* %A, i32 %i
-  store i32 %add, i32* %arrayidx6, align 4
+  %arrayidx6 = getelementptr inbounds i32, ptr %A, i32 %i
+  store i32 %add, ptr %arrayidx6, align 4
   %add8 = add nuw nsw i32 %i, 1
   %exitcond23 = icmp eq i32 %add8, 4
   br i1 %exitcond23, label %for.end, label %for.outer

diff --git a/llvm/test/CodeGen/AArch64/aarch64-vcvtfp2fxs-combine.ll b/llvm/test/CodeGen/AArch64/aarch64-vcvtfp2fxs-combine.ll
index a71b5e86138df..463084e6fe6a1 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-vcvtfp2fxs-combine.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-vcvtfp2fxs-combine.ll
@@ -12,9 +12,9 @@ define void @fun1() local_unnamed_addr {
 entry:
   %mul = fmul <4 x double> zeroinitializer, <double 6.553600e+04, double 6.553600e+04, double 6.553600e+04, double 6.553600e+04>
   %toi = fptosi <4 x double> %mul to <4 x i64>
-  %ptr = getelementptr inbounds %struct.a, %struct.a* undef, i64 0, i32 2
+  %ptr = getelementptr inbounds %struct.a, ptr undef, i64 0, i32 2
   %elem = extractelement <4 x i64> %toi, i32 1
-  store i64 %elem, i64* %ptr, align 8
+  store i64 %elem, ptr %ptr, align 8
   call void @llvm.trap()
   unreachable
 }

diff --git a/llvm/test/CodeGen/AArch64/aarch64-vectorcombine-invalid-extract-index-crash.ll b/llvm/test/CodeGen/AArch64/aarch64-vectorcombine-invalid-extract-index-crash.ll
index e17add462ff8c..2430551f7a9f4 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-vectorcombine-invalid-extract-index-crash.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-vectorcombine-invalid-extract-index-crash.ll
@@ -5,9 +5,9 @@
 ;
 
 target triple = "aarch64-unknown-linux-gnu"
-define void @test_crash(i8* %dst_ptr) {
+define void @test_crash(ptr %dst_ptr) {
 entry:
-  %vec_load = load <4 x i16>, <4 x i16>* undef, align 8
+  %vec_load = load <4 x i16>, ptr undef, align 8
   %0 = sext <4 x i16> %vec_load to <4 x i32>
   %add71vec = add nsw <4 x i32> %0, <i32 32, i32 32, i32 32, i32 32>
   %add104vec = add nsw <4 x i32> %add71vec, zeroinitializer
@@ -16,13 +16,12 @@ entry:
   %1 = trunc <4 x i32> %vec to <4 x i16>
   %2 = shufflevector <4 x i16> %1, <4 x i16> undef, <2 x i32> <i32 1, i32 2>
   %3 = sext <2 x i16> %2 to <2 x i32>
-  %4 = bitcast i8* %dst_ptr to <4 x i8>*
-  %5 = shufflevector <2 x i32> %3, <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
-  %6 = shufflevector <4 x i32> undef, <4 x i32> %5, <4 x i32> <i32 0, i32 4, i32 5, i32 undef>
-  %7 = insertelement <4 x i32> %6, i32 undef, i64 3
-  %8 = add nsw <4 x i32> %7, zeroinitializer
-  %9 = select <4 x i1> zeroinitializer, <4 x i32> %8, <4 x i32> undef
-  %10 = trunc <4 x i32> %9 to <4 x i8>
-  store <4 x i8> %10, <4 x i8>* %4, align 1
+  %4 = shufflevector <2 x i32> %3, <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+  %5 = shufflevector <4 x i32> undef, <4 x i32> %4, <4 x i32> <i32 0, i32 4, i32 5, i32 undef>
+  %6 = insertelement <4 x i32> %5, i32 undef, i64 3
+  %7 = add nsw <4 x i32> %6, zeroinitializer
+  %8 = select <4 x i1> zeroinitializer, <4 x i32> %7, <4 x i32> undef
+  %9 = trunc <4 x i32> %8 to <4 x i8>
+  store <4 x i8> %9, ptr %dst_ptr, align 1
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/aarch64-vuzp.ll b/llvm/test/CodeGen/AArch64/aarch64-vuzp.ll
index a7b20f25557ca..52457f8d4cab1 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-vuzp.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-vuzp.ll
@@ -8,8 +8,8 @@ define i32 @fun1() {
 entry:
   %vtbl1.i.1 = tail call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> <i8 0, i8 16, i8 19, i8 4, i8 -65, i8 -65, i8 -71, i8 -71, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8> undef)
   %vuzp.i212.1 = shufflevector <16 x i8> %vtbl1.i.1, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-  %scevgep = getelementptr <8 x i8>, <8 x i8>* undef, i64 1
-  store <8 x i8> %vuzp.i212.1, <8 x i8>* %scevgep, align 1
+  %scevgep = getelementptr <8 x i8>, ptr undef, i64 1
+  store <8 x i8> %vuzp.i212.1, ptr %scevgep, align 1
   ret i32 undef
 }
 
@@ -19,8 +19,8 @@ define i32 @fun2() {
 entry:
   %vtbl1.i.1 = tail call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> <i8 0, i8 16, i8 19, i8 4, i8 -65, i8 -65, i8 -71, i8 -71, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8> undef)
   %vuzp.i212.1 = shufflevector <16 x i8> %vtbl1.i.1, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-  %scevgep = getelementptr <8 x i8>, <8 x i8>* undef, i64 1
-  store <8 x i8> %vuzp.i212.1, <8 x i8>* %scevgep, align 1
+  %scevgep = getelementptr <8 x i8>, ptr undef, i64 1
+  store <8 x i8> %vuzp.i212.1, ptr %scevgep, align 1
   ret i32 undef
 }
 
@@ -30,8 +30,8 @@ define i32 @fun3() {
 entry:
   %vtbl1.i.1 = tail call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> <i8 0, i8 16, i8 19, i8 4, i8 -65, i8 -65, i8 -71, i8 -71, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8> undef)
   %vuzp.i212.1 = shufflevector <16 x i8> %vtbl1.i.1, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 15>
-  %scevgep = getelementptr <8 x i8>, <8 x i8>* undef, i64 1
-  store <8 x i8> %vuzp.i212.1, <8 x i8>* %scevgep, align 1
+  %scevgep = getelementptr <8 x i8>, ptr undef, i64 1
+  store <8 x i8> %vuzp.i212.1, ptr %scevgep, align 1
   ret i32 undef
 }
 
@@ -41,21 +41,19 @@ define i32 @fun4() {
 entry:
   %vtbl1.i.1 = tail call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> <i8 0, i8 16, i8 19, i8 4, i8 -65, i8 -65, i8 -71, i8 -71, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8> undef)
   %vuzp.i212.1 = shufflevector <16 x i8> %vtbl1.i.1, <16 x i8> undef, <8 x i32> <i32 3, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-  %scevgep = getelementptr <8 x i8>, <8 x i8>* undef, i64 1
-  store <8 x i8> %vuzp.i212.1, <8 x i8>* %scevgep, align 1
+  %scevgep = getelementptr <8 x i8>, ptr undef, i64 1
+  store <8 x i8> %vuzp.i212.1, ptr %scevgep, align 1
   ret i32 undef
 }
 
 ; CHECK-LABEL: pr36582:
 ; Check that this does not ICE.
-define void @pr36582(i8* %p1, i32* %p2) {
+define void @pr36582(ptr %p1, ptr %p2) {
 entry:
-  %x = bitcast i8* %p1 to <8 x i8>*
-  %wide.vec = load <8 x i8>, <8 x i8>* %x, align 1
+  %wide.vec = load <8 x i8>, ptr %p1, align 1
   %strided.vec = shufflevector <8 x i8> %wide.vec, <8 x i8> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %y = zext <4 x i8> %strided.vec to <4 x i32>
-  %z = bitcast i32* %p2 to <4 x i32>*
-  store <4 x i32> %y, <4 x i32>* %z, align 4
+  store <4 x i32> %y, ptr %p2, align 4
   ret void
 }
 
@@ -63,8 +61,8 @@ entry:
 ; that the vector blend transform does not scramble the pattern.
 ; CHECK-LABEL: vzipNoBlend:
 ; CHECK: zip1
-define <8 x i8> @vzipNoBlend(<8 x i8>* %A, <8 x i16>* %B) nounwind {
-  %t = load <8 x i8>, <8 x i8>* %A
+define <8 x i8> @vzipNoBlend(ptr %A, ptr %B) nounwind {
+  %t = load <8 x i8>, ptr %A
   %vzip = shufflevector <8 x i8> %t, <8 x i8> <i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef>, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
   ret <8 x i8> %vzip
 }

diff --git a/llvm/test/CodeGen/AArch64/aarch64_f16_be.ll b/llvm/test/CodeGen/AArch64/aarch64_f16_be.ll
index b51798be16978..3c26062936250 100644
--- a/llvm/test/CodeGen/AArch64/aarch64_f16_be.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64_f16_be.ll
@@ -10,7 +10,7 @@ define void @test_bitcast_v8f16_to_v4f32(<8 x half> %a) {
 
   %x = alloca <4 x float>, align 16
   %y = bitcast <8 x half> %a to <4 x float>
-  store <4 x float> %y, <4 x float>* %x, align 16
+  store <4 x float> %y, ptr %x, align 16
   ret void
 }
 
@@ -23,7 +23,7 @@ define void @test_bitcast_v8f16_to_v2f64(<8 x half> %a) {
 
   %x = alloca <2 x double>, align 16
   %y = bitcast <8 x half> %a to <2 x double>
-  store <2 x double> %y, <2 x double>* %x, align 16
+  store <2 x double> %y, ptr %x, align 16
   ret void
 }
 
@@ -36,7 +36,7 @@ define void @test_bitcast_v8f16_to_fp128(<8 x half> %a) {
 
   %x = alloca fp128, align 16
   %y = bitcast <8 x half> %a to fp128
-  store fp128 %y, fp128* %x, align 16
+  store fp128 %y, ptr %x, align 16
   ret void
 }
 
@@ -49,7 +49,7 @@ define void @test_bitcast_v4f16_to_v2f32(<4 x half> %a) {
 
   %x = alloca <2 x float>, align 8
   %y = bitcast <4 x half> %a to <2 x float>
-  store <2 x float> %y, <2 x float>* %x, align 8
+  store <2 x float> %y, ptr %x, align 8
   ret void
 }
 
@@ -62,6 +62,6 @@ define void @test_bitcast_v4f16_to_v1f64(<4 x half> %a) {
 
   %x = alloca <1 x double>, align 8
   %y = bitcast <4 x half> %a to <1 x double>
-  store <1 x double> %y, <1 x double>* %x, align 8
+  store <1 x double> %y, ptr %x, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll b/llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
index 50ec43d8862ac..e7c3db33814f0 100644
--- a/llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
@@ -18,20 +18,19 @@ define win64cc void @pass_va(i32 %count, ...) nounwind {
 ; CHECK-NEXT:    add sp, sp, #96
 ; CHECK-NEXT:    ret
 entry:
-  %ap = alloca i8*, align 8
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %ap2 = load i8*, i8** %ap, align 8
-  call void @other_func(i8* %ap2)
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
+  %ap2 = load ptr, ptr %ap, align 8
+  call void @other_func(ptr %ap2)
   ret void
 }
 
-declare void @other_func(i8*) local_unnamed_addr
+declare void @other_func(ptr) local_unnamed_addr
 
-declare void @llvm.va_start(i8*) nounwind
-declare void @llvm.va_copy(i8*, i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind
+declare void @llvm.va_copy(ptr, ptr) nounwind
 
-define win64cc i8* @f9(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, i64 %a8, ...) nounwind {
+define win64cc ptr @f9(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, i64 %a8, ...) nounwind {
 ; CHECK-LABEL: f9:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x18, [sp, #-16]! // 8-byte Folded Spill
@@ -41,14 +40,13 @@ define win64cc i8* @f9(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64
 ; CHECK-NEXT:    ldr x18, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
-  %ap = alloca i8*, align 8
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %ap2 = load i8*, i8** %ap, align 8
-  ret i8* %ap2
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
+  %ap2 = load ptr, ptr %ap, align 8
+  ret ptr %ap2
 }
 
-define win64cc i8* @f8(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, ...) nounwind {
+define win64cc ptr @f8(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, ...) nounwind {
 ; CHECK-LABEL: f8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x18, [sp, #-16]! // 8-byte Folded Spill
@@ -58,14 +56,13 @@ define win64cc i8* @f8(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64
 ; CHECK-NEXT:    ldr x18, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
-  %ap = alloca i8*, align 8
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %ap2 = load i8*, i8** %ap, align 8
-  ret i8* %ap2
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
+  %ap2 = load ptr, ptr %ap, align 8
+  ret ptr %ap2
 }
 
-define win64cc i8* @f7(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, ...) nounwind {
+define win64cc ptr @f7(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, ...) nounwind {
 ; CHECK-LABEL: f7:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x18, [sp, #-32]! // 8-byte Folded Spill
@@ -76,9 +73,8 @@ define win64cc i8* @f7(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64
 ; CHECK-NEXT:    ldr x18, [sp], #32 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
-  %ap = alloca i8*, align 8
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %ap2 = load i8*, i8** %ap, align 8
-  ret i8* %ap2
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
+  %ap2 = load ptr, ptr %ap, align 8
+  ret ptr %ap2
 }

diff --git a/llvm/test/CodeGen/AArch64/addcarry-crash.ll b/llvm/test/CodeGen/AArch64/addcarry-crash.ll
index 91c7ee7292c63..0df9131f3c554 100644
--- a/llvm/test/CodeGen/AArch64/addcarry-crash.ll
+++ b/llvm/test/CodeGen/AArch64/addcarry-crash.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s | FileCheck %s
 target triple = "arm64-apple-ios7.0"
 
-define i64 @foo(i64* nocapture readonly %ptr, i64 %a, i64 %b, i64 %c) local_unnamed_addr #0 {
+define i64 @foo(ptr nocapture readonly %ptr, i64 %a, i64 %b, i64 %c) local_unnamed_addr #0 {
 ; CHECK-LABEL: foo:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    ldr w8, [x0, #4]
@@ -13,7 +13,7 @@ define i64 @foo(i64* nocapture readonly %ptr, i64 %a, i64 %b, i64 %c) local_unna
 ; CHECK-NEXT:    ret
 entry:
   %0 = lshr i64 %a, 32
-  %1 = load i64, i64* %ptr, align 8
+  %1 = load i64, ptr %ptr, align 8
   %2 = lshr i64 %1, 32
   %3 = mul nuw i64 %2, %0
   %4 = add i64 %c, %b

diff --git a/llvm/test/CodeGen/AArch64/addr-of-ret-addr.ll b/llvm/test/CodeGen/AArch64/addr-of-ret-addr.ll
index a6bc36441b115..2de708d66f59f 100644
--- a/llvm/test/CodeGen/AArch64/addr-of-ret-addr.ll
+++ b/llvm/test/CodeGen/AArch64/addr-of-ret-addr.ll
@@ -2,22 +2,22 @@
 
 ; Test generated from C code:
 ; #include <stdarg.h>
-; void *foo() {
+; ptr foo() {
 ;   return _AddressOfReturnAddress();
 ; }
-; int bar(int x(va_list, void*), ...) {
+; int bar(int x(va_list, ptr), ...) {
 ;   va_list y;
 ;   va_start(y, x);
 ;   return x(y, _AddressOfReturnAddress()) + 1;
 ; }
 
-declare void @llvm.va_start(i8*)
-declare i8* @llvm.addressofreturnaddress()
+declare void @llvm.va_start(ptr)
+declare ptr @llvm.addressofreturnaddress()
 
-define dso_local i8* @"foo"() {
+define dso_local ptr @"foo"() {
 entry:
-  %0 = call i8* @llvm.addressofreturnaddress()
-  ret i8* %0
+  %0 = call ptr @llvm.addressofreturnaddress()
+  ret ptr %0
 
 ; CHECK-LABEL: foo
 ; CHECK: stp x29, x30, [sp, #-16]!
@@ -26,17 +26,16 @@ entry:
 ; CHECK: ldp x29, x30, [sp], #16
 }
 
-define dso_local i32 @"bar"(i32 (i8*, i8*)* %x, ...) {
+define dso_local i32 @"bar"(ptr %x, ...) {
 entry:
-  %x.addr = alloca i32 (i8*, i8*)*, align 8
-  %y = alloca i8*, align 8
-  store i32 (i8*, i8*)* %x, i32 (i8*, i8*)** %x.addr, align 8
-  %y1 = bitcast i8** %y to i8*
-  call void @llvm.va_start(i8* %y1)
-  %0 = load i32 (i8*, i8*)*, i32 (i8*, i8*)** %x.addr, align 8
-  %1 = call i8* @llvm.addressofreturnaddress()
-  %2 = load i8*, i8** %y, align 8
-  %call = call i32 %0(i8* %2, i8* %1)
+  %x.addr = alloca ptr, align 8
+  %y = alloca ptr, align 8
+  store ptr %x, ptr %x.addr, align 8
+  call void @llvm.va_start(ptr %y)
+  %0 = load ptr, ptr %x.addr, align 8
+  %1 = call ptr @llvm.addressofreturnaddress()
+  %2 = load ptr, ptr %y, align 8
+  %call = call i32 %0(ptr %2, ptr %1)
   %add = add nsw i32 %call, 1
   ret i32 %add
 

diff --git a/llvm/test/CodeGen/AArch64/addsub-shifted.ll b/llvm/test/CodeGen/AArch64/addsub-shifted.ll
index d5c6eacc74235..2580d3532ba0d 100644
--- a/llvm/test/CodeGen/AArch64/addsub-shifted.ll
+++ b/llvm/test/CodeGen/AArch64/addsub-shifted.ll
@@ -9,66 +9,66 @@
 define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
 ; CHECK-LABEL: test_lsl_arith:
 
-  %rhs1 = load volatile i32, i32* @var32
+  %rhs1 = load volatile i32, ptr @var32
   %shift1 = shl i32 %rhs1, 18
   %val1 = add i32 %lhs32, %shift1
-  store volatile i32 %val1, i32* @var32
+  store volatile i32 %val1, ptr @var32
 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #18
 
-  %rhs2 = load volatile i32, i32* @var32
+  %rhs2 = load volatile i32, ptr @var32
   %shift2 = shl i32 %rhs2, 31
   %val2 = add i32 %shift2, %lhs32
-  store volatile i32 %val2, i32* @var32
+  store volatile i32 %val2, ptr @var32
 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
 
-  %rhs3 = load volatile i32, i32* @var32
+  %rhs3 = load volatile i32, ptr @var32
   %shift3 = shl i32 %rhs3, 5
   %val3 = sub i32 %lhs32, %shift3
-  store volatile i32 %val3, i32* @var32
+  store volatile i32 %val3, ptr @var32
 ; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #5
 
 ; Subtraction is not commutative!
-  %rhs4 = load volatile i32, i32* @var32
+  %rhs4 = load volatile i32, ptr @var32
   %shift4 = shl i32 %rhs4, 19
   %val4 = sub i32 %shift4, %lhs32
-  store volatile i32 %val4, i32* @var32
+  store volatile i32 %val4, ptr @var32
 ; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #19
 
-  %lhs4a = load volatile i32, i32* @var32
+  %lhs4a = load volatile i32, ptr @var32
   %shift4a = shl i32 %lhs4a, 15
   %val4a = sub i32 0, %shift4a
-  store volatile i32 %val4a, i32* @var32
+  store volatile i32 %val4a, ptr @var32
 ; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsl #15
 
-  %rhs5 = load volatile i64, i64* @var64
+  %rhs5 = load volatile i64, ptr @var64
   %shift5 = shl i64 %rhs5, 18
   %val5 = add i64 %lhs64, %shift5
-  store volatile i64 %val5, i64* @var64
+  store volatile i64 %val5, ptr @var64
 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #18
 
-  %rhs6 = load volatile i64, i64* @var64
+  %rhs6 = load volatile i64, ptr @var64
   %shift6 = shl i64 %rhs6, 31
   %val6 = add i64 %shift6, %lhs64
-  store volatile i64 %val6, i64* @var64
+  store volatile i64 %val6, ptr @var64
 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #31
 
-  %rhs7 = load volatile i64, i64* @var64
+  %rhs7 = load volatile i64, ptr @var64
   %shift7 = shl i64 %rhs7, 5
   %val7 = sub i64 %lhs64, %shift7
-  store volatile i64 %val7, i64* @var64
+  store volatile i64 %val7, ptr @var64
 ; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #5
 
 ; Subtraction is not commutative!
-  %rhs8 = load volatile i64, i64* @var64
+  %rhs8 = load volatile i64, ptr @var64
   %shift8 = shl i64 %rhs8, 19
   %val8 = sub i64 %shift8, %lhs64
-  store volatile i64 %val8, i64* @var64
+  store volatile i64 %val8, ptr @var64
 ; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #19
 
-  %lhs8a = load volatile i64, i64* @var64
+  %lhs8a = load volatile i64, ptr @var64
   %shift8a = shl i64 %lhs8a, 60
   %val8a = sub i64 0, %shift8a
-  store volatile i64 %val8a, i64* @var64
+  store volatile i64 %val8a, ptr @var64
 ; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsl #60
 
   ret void
@@ -80,54 +80,54 @@ define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
 
   %shift1 = lshr i32 %rhs32, 18
   %val1 = add i32 %lhs32, %shift1
-  store volatile i32 %val1, i32* @var32
+  store volatile i32 %val1, ptr @var32
 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #18
 
   %shift2 = lshr i32 %rhs32, 31
   %val2 = add i32 %shift2, %lhs32
-  store volatile i32 %val2, i32* @var32
+  store volatile i32 %val2, ptr @var32
 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #31
 
   %shift3 = lshr i32 %rhs32, 5
   %val3 = sub i32 %lhs32, %shift3
-  store volatile i32 %val3, i32* @var32
+  store volatile i32 %val3, ptr @var32
 ; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #5
 
 ; Subtraction is not commutative!
   %shift4 = lshr i32 %rhs32, 19
   %val4 = sub i32 %shift4, %lhs32
-  store volatile i32 %val4, i32* @var32
+  store volatile i32 %val4, ptr @var32
 ; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #19
 
   %shift4a = lshr i32 %lhs32, 15
   %val4a = sub i32 0, %shift4a
-  store volatile i32 %val4a, i32* @var32
+  store volatile i32 %val4a, ptr @var32
 ; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsr #15
 
   %shift5 = lshr i64 %rhs64, 18
   %val5 = add i64 %lhs64, %shift5
-  store volatile i64 %val5, i64* @var64
+  store volatile i64 %val5, ptr @var64
 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #18
 
   %shift6 = lshr i64 %rhs64, 31
   %val6 = add i64 %shift6, %lhs64
-  store volatile i64 %val6, i64* @var64
+  store volatile i64 %val6, ptr @var64
 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #31
 
   %shift7 = lshr i64 %rhs64, 5
   %val7 = sub i64 %lhs64, %shift7
-  store volatile i64 %val7, i64* @var64
+  store volatile i64 %val7, ptr @var64
 ; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #5
 
 ; Subtraction is not commutative!
   %shift8 = lshr i64 %rhs64, 19
   %val8 = sub i64 %shift8, %lhs64
-  store volatile i64 %val8, i64* @var64
+  store volatile i64 %val8, ptr @var64
 ; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #19
 
   %shift8a = lshr i64 %lhs64, 45
   %val8a = sub i64 0, %shift8a
-  store volatile i64 %val8a, i64* @var64
+  store volatile i64 %val8a, ptr @var64
 ; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsr #45
 
   ret void
@@ -139,54 +139,54 @@ define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
 
   %shift1 = ashr i32 %rhs32, 18
   %val1 = add i32 %lhs32, %shift1
-  store volatile i32 %val1, i32* @var32
+  store volatile i32 %val1, ptr @var32
 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #18
 
   %shift2 = ashr i32 %rhs32, 31
   %val2 = add i32 %shift2, %lhs32
-  store volatile i32 %val2, i32* @var32
+  store volatile i32 %val2, ptr @var32
 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #31
 
   %shift3 = ashr i32 %rhs32, 5
   %val3 = sub i32 %lhs32, %shift3
-  store volatile i32 %val3, i32* @var32
+  store volatile i32 %val3, ptr @var32
 ; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #5
 
 ; Subtraction is not commutative!
   %shift4 = ashr i32 %rhs32, 19
   %val4 = sub i32 %shift4, %lhs32
-  store volatile i32 %val4, i32* @var32
+  store volatile i32 %val4, ptr @var32
 ; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #19
 
   %shift4a = ashr i32 %lhs32, 15
   %val4a = sub i32 0, %shift4a
-  store volatile i32 %val4a, i32* @var32
+  store volatile i32 %val4a, ptr @var32
 ; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, asr #15
 
   %shift5 = ashr i64 %rhs64, 18
   %val5 = add i64 %lhs64, %shift5
-  store volatile i64 %val5, i64* @var64
+  store volatile i64 %val5, ptr @var64
 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #18
 
   %shift6 = ashr i64 %rhs64, 31
   %val6 = add i64 %shift6, %lhs64
-  store volatile i64 %val6, i64* @var64
+  store volatile i64 %val6, ptr @var64
 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #31
 
   %shift7 = ashr i64 %rhs64, 5
   %val7 = sub i64 %lhs64, %shift7
-  store volatile i64 %val7, i64* @var64
+  store volatile i64 %val7, ptr @var64
 ; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #5
 
 ; Subtraction is not commutative!
   %shift8 = ashr i64 %rhs64, 19
   %val8 = sub i64 %shift8, %lhs64
-  store volatile i64 %val8, i64* @var64
+  store volatile i64 %val8, ptr @var64
 ; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #19
 
   %shift8a = ashr i64 %lhs64, 45
   %val8a = sub i64 0, %shift8a
-  store volatile i64 %val8a, i64* @var64
+  store volatile i64 %val8a, ptr @var64
 ; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, asr #45
 
   ret void
@@ -202,42 +202,42 @@ define void @test_cmp(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64, i32 %v) {
 ; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsl #13
 
 t2:
-  store volatile i32 %v, i32* @var32
+  store volatile i32 %v, ptr @var32
   %shift2 = lshr i32 %rhs32, 20
   %tst2 = icmp ne i32 %lhs32, %shift2
   br i1 %tst2, label %t3, label %end
 ; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsr #20
 
 t3:
-  store volatile i32 %v, i32* @var32
+  store volatile i32 %v, ptr @var32
   %shift3 = ashr i32 %rhs32, 9
   %tst3 = icmp ne i32 %lhs32, %shift3
   br i1 %tst3, label %t4, label %end
 ; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, asr #9
 
 t4:
-  store volatile i32 %v, i32* @var32
+  store volatile i32 %v, ptr @var32
   %shift4 = shl i64 %rhs64, 43
   %tst4 = icmp uge i64 %lhs64, %shift4
   br i1 %tst4, label %t5, label %end
 ; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsl #43
 
 t5:
-  store volatile i32 %v, i32* @var32
+  store volatile i32 %v, ptr @var32
   %shift5 = lshr i64 %rhs64, 20
   %tst5 = icmp ne i64 %lhs64, %shift5
   br i1 %tst5, label %t6, label %end
 ; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsr #20
 
 t6:
-  store volatile i32 %v, i32* @var32
+  store volatile i32 %v, ptr @var32
   %shift6 = ashr i64 %rhs64, 59
   %tst6 = icmp ne i64 %lhs64, %shift6
   br i1 %tst6, label %t7, label %end
 ; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, asr #59
 
 t7:
-  store volatile i32 %v, i32* @var32
+  store volatile i32 %v, ptr @var32
   br label %end
 
 end:

diff --git a/llvm/test/CodeGen/AArch64/addsub.ll b/llvm/test/CodeGen/AArch64/addsub.ll
index 3020576e0bbf0..3848a3304c7dc 100644
--- a/llvm/test/CodeGen/AArch64/addsub.ll
+++ b/llvm/test/CodeGen/AArch64/addsub.ll
@@ -25,13 +25,13 @@ define void @add_small() {
 ; CHECK-NEXT:    str x11, [x9]
 ; CHECK-NEXT:    ret
 
-  %val32 = load i32, i32* @var_i32
+  %val32 = load i32, ptr @var_i32
   %newval32 = add i32 %val32, 4095
-  store i32 %newval32, i32* @var_i32
+  store i32 %newval32, ptr @var_i32
 
-  %val64 = load i64, i64* @var_i64
+  %val64 = load i64, ptr @var_i64
   %newval64 = add i64 %val64, 52
-  store i64 %newval64, i64* @var_i64
+  store i64 %newval64, ptr @var_i64
 
   ret void
 }
@@ -45,7 +45,7 @@ define void @add_small() {
 ; whereas this can be achieved with:
 ; wA = ldrb
 ; xC = add xA, #12 ; <- xA implicitly zero extend wA.
-define void @add_small_imm(i8* %p, i64* %q, i32 %b, i32* %addr) {
+define void @add_small_imm(ptr %p, ptr %q, i32 %b, ptr %addr) {
 ; CHECK-LABEL: add_small_imm:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -56,15 +56,15 @@ define void @add_small_imm(i8* %p, i64* %q, i32 %b, i32* %addr) {
 ; CHECK-NEXT:    ret
 entry:
 
-  %t = load i8, i8* %p
+  %t = load i8, ptr %p
   %promoted = zext i8 %t to i64
   %zextt = zext i8 %t to i32
   %add = add nuw i32 %zextt, %b
 
   %add2 = add nuw i64 %promoted, 12
-  store i32 %add, i32* %addr
+  store i32 %add, ptr %addr
 
-  store i64 %add2, i64* %q
+  store i64 %add2, ptr %q
   ret void
 }
 
@@ -84,13 +84,13 @@ define void @add_med() {
 ; CHECK-NEXT:    str x11, [x9]
 ; CHECK-NEXT:    ret
 
-  %val32 = load i32, i32* @var_i32
+  %val32 = load i32, ptr @var_i32
   %newval32 = add i32 %val32, 14610432 ; =0xdef000
-  store i32 %newval32, i32* @var_i32
+  store i32 %newval32, ptr @var_i32
 
-  %val64 = load i64, i64* @var_i64
+  %val64 = load i64, ptr @var_i64
   %newval64 = add i64 %val64, 16773120 ; =0xfff000
-  store i64 %newval64, i64* @var_i64
+  store i64 %newval64, ptr @var_i64
 
   ret void
 }
@@ -111,13 +111,13 @@ define void @sub_small() {
 ; CHECK-NEXT:    str x11, [x9]
 ; CHECK-NEXT:    ret
 
-  %val32 = load i32, i32* @var_i32
+  %val32 = load i32, ptr @var_i32
   %newval32 = sub i32 %val32, 4095
-  store i32 %newval32, i32* @var_i32
+  store i32 %newval32, ptr @var_i32
 
-  %val64 = load i64, i64* @var_i64
+  %val64 = load i64, ptr @var_i64
   %newval64 = sub i64 %val64, 52
-  store i64 %newval64, i64* @var_i64
+  store i64 %newval64, ptr @var_i64
 
   ret void
 }
@@ -138,13 +138,13 @@ define void @sub_med() {
 ; CHECK-NEXT:    str x11, [x9]
 ; CHECK-NEXT:    ret
 
-  %val32 = load i32, i32* @var_i32
+  %val32 = load i32, ptr @var_i32
   %newval32 = sub i32 %val32, 14610432 ; =0xdef000
-  store i32 %newval32, i32* @var_i32
+  store i32 %newval32, ptr @var_i32
 
-  %val64 = load i64, i64* @var_i64
+  %val64 = load i64, ptr @var_i64
   %newval64 = sub i64 %val64, 16773120 ; =0xfff000
-  store i64 %newval64, i64* @var_i64
+  store i64 %newval64, ptr @var_i64
 
   ret void
 }
@@ -309,39 +309,39 @@ define void @testing() {
 ; CHECK-NEXT:    str w9, [x8]
 ; CHECK-NEXT:  .LBB16_6: // %common.ret
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* @var_i32
-  %val2 = load i32, i32* @var2_i32
+  %val = load i32, ptr @var_i32
+  %val2 = load i32, ptr @var2_i32
 
   %cmp_pos_small = icmp ne i32 %val, 4095
   br i1 %cmp_pos_small, label %ret, label %test2
 
 test2:
   %newval2 = add i32 %val, 1
-  store i32 %newval2, i32* @var_i32
+  store i32 %newval2, ptr @var_i32
   %cmp_pos_big = icmp ult i32 %val2, 14610432
   br i1 %cmp_pos_big, label %ret, label %test3
 
 test3:
   %newval3 = add i32 %val, 2
-  store i32 %newval3, i32* @var_i32
+  store i32 %newval3, ptr @var_i32
   %cmp_pos_slt = icmp slt i32 %val, 123
   br i1 %cmp_pos_slt, label %ret, label %test4
 
 test4:
   %newval4 = add i32 %val, 3
-  store i32 %newval4, i32* @var_i32
+  store i32 %newval4, ptr @var_i32
   %cmp_pos_sgt = icmp sgt i32 %val2, 321
   br i1 %cmp_pos_sgt, label %ret, label %test5
 
 test5:
   %newval5 = add i32 %val, 4
-  store i32 %newval5, i32* @var_i32
+  store i32 %newval5, ptr @var_i32
   %cmp_neg_uge = icmp sgt i32 %val2, -444
   br i1 %cmp_neg_uge, label %ret, label %test6
 
 test6:
   %newval6 = add i32 %val, 5
-  store i32 %newval6, i32* @var_i32
+  store i32 %newval6, ptr @var_i32
   ret void
 
 ret:
@@ -350,7 +350,7 @@ ret:
 
 declare {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
 
-define i1 @sadd_add(i32 %a, i32 %b, i32* %p) {
+define i1 @sadd_add(i32 %a, i32 %b, ptr %p) {
 ; CHECK-LABEL: sadd_add:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn w8, w0
@@ -364,13 +364,13 @@ define i1 @sadd_add(i32 %a, i32 %b, i32* %p) {
   %e0 = extractvalue {i32, i1} %a0, 0
   %e1 = extractvalue {i32, i1} %a0, 1
   %res = add i32 %e0, 1
-  store i32 %res, i32* %p
+  store i32 %res, ptr %p
   ret i1 %e1
 }
 
 declare {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
 
-define i1 @uadd_add(i8 %a, i8 %b, i8* %p) {
+define i1 @uadd_add(i8 %a, i8 %b, ptr %p) {
 ; CHECK-LABEL: uadd_add:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #255
@@ -385,7 +385,7 @@ define i1 @uadd_add(i8 %a, i8 %b, i8* %p) {
   %e0 = extractvalue {i8, i1} %a0, 0
   %e1 = extractvalue {i8, i1} %a0, 1
   %res = add i8 %e0, 1
-  store i8 %res, i8* %p
+  store i8 %res, ptr %p
   ret i1 %e1
 }
 
@@ -636,7 +636,7 @@ define dso_local i32 @neigh_periodic_work_tbl_1() {
 ; CHECK-NEXT:  .LBB35_2: // %if.end
 ; CHECK-NEXT:    ret
 entry:
-  br i1 icmp slt (i64 add (i64 ptrtoint (i32 ()* @neigh_periodic_work_tbl_1 to i64), i64 75000), i64 0), label %for.cond, label %if.end
+  br i1 icmp slt (i64 add (i64 ptrtoint (ptr @neigh_periodic_work_tbl_1 to i64), i64 75000), i64 0), label %for.cond, label %if.end
 for.cond:                                         ; preds = %entry, %for.cond
   br label %for.cond
 if.end:                                           ; preds = %entry
@@ -676,19 +676,19 @@ define dso_local i32 @_extract_crng_crng() {
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
-  br i1 icmp slt (i32 ()* @_extract_crng_crng, i32 ()* null), label %if.then, label %lor.lhs.false
+  br i1 icmp slt (ptr @_extract_crng_crng, ptr null), label %if.then, label %lor.lhs.false
 lor.lhs.false:                                    ; preds = %entry
-  %0 = load i32, i32* @jiffies, align 4
+  %0 = load i32, ptr @jiffies, align 4
   %idx.ext = sext i32 %0 to i64
   %idx.neg = sub nsw i64 0, %idx.ext
-  %add.ptr = getelementptr i8, i8* getelementptr (i8, i8* bitcast (i32 ()* @_extract_crng_crng to i8*), i64 75000), i64 %idx.neg
-  %cmp = icmp slt i8* %add.ptr, null
+  %add.ptr = getelementptr i8, ptr getelementptr (i8, ptr @_extract_crng_crng, i64 75000), i64 %idx.neg
+  %cmp = icmp slt ptr %add.ptr, null
   br i1 %cmp, label %if.then, label %if.end
 if.then:                                          ; preds = %lor.lhs.false, %entry
-  %1 = load i32, i32* @primary_crng, align 4
+  %1 = load i32, ptr @primary_crng, align 4
   %tobool.not = icmp eq i32 %1, 0
-  %cond = select i1 %tobool.not, i32* null, i32* @input_pool
-  %call = tail call i32 bitcast (i32 (...)* @crng_reseed to i32 (i32*)*)(i32* noundef %cond)
+  %cond = select i1 %tobool.not, ptr null, ptr @input_pool
+  %call = tail call i32 @crng_reseed(ptr noundef %cond)
   br label %if.end
 if.end:                                           ; preds = %if.then, %lor.lhs.false
   ret i32 undef

diff --git a/llvm/test/CodeGen/AArch64/alloca.ll b/llvm/test/CodeGen/AArch64/alloca.ll
index e7906a1e9d2fd..ca3a500d79f32 100644
--- a/llvm/test/CodeGen/AArch64/alloca.ll
+++ b/llvm/test/CodeGen/AArch64/alloca.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=arm64-apple-ios -disable-post-ra -verify-machineinstrs -o - %s | FileCheck %s --check-prefix=CHECK-MACHO
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -disable-post-ra -mattr=-fp-armv8 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-NOFP-ARM64 %s
 
-declare void @use_addr(i8*)
+declare void @use_addr(ptr)
 
 define void @test_simple_alloca(i64 %n) {
 ; CHECK-LABEL: test_simple_alloca:
@@ -19,7 +19,7 @@ define void @test_simple_alloca(i64 %n) {
 ; CHECK: sub [[NEWSP:x[0-9]+]], [[TMP]], [[SPDELTA]]
 ; CHECK: mov sp, [[NEWSP]]
 
-  call void @use_addr(i8* %buf)
+  call void @use_addr(ptr %buf)
 ; CHECK: bl use_addr
 
   ret void
@@ -28,7 +28,7 @@ define void @test_simple_alloca(i64 %n) {
 ; CHECK: ret
 }
 
-declare void @use_addr_loc(i8*, i64*)
+declare void @use_addr_loc(ptr, ptr)
 
 define i64 @test_alloca_with_local(i64 %n) {
 ; CHECK-LABEL: test_alloca_with_local:
@@ -49,10 +49,10 @@ define i64 @test_alloca_with_local(i64 %n) {
 
 ; CHECK: sub {{x[0-9]+}}, x29, #[[LOC_FROM_FP:[0-9]+]]
 
-  call void @use_addr_loc(i8* %buf, i64* %loc)
+  call void @use_addr_loc(ptr %buf, ptr %loc)
 ; CHECK: bl use_addr
 
-  %val = load i64, i64* %loc
+  %val = load i64, ptr %loc
 
 ; CHECK: ldur x0, [x29, #-[[LOC_FROM_FP]]]
 
@@ -99,7 +99,7 @@ define void @test_variadic_alloca(i64 %n, ...) {
 
   %addr = alloca i8, i64 %n
 
-  call void @use_addr(i8* %addr)
+  call void @use_addr(ptr %addr)
 ; CHECK: bl use_addr
 
   ret void
@@ -132,7 +132,7 @@ define void @test_alloca_large_frame(i64 %n) {
   %addr1 = alloca i8, i64 %n
   %addr2 = alloca i64, i64 1000000
 
-  call void @use_addr_loc(i8* %addr1, i64* %addr2)
+  call void @use_addr_loc(ptr %addr1, ptr %addr2)
 
   ret void
 
@@ -145,13 +145,13 @@ define void @test_alloca_large_frame(i64 %n) {
 ; CHECK-MACHO: ldp     x20, x19, [sp], #32
 }
 
-declare i8* @llvm.stacksave()
-declare void @llvm.stackrestore(i8*)
+declare ptr @llvm.stacksave()
+declare void @llvm.stackrestore(ptr)
 
 define void @test_scoped_alloca(i64 %n) {
 ; CHECK-LABEL: test_scoped_alloca:
 
-  %sp = call i8* @llvm.stacksave()
+  %sp = call ptr @llvm.stacksave()
 ; CHECK: mov x29, sp
 ; CHECK: mov [[SAVED_SP:x[0-9]+]], sp
 ; CHECK: mov [[OLDSP:x[0-9]+]], sp
@@ -161,10 +161,10 @@ define void @test_scoped_alloca(i64 %n) {
 ; CHECK-DAG: sub [[NEWSP:x[0-9]+]], [[OLDSP]], [[SPDELTA]]
 ; CHECK: mov sp, [[NEWSP]]
 
-  call void @use_addr(i8* %addr)
+  call void @use_addr(ptr %addr)
 ; CHECK: bl use_addr
 
-  call void @llvm.stackrestore(i8* %sp)
+  call void @llvm.stackrestore(ptr %sp)
 ; CHECK: mov sp, [[SAVED_SP]]
 
   ret void

diff --git a/llvm/test/CodeGen/AArch64/analyzecmp.ll b/llvm/test/CodeGen/AArch64/analyzecmp.ll
index 0b3bcd887b5b6..3d7644e320cc2 100644
--- a/llvm/test/CodeGen/AArch64/analyzecmp.ll
+++ b/llvm/test/CodeGen/AArch64/analyzecmp.ll
@@ -7,7 +7,7 @@
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
 target triple = "arm64--linux-gnueabi"
 
-define void @test(i64 %a, i64* %ptr1, i64* %ptr2) #0 align 2 {
+define void @test(i64 %a, ptr %ptr1, ptr %ptr2) #0 align 2 {
 entry:
   %conv = and i64 %a, 4294967295
   %add = add nsw i64 %conv, -1
@@ -24,8 +24,8 @@ if.then:
 exit:                 
   %__n = phi i64 [ %add3, %if.then ], [ %div, %entry ]
   %__n.0 = phi i64 [ %add2, %if.then ], [ %rem, %entry ]
-  store i64 %__n, i64* %ptr1
-  store i64 %__n.0, i64* %ptr2
+  store i64 %__n, ptr %ptr1
+  store i64 %__n.0, ptr %ptr2
   ret void 
 }
 

diff --git a/llvm/test/CodeGen/AArch64/and-mask-removal.ll b/llvm/test/CodeGen/AArch64/and-mask-removal.ll
index d5b9ca253e8da..f3307144e08df 100644
--- a/llvm/test/CodeGen/AArch64/and-mask-removal.ll
+++ b/llvm/test/CodeGen/AArch64/and-mask-removal.ll
@@ -47,16 +47,16 @@ define void @new_position(i32 %pos) {
 ; CHECK-GI-NEXT:    ret
 entry:
   %idxprom = sext i32 %pos to i64
-  %arrayidx = getelementptr inbounds [400 x i8], [400 x i8]* @board, i64 0, i64 %idxprom
-  %tmp = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds [400 x i8], ptr @board, i64 0, i64 %idxprom
+  %tmp = load i8, ptr %arrayidx, align 1
   %.off = add i8 %tmp, -1
   %switch = icmp ult i8 %.off, 2
   br i1 %switch, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %tmp1 = load i32, i32* @next_string, align 4
-  %arrayidx8 = getelementptr inbounds [400 x i32], [400 x i32]* @string_number, i64 0, i64 %idxprom
-  store i32 %tmp1, i32* %arrayidx8, align 4
+  %tmp1 = load i32, ptr @next_string, align 4
+  %arrayidx8 = getelementptr inbounds [400 x i32], ptr @string_number, i64 0, i64 %idxprom
+  store i32 %tmp1, ptr %arrayidx8, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry

diff --git a/llvm/test/CodeGen/AArch64/and-sink.ll b/llvm/test/CodeGen/AArch64/and-sink.ll
index 9b4a627b1efc8..f4e9551259e4e 100644
--- a/llvm/test/CodeGen/AArch64/and-sink.ll
+++ b/llvm/test/CodeGen/AArch64/and-sink.ll
@@ -24,7 +24,7 @@ bb0:
 ; CHECK-CGP-NEXT: store
 ; CHECK-CGP-NEXT: br
   %cmp = icmp eq i32 %and, 0
-  store i32 0, i32* @A
+  store i32 0, ptr @A
   br i1 %cmp, label %bb1, label %bb2
 bb1:
   ret i32 1
@@ -45,14 +45,14 @@ define dso_local i32 @and_sink2(i32 %a, i1 %c, i1 %c2) {
 ; CHECK-CGP-LABEL: @and_sink2(
 ; CHECK-CGP-NOT: and i32
   %and = and i32 %a, 4
-  store i32 0, i32* @A
+  store i32 0, ptr @A
   br i1 %c, label %bb0, label %bb3
 bb0:
 ; CHECK-CGP-LABEL: bb0:
 ; CHECK-CGP-NOT: and i32
 ; CHECK-CGP-NOT: icmp
   %cmp = icmp eq i32 %and, 0
-  store i32 0, i32* @B
+  store i32 0, ptr @B
   br i1 %c2, label %bb1, label %bb3
 bb1:
 ; CHECK-CGP-LABEL: bb1:
@@ -60,7 +60,7 @@ bb1:
 ; CHECK-CGP-NEXT: icmp eq i32
 ; CHECK-CGP-NEXT: store
 ; CHECK-CGP-NEXT: br
-  store i32 0, i32* @C
+  store i32 0, ptr @C
   br i1 %cmp, label %bb2, label %bb0
 bb2:
   ret i32 1
@@ -84,7 +84,7 @@ bb0:
 ; CHECK-CGP-LABEL: bb0:
 ; CHECK-CGP-NOT: and i32
   %cmp = icmp eq i32 %and, 0
-  store i32 0, i32* @A
+  store i32 0, ptr @A
   br i1 %cmp, label %bb0, label %bb2
 bb2:
   ret i32 0

diff --git a/llvm/test/CodeGen/AArch64/andorbrcompare.ll b/llvm/test/CodeGen/AArch64/andorbrcompare.ll
index 8a16d9af1b06d..a2485495ec72f 100644
--- a/llvm/test/CodeGen/AArch64/andorbrcompare.ll
+++ b/llvm/test/CodeGen/AArch64/andorbrcompare.ll
@@ -4,7 +4,7 @@
 
 declare void @dummy()
 
-define i32 @and_eq_ne_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32* %p) {
+define i32 @and_eq_ne_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) {
 ; SDISEL-LABEL: and_eq_ne_ult:
 ; SDISEL:       // %bb.0: // %entry
 ; SDISEL-NEXT:    cmp w2, w3
@@ -46,14 +46,14 @@ entry:
   br i1 %o, label %if, label %else
 
 if:
-  store i32 1, i32* %p
+  store i32 1, ptr %p
   ret i32 1
 
 else:
   ret i32 0
 }
 
-define i32 @and_ne_ult_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32* %p) {
+define i32 @and_ne_ult_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) {
 ; SDISEL-LABEL: and_ne_ult_ule:
 ; SDISEL:       // %bb.0: // %entry
 ; SDISEL-NEXT:    cmp w2, w3
@@ -95,14 +95,14 @@ entry:
   br i1 %o, label %if, label %else
 
 if:
-  store i32 1, i32* %p
+  store i32 1, ptr %p
   ret i32 1
 
 else:
   ret i32 0
 }
 
-define i32 @and_ult_ule_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32* %p) {
+define i32 @and_ult_ule_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) {
 ; SDISEL-LABEL: and_ult_ule_ugt:
 ; SDISEL:       // %bb.0: // %entry
 ; SDISEL-NEXT:    cmp w2, w3
@@ -144,14 +144,14 @@ entry:
   br i1 %o, label %if, label %else
 
 if:
-  store i32 1, i32* %p
+  store i32 1, ptr %p
   ret i32 1
 
 else:
   ret i32 0
 }
 
-define i32 @and_ule_ugt_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32* %p) {
+define i32 @and_ule_ugt_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) {
 ; SDISEL-LABEL: and_ule_ugt_uge:
 ; SDISEL:       // %bb.0: // %entry
 ; SDISEL-NEXT:    cmp w2, w3
@@ -193,14 +193,14 @@ entry:
   br i1 %o, label %if, label %else
 
 if:
-  store i32 1, i32* %p
+  store i32 1, ptr %p
   ret i32 1
 
 else:
   ret i32 0
 }
 
-define i32 @and_ugt_uge_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32* %p) {
+define i32 @and_ugt_uge_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) {
 ; SDISEL-LABEL: and_ugt_uge_slt:
 ; SDISEL:       // %bb.0: // %entry
 ; SDISEL-NEXT:    cmp w2, w3
@@ -242,14 +242,14 @@ entry:
   br i1 %o, label %if, label %else
 
 if:
-  store i32 1, i32* %p
+  store i32 1, ptr %p
   ret i32 1
 
 else:
   ret i32 0
 }
 
-define i32 @and_uge_slt_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32* %p) {
+define i32 @and_uge_slt_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) {
 ; SDISEL-LABEL: and_uge_slt_sle:
 ; SDISEL:       // %bb.0: // %entry
 ; SDISEL-NEXT:    cmp w2, w3
@@ -291,14 +291,14 @@ entry:
   br i1 %o, label %if, label %else
 
 if:
-  store i32 1, i32* %p
+  store i32 1, ptr %p
   ret i32 1
 
 else:
   ret i32 0
 }
 
-define i32 @and_slt_sle_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32* %p) {
+define i32 @and_slt_sle_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) {
 ; SDISEL-LABEL: and_slt_sle_sgt:
 ; SDISEL:       // %bb.0: // %entry
 ; SDISEL-NEXT:    cmp w2, w3
@@ -340,14 +340,14 @@ entry:
   br i1 %o, label %if, label %else
 
 if:
-  store i32 1, i32* %p
+  store i32 1, ptr %p
   ret i32 1
 
 else:
   ret i32 0
 }
 
-define i32 @and_sle_sgt_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32* %p) {
+define i32 @and_sle_sgt_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) {
 ; SDISEL-LABEL: and_sle_sgt_sge:
 ; SDISEL:       // %bb.0: // %entry
 ; SDISEL-NEXT:    cmp w2, w3
@@ -389,7 +389,7 @@ entry:
   br i1 %o, label %if, label %else
 
 if:
-  store i32 1, i32* %p
+  store i32 1, ptr %p
   ret i32 1
 
 else:

diff --git a/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll b/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll
index 0b11b1555fb88..e90f89f359ac7 100644
--- a/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll
+++ b/llvm/test/CodeGen/AArch64/argument-blocks-array-of-struct.ll
@@ -394,7 +394,7 @@ define void @caller_in_block() {
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %1 = call %T_IN_BLOCK @return_in_block()
-  store %T_IN_BLOCK %1, %T_IN_BLOCK* @in_block_store
+  store %T_IN_BLOCK %1, ptr @in_block_store
   ret void
 }
 
@@ -410,7 +410,7 @@ define void @callee_in_block(%T_IN_BLOCK %a) {
 ; CHECK-NEXT:    str d1, [x8, #8]
 ; CHECK-NEXT:    str d0, [x8]
 ; CHECK-NEXT:    ret
-  store %T_IN_BLOCK %a, %T_IN_BLOCK* @in_block_store
+  store %T_IN_BLOCK %a, ptr @in_block_store
   ret void
 }
 
@@ -428,7 +428,7 @@ define void @argument_in_block() {
 ; CHECK-NEXT:    bl callee_in_block
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %1 = load %T_IN_BLOCK, %T_IN_BLOCK* @in_block_store
+  %1 = load %T_IN_BLOCK, ptr @in_block_store
   call void @callee_in_block(%T_IN_BLOCK %1)
   ret void
 }
@@ -471,7 +471,7 @@ define void @caller_in_memory() {
 ; CHECK-NEXT:    add sp, sp, #96
 ; CHECK-NEXT:    ret
   %1 = call %T_IN_MEMORY @return_in_memory()
-  store %T_IN_MEMORY %1, %T_IN_MEMORY* @in_memory_store
+  store %T_IN_MEMORY %1, ptr @in_memory_store
   ret void
 }
 
@@ -488,7 +488,7 @@ define void @callee_in_memory(%T_IN_MEMORY %a) {
 ; CHECK-NEXT:    stp q1, q2, [x8, #32]
 ; CHECK-NEXT:    stp q0, q3, [x8]
 ; CHECK-NEXT:    ret
-  store %T_IN_MEMORY %a, %T_IN_MEMORY* @in_memory_store
+  store %T_IN_MEMORY %a, ptr @in_memory_store
   ret void
 }
 
@@ -511,7 +511,7 @@ define void @argument_in_memory() {
 ; CHECK-NEXT:    ldr x30, [sp, #80] // 8-byte Folded Reload
 ; CHECK-NEXT:    add sp, sp, #96
 ; CHECK-NEXT:    ret
-  %1 = load %T_IN_MEMORY, %T_IN_MEMORY* @in_memory_store
+  %1 = load %T_IN_MEMORY, ptr @in_memory_store
   call void @callee_in_memory(%T_IN_MEMORY %1)
   ret void
 }
@@ -547,7 +547,7 @@ define void @caller_no_block() {
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %1 = call %T_NO_BLOCK @return_no_block()
-  store %T_NO_BLOCK %1, %T_NO_BLOCK* @no_block_store
+  store %T_NO_BLOCK %1, ptr @no_block_store
   ret void
 }
 
@@ -561,7 +561,7 @@ define void @callee_no_block(%T_NO_BLOCK %a) {
 ; CHECK-NEXT:    str w0, [x8, #8]
 ; CHECK-NEXT:    str d0, [x8]
 ; CHECK-NEXT:    ret
-  store %T_NO_BLOCK %a, %T_NO_BLOCK* @no_block_store
+  store %T_NO_BLOCK %a, ptr @no_block_store
   ret void
 }
 
@@ -580,7 +580,7 @@ define void @argument_no_block() {
 ; CHECK-NEXT:    bl callee_no_block
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %1 = load %T_NO_BLOCK, %T_NO_BLOCK* @no_block_store
+  %1 = load %T_NO_BLOCK, ptr @no_block_store
   call void @callee_no_block(%T_NO_BLOCK %1)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-2011-03-09-CPSRSpill.ll b/llvm/test/CodeGen/AArch64/arm64-2011-03-09-CPSRSpill.ll
index 6fb7c3fb5e0aa..f31f43f1fde45 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2011-03-09-CPSRSpill.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2011-03-09-CPSRSpill.ll
@@ -21,7 +21,7 @@ _ZN12gjkepa2_impl3EPA6appendERNS0_5sListEPNS0_5sFaceE.exit71: ; preds = %bb.i69,
   %1 = fdiv float %0, undef
   %2 = fcmp ult float %1, 0xBF847AE140000000
   %storemerge9 = select i1 %2, float %1, float 0.000000e+00
-  store float %storemerge9, float* undef, align 4
+  store float %storemerge9, ptr undef, align 4
   br i1 undef, label %bb42, label %bb47
 
 bb42:                                             ; preds = %_ZN12gjkepa2_impl3EPA6appendERNS0_5sListEPNS0_5sFaceE.exit71

diff  --git a/llvm/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll b/llvm/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll
index d9d12c3f43e78..a4720a9a738f7 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2011-03-17-AsmPrinterCrash.ll
@@ -14,8 +14,8 @@ if.then24:                                        ; preds = %entry
   unreachable
 
 if.else295:                                       ; preds = %entry
-  call void @llvm.dbg.declare(metadata i32* %do_tab_convert, metadata !14, metadata !16), !dbg !17
-  store i32 0, i32* %do_tab_convert, align 4, !dbg !18
+  call void @llvm.dbg.declare(metadata ptr %do_tab_convert, metadata !14, metadata !16), !dbg !17
+  store i32 0, ptr %do_tab_convert, align 4, !dbg !18
   unreachable
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll b/llvm/test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll
index 72213bbcf9675..7c25e9e609a17 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2011-03-21-Unaligned-Frame-Index.ll
@@ -5,8 +5,7 @@ define void @foo(i64 %val) {
 ;   instruction that can handle that.
 ; CHECK: stur x0, [sp, #20]
   %a = alloca [49 x i32], align 4
-  %p32 = getelementptr inbounds [49 x i32], [49 x i32]* %a, i64 0, i64 2
-  %p = bitcast i32* %p32 to i64*
-  store i64 %val, i64* %p, align 8
+  %p32 = getelementptr inbounds [49 x i32], ptr %a, i64 0, i64 2
+  store i64 %val, ptr %p32, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll b/llvm/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll
index e2c39e0b62328..83a9ae7a1b7b0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll
@@ -5,10 +5,10 @@
 
 define hidden void @t() nounwind {
 entry:
-  %cmp = icmp eq i32* null, undef
+  %cmp = icmp eq ptr null, undef
   %frombool = zext i1 %cmp to i8
-  store i8 %frombool, i8* undef, align 1
-  %tmp4 = load i8, i8* undef, align 1
+  store i8 %frombool, ptr undef, align 1
+  %tmp4 = load i8, ptr undef, align 1
   %tobool = trunc i8 %tmp4 to i1
   br i1 %tobool, label %land.lhs.true, label %if.end
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll b/llvm/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll
index b69cd24211662..3b6c4fa875e60 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll
@@ -18,9 +18,9 @@ for.body:
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %0 = shl nsw i64 %indvars.iv, 12
   %add = add nsw i64 %0, 34628173824
-  %1 = inttoptr i64 %add to i32*
-  %2 = load volatile i32, i32* %1, align 4096
-  store volatile i32 %2, i32* @test_data, align 4
+  %1 = inttoptr i64 %add to ptr
+  %2 = load volatile i32, ptr %1, align 4096
+  store volatile i32 %2, ptr @test_data, align 4
   %indvars.iv.next = add i64 %indvars.iv, 1
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
   %exitcond = icmp eq i32 %lftr.wideiv, 200

diff  --git a/llvm/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll b/llvm/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll
index b8855fb5cdb39..b7b111c60bd9b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll
@@ -13,7 +13,7 @@ lor.lhs.false:
   br i1 undef, label %return, label %if.end
 
 if.end:
-  %tmp.i = load i64, i64* undef, align 8
+  %tmp.i = load i64, ptr undef, align 8
   %and.i.i.i = and i64 %tmp.i, -16
   br i1 %IsArrow, label %if.else_crit_edge, label %if.end32
 
@@ -26,7 +26,7 @@ if.end32:
   %.pn.v = select i1 %0, i320 128, i320 64
   %.pn = shl i320 %1, %.pn.v
   %ins346392 = or i320 %.pn, 0
-  store i320 %ins346392, i320* undef, align 8
+  store i320 %ins346392, ptr undef, align 8
   br i1 undef, label %sw.bb.i.i, label %exit
 
 sw.bb.i.i:

diff  --git a/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll b/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
index dc1dc56eedb99..626dba4c32127 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
@@ -13,9 +13,9 @@
 ; CHECK-NEXT: str  [[VAL]], [x0, #8]
 ; CHECK-NEXT: str  [[VAL2]], [x0]
 
-define void @foo(i8* %a) {
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a, i8* align 4 bitcast ([3 x i32]* @b to i8*), i64 12, i1 false)
+define void @foo(ptr %a) {
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %a, ptr align 4 @b, i64 12, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind

diff  --git a/llvm/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll b/llvm/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll
index 7da2d2ca513e5..f859d1f66e60d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2012-05-09-LOADgot-bug.ll
@@ -2,9 +2,9 @@
 ; RUN: llc -mtriple=arm64-linux-gnu -relocation-model=pic < %s | FileCheck %s --check-prefix=CHECK-LINUX
 ; <rdar://problem/11392109>
 
-define hidden void @t(i64* %addr) optsize ssp {
+define hidden void @t(ptr %addr) optsize ssp {
 entry:
-  store i64 zext (i32 ptrtoint (i64 (i32)* @x to i32) to i64), i64* %addr, align 8
+  store i64 zext (i32 ptrtoint (ptr @x to i32) to i64), ptr %addr, align 8
 ; CHECK:             adrp    x{{[0-9]+}}, _x@GOTPAGE
 ; CHECK:        ldr     x{{[0-9]+}}, [x{{[0-9]+}}, _x@GOTPAGEOFF]
 ; CHECK-NEXT:        and     x{{[0-9]+}}, x{{[0-9]+}}, #0xffffffff

diff  --git a/llvm/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll b/llvm/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll
index bd0028c74528c..972879f0db357 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll
@@ -11,27 +11,22 @@
 
 @"OBJC_IVAR_$_UIScreen._bounds" = external hidden global i64, section "__DATA, __objc_ivar", align 8
 
-define hidden %struct.CGRect @t(%0* nocapture %self, i8* nocapture %_cmd) nounwind readonly optsize ssp {
+define hidden %struct.CGRect @t(ptr nocapture %self, ptr nocapture %_cmd) nounwind readonly optsize ssp {
 entry:
 ; CHECK-LABEL: t:
 ; CHECK: ldp d{{[0-9]+}}, d{{[0-9]+}}
-  %ivar = load i64, i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
-  %0 = bitcast %0* %self to i8*
-  %add.ptr = getelementptr inbounds i8, i8* %0, i64 %ivar
-  %add.ptr10.0 = bitcast i8* %add.ptr to double*
-  %tmp11 = load double, double* %add.ptr10.0, align 8
+  %ivar = load i64, ptr @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
+  %add.ptr = getelementptr inbounds i8, ptr %self, i64 %ivar
+  %tmp11 = load double, ptr %add.ptr, align 8
   %add.ptr.sum = add i64 %ivar, 8
-  %add.ptr10.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum
-  %1 = bitcast i8* %add.ptr10.1 to double*
-  %tmp12 = load double, double* %1, align 8
+  %add.ptr10.1 = getelementptr inbounds i8, ptr %self, i64 %add.ptr.sum
+  %tmp12 = load double, ptr %add.ptr10.1, align 8
   %add.ptr.sum17 = add i64 %ivar, 16
-  %add.ptr4.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum17
-  %add.ptr4.1.0 = bitcast i8* %add.ptr4.1 to double*
-  %tmp = load double, double* %add.ptr4.1.0, align 8
+  %add.ptr4.1 = getelementptr inbounds i8, ptr %self, i64 %add.ptr.sum17
+  %tmp = load double, ptr %add.ptr4.1, align 8
   %add.ptr4.1.sum = add i64 %ivar, 24
-  %add.ptr4.1.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr4.1.sum
-  %2 = bitcast i8* %add.ptr4.1.1 to double*
-  %tmp5 = load double, double* %2, align 8
+  %add.ptr4.1.1 = getelementptr inbounds i8, ptr %self, i64 %add.ptr4.1.sum
+  %tmp5 = load double, ptr %add.ptr4.1.1, align 8
   %insert14 = insertvalue %struct.CGPoint undef, double %tmp11, 0
   %insert16 = insertvalue %struct.CGPoint %insert14, double %tmp12, 1
   %insert = insertvalue %struct.CGRect undef, %struct.CGPoint %insert16, 0

diff  --git a/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll b/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
index 369b94be94c51..93eaf3618cbb2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
@@ -12,19 +12,19 @@ define void @testDouble(double %d) ssp {
 ; CHECK:  fcvtzu w{{[0-9]+}}, d{{[0-9]+}}
 entry:
   %d.addr = alloca double, align 8
-  store double %d, double* %d.addr, align 8
-  %0 = load double, double* %d.addr, align 8
-  %1 = load double, double* %d.addr, align 8
+  store double %d, ptr %d.addr, align 8
+  %0 = load double, ptr %d.addr, align 8
+  %1 = load double, ptr %d.addr, align 8
   %conv = fptoui double %1 to i64
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), double %0, i64 %conv)
-  %2 = load double, double* %d.addr, align 8
-  %3 = load double, double* %d.addr, align 8
+  %call = call i32 (ptr, ...) @printf(ptr @.str, double %0, i64 %conv)
+  %2 = load double, ptr %d.addr, align 8
+  %3 = load double, ptr %d.addr, align 8
   %conv1 = fptoui double %3 to i32
-  %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str1, i32 0, i32 0), double %2, i32 %conv1)
+  %call2 = call i32 (ptr, ...) @printf(ptr @.str1, double %2, i32 %conv1)
   ret void
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
 
 define void @testFloat(float %f) ssp {
 ; CHECK-LABEL: testFloat:
@@ -32,28 +32,28 @@ define void @testFloat(float %f) ssp {
 ; CHECK:  fcvtzu w{{[0-9]+}}, s{{[0-9]+}}
 entry:
   %f.addr = alloca float, align 4
-  store float %f, float* %f.addr, align 4
-  %0 = load float, float* %f.addr, align 4
+  store float %f, ptr %f.addr, align 4
+  %0 = load float, ptr %f.addr, align 4
   %conv = fpext float %0 to double
-  %1 = load float, float* %f.addr, align 4
+  %1 = load float, ptr %f.addr, align 4
   %conv1 = fptoui float %1 to i64
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str2, i32 0, i32 0), double %conv, i64 %conv1)
-  %2 = load float, float* %f.addr, align 4
+  %call = call i32 (ptr, ...) @printf(ptr @.str2, double %conv, i64 %conv1)
+  %2 = load float, ptr %f.addr, align 4
   %conv2 = fpext float %2 to double
-  %3 = load float, float* %f.addr, align 4
+  %3 = load float, ptr %f.addr, align 4
   %conv3 = fptoui float %3 to i32
-  %call4 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str3, i32 0, i32 0), double %conv2, i32 %conv3)
+  %call4 = call i32 (ptr, ...) @printf(ptr @.str3, double %conv2, i32 %conv3)
   ret void
 }
 
-define i32 @main(i32 %argc, i8** %argv) ssp {
+define i32 @main(i32 %argc, ptr %argv) ssp {
 entry:
   %retval = alloca i32, align 4
   %argc.addr = alloca i32, align 4
-  %argv.addr = alloca i8**, align 8
-  store i32 0, i32* %retval
-  store i32 %argc, i32* %argc.addr, align 4
-  store i8** %argv, i8*** %argv.addr, align 8
+  %argv.addr = alloca ptr, align 8
+  store i32 0, ptr %retval
+  store i32 %argc, ptr %argc.addr, align 4
+  store ptr %argv, ptr %argv.addr, align 8
   call void @testDouble(double 1.159198e+01)
   call void @testFloat(float 0x40272F1800000000)
   ret i32 0

diff  --git a/llvm/test/CodeGen/AArch64/arm64-2012-07-11-InstrEmitterBug.ll b/llvm/test/CodeGen/AArch64/arm64-2012-07-11-InstrEmitterBug.ll
index 997431bda5604..b87fe926fb32c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2012-07-11-InstrEmitterBug.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2012-07-11-InstrEmitterBug.ll
@@ -1,21 +1,21 @@
 ; RUN: llc < %s -mtriple=arm64-apple-ios
 ; rdar://11849816
 
- at shlib_path_substitutions = external hidden unnamed_addr global i8**, align 8
+ at shlib_path_substitutions = external hidden unnamed_addr global ptr, align 8
 
-declare i64 @llvm.objectsize.i64(i8*, i1) nounwind readnone
+declare i64 @llvm.objectsize.i64(ptr, i1) nounwind readnone
 
-declare noalias i8* @xmalloc(i64) optsize
+declare noalias ptr @xmalloc(i64) optsize
 
-declare i64 @strlen(i8* nocapture) nounwind readonly optsize
+declare i64 @strlen(ptr nocapture) nounwind readonly optsize
 
-declare i8* @__strcpy_chk(i8*, i8*, i64) nounwind optsize
+declare ptr @__strcpy_chk(ptr, ptr, i64) nounwind optsize
 
-declare i8* @__strcat_chk(i8*, i8*, i64) nounwind optsize
+declare ptr @__strcat_chk(ptr, ptr, i64) nounwind optsize
 
-declare noalias i8* @xstrdup(i8*) optsize
+declare noalias ptr @xstrdup(ptr) optsize
 
-define i8* @dyld_fix_path(i8* %path) nounwind optsize ssp {
+define ptr @dyld_fix_path(ptr %path) nounwind optsize ssp {
 entry:
   br i1 undef, label %if.end56, label %for.cond
 
@@ -29,7 +29,7 @@ for.cond10:                                       ; preds = %for.cond
   br i1 undef, label %if.end56, label %for.body14
 
 for.body14:                                       ; preds = %for.cond10
-  %call22 = tail call i64 @strlen(i8* undef) nounwind optsize
+  %call22 = tail call i64 @strlen(ptr undef) nounwind optsize
   %sext = shl i64 %call22, 32
   %conv30 = ashr exact i64 %sext, 32
   %add29 = sub i64 0, %conv30
@@ -37,20 +37,20 @@ for.body14:                                       ; preds = %for.cond10
   %add31 = shl i64 %sub, 32
   %sext59 = add i64 %add31, 4294967296
   %conv33 = ashr exact i64 %sext59, 32
-  %call34 = tail call noalias i8* @xmalloc(i64 %conv33) nounwind optsize
+  %call34 = tail call noalias ptr @xmalloc(i64 %conv33) nounwind optsize
   br i1 undef, label %cond.false45, label %cond.true43
 
 cond.true43:                                      ; preds = %for.body14
   unreachable
 
 cond.false45:                                     ; preds = %for.body14
-  %add.ptr = getelementptr inbounds i8, i8* %path, i64 %conv30
+  %add.ptr = getelementptr inbounds i8, ptr %path, i64 %conv30
   unreachable
 
 if.end56:                                         ; preds = %for.cond10, %entry
-  ret i8* null
+  ret ptr null
 }
 
-declare i32 @strncmp(i8* nocapture, i8* nocapture, i64) nounwind readonly optsize
+declare i32 @strncmp(ptr nocapture, ptr nocapture, i64) nounwind readonly optsize
 
-declare i8* @strcpy(i8*, i8* nocapture) nounwind
+declare ptr @strcpy(ptr, ptr nocapture) nounwind

diff  --git a/llvm/test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll b/llvm/test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll
index 4d78b33135303..f28702bdbdd0c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2013-01-23-frem-crash.ll
@@ -7,7 +7,7 @@ entry:
   br i1 undef, label %CF, label %CF77
 
 CF:                                               ; preds = %CF, %CF76
-  store float %B26, float* undef
+  store float %B26, ptr undef
   br i1 undef, label %CF, label %CF77
 
 CF77:                                             ; preds = %CF

diff  --git a/llvm/test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll b/llvm/test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll
index 9b1dec1ac892d..29f5cf8e72b64 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2013-01-23-sext-crash.ll
@@ -32,6 +32,6 @@ CF83:                                             ; preds = %CF
 define void @_Z12my_example2bv() nounwind noinline ssp {
 entry:
   %0 = fptosi <2 x double> undef to <2 x i32>
-  store <2 x i32> %0, <2 x i32>* undef, align 8
+  store <2 x i32> %0, ptr undef, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll b/llvm/test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll
index c13b65d34a1a1..da121b97b55d5 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2013-02-12-shufv8i8.ll
@@ -3,7 +3,7 @@
 ;CHECK-LABEL: Shuff:
 ;CHECK: tbl.8b
 ;CHECK: ret
-define <8 x i8 > @Shuff(<8 x i8> %in, <8 x i8>* %out) nounwind ssp {
+define <8 x i8 > @Shuff(<8 x i8> %in, ptr %out) nounwind ssp {
   %value = shufflevector <8 x i8> %in, <8 x i8> zeroinitializer, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
   ret <8 x i8> %value
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-aapcs.ll b/llvm/test/CodeGen/AArch64/arm64-aapcs.ll
index 1ace2461e6412..03393ad6aef5c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-aapcs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-aapcs.ll
@@ -4,7 +4,7 @@
 
 ; CHECK-LABEL: @test_i128_align
 define dso_local i128 @test_i128_align(i32, i128 %arg, i32 %after) {
-  store i32 %after, i32* @var, align 4
+  store i32 %after, ptr @var, align 4
 ; CHECK-DAG: str w4, [{{x[0-9]+}}, :lo12:var]
 
   ret i128 %arg
@@ -14,7 +14,7 @@ define dso_local i128 @test_i128_align(i32, i128 %arg, i32 %after) {
 
 ; CHECK-LABEL: @test_i64x2_align
 define [2 x i64] @test_i64x2_align(i32, [2 x i64] %arg, i32 %after) {
-  store i32 %after, i32* @var, align 4
+  store i32 %after, ptr @var, align 4
 ; CHECK-DAG: str w3, [{{x[0-9]+}}, :lo12:var]
 
   ret [2 x i64] %arg
@@ -35,22 +35,22 @@ define dso_local void @test_stack_slots([8 x i64], i1 %bool, i8 %char, i16 %shor
 ; CHECK-DAG: ldrb w[[ext5:[0-9]+]], [sp]
 
   %ext_bool = zext i1 %bool to i64
-  store volatile i64 %ext_bool, i64* @var64, align 8
+  store volatile i64 %ext_bool, ptr @var64, align 8
 ; CHECK: str x[[ext5]], [{{x[0-9]+}}, :lo12:var64]
 
   %ext_char = zext i8 %char to i64
-  store volatile i64 %ext_char, i64* @var64, align 8
+  store volatile i64 %ext_char, ptr @var64, align 8
 ; CHECK: str x[[ext3]], [{{x[0-9]+}}, :lo12:var64]
 
   %ext_short = zext i16 %short to i64
-  store volatile i64 %ext_short, i64* @var64, align 8
+  store volatile i64 %ext_short, ptr @var64, align 8
 ; CHECK: str x[[ext2]], [{{x[0-9]+}}, :lo12:var64]
 
   %ext_int = zext i32 %int to i64
-  store volatile i64 %ext_int, i64* @var64, align 8
+  store volatile i64 %ext_int, ptr @var64, align 8
 ; CHECK: str x[[ext1]], [{{x[0-9]+}}, :lo12:var64]
 
-  store volatile i64 %long, i64* @var64, align 8
+  store volatile i64 %long, ptr @var64, align 8
 ; CHECK: str x[[ext4]], [{{x[0-9]+}}, :lo12:var64]
 
   ret void
@@ -61,22 +61,22 @@ define dso_local void @test_stack_slots([8 x i64], i1 %bool, i8 %char, i16 %shor
 
 define dso_local void @test_extension(i1 %bool, i8 %char, i16 %short, i32 %int) {
   %ext_bool = zext i1 %bool to i64
-  store volatile i64 %ext_bool, i64* @var64
+  store volatile i64 %ext_bool, ptr @var64
 ; CHECK: and [[EXT:x[0-9]+]], x0, #0x1
 ; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64]
 
   %ext_char = sext i8 %char to i64
-  store volatile i64 %ext_char, i64* @var64
+  store volatile i64 %ext_char, ptr @var64
 ; CHECK: sxtb [[EXT:x[0-9]+]], w1
 ; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64]
 
   %ext_short = zext i16 %short to i64
-  store volatile i64 %ext_short, i64* @var64
+  store volatile i64 %ext_short, ptr @var64
 ; CHECK: and [[EXT:x[0-9]+]], x2, #0xffff
 ; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64]
 
   %ext_int = zext i32 %int to i64
-  store volatile i64 %ext_int, i64* @var64
+  store volatile i64 %ext_int, ptr @var64
 ; CHECK: mov w[[EXT:[0-9]+]], w3
 ; CHECK: str x[[EXT]], [{{x[0-9]+}}, :lo12:var64]
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll
index b2ac6fa314351..e8c1c124c06bc 100644
--- a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll
@@ -4,7 +4,7 @@
 ; rdar://13625505
 ; Here we have 9 fixed integer arguments; the 9th argument is on the stack, and the
 ; varargs start right after at 8-byte alignment.
-define void @fn9(i32* %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, ...) nounwind noinline ssp {
+define void @fn9(ptr %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, ...) nounwind noinline ssp {
 ; CHECK-LABEL: fn9:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #64
@@ -34,31 +34,30 @@ define void @fn9(i32* %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7,
   %7 = alloca i32, align 4
   %8 = alloca i32, align 4
   %9 = alloca i32, align 4
-  %args = alloca i8*, align 8
+  %args = alloca ptr, align 8
   %a10 = alloca i32, align 4
   %a11 = alloca i32, align 4
   %a12 = alloca i32, align 4
-  store i32 %a2, i32* %2, align 4
-  store i32 %a3, i32* %3, align 4
-  store i32 %a4, i32* %4, align 4
-  store i32 %a5, i32* %5, align 4
-  store i32 %a6, i32* %6, align 4
-  store i32 %a7, i32* %7, align 4
-  store i32 %a8, i32* %8, align 4
-  store i32 %a9, i32* %9, align 4
-  store i32 %a9, i32* %a1
-  %10 = bitcast i8** %args to i8*
-  call void @llvm.va_start(i8* %10)
-  %11 = va_arg i8** %args, i32
-  store i32 %11, i32* %a10, align 4
-  %12 = va_arg i8** %args, i32
-  store i32 %12, i32* %a11, align 4
-  %13 = va_arg i8** %args, i32
-  store i32 %13, i32* %a12, align 4
+  store i32 %a2, ptr %2, align 4
+  store i32 %a3, ptr %3, align 4
+  store i32 %a4, ptr %4, align 4
+  store i32 %a5, ptr %5, align 4
+  store i32 %a6, ptr %6, align 4
+  store i32 %a7, ptr %7, align 4
+  store i32 %a8, ptr %8, align 4
+  store i32 %a9, ptr %9, align 4
+  store i32 %a9, ptr %a1
+  call void @llvm.va_start(ptr %args)
+  %10 = va_arg ptr %args, i32
+  store i32 %10, ptr %a10, align 4
+  %11 = va_arg ptr %args, i32
+  store i32 %11, ptr %a11, align 4
+  %12 = va_arg ptr %args, i32
+  store i32 %12, ptr %a12, align 4
   ret void
 }
 
-declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind
 
 define i32 @main() nounwind ssp {
 ; CHECK-LABEL: main:
@@ -111,37 +110,37 @@ define i32 @main() nounwind ssp {
   %a10 = alloca i32, align 4
   %a11 = alloca i32, align 4
   %a12 = alloca i32, align 4
-  store i32 1, i32* %a1, align 4
-  store i32 2, i32* %a2, align 4
-  store i32 3, i32* %a3, align 4
-  store i32 4, i32* %a4, align 4
-  store i32 5, i32* %a5, align 4
-  store i32 6, i32* %a6, align 4
-  store i32 7, i32* %a7, align 4
-  store i32 8, i32* %a8, align 4
-  store i32 9, i32* %a9, align 4
-  store i32 10, i32* %a10, align 4
-  store i32 11, i32* %a11, align 4
-  store i32 12, i32* %a12, align 4
-  %1 = load i32, i32* %a1, align 4
-  %2 = load i32, i32* %a2, align 4
-  %3 = load i32, i32* %a3, align 4
-  %4 = load i32, i32* %a4, align 4
-  %5 = load i32, i32* %a5, align 4
-  %6 = load i32, i32* %a6, align 4
-  %7 = load i32, i32* %a7, align 4
-  %8 = load i32, i32* %a8, align 4
-  %9 = load i32, i32* %a9, align 4
-  %10 = load i32, i32* %a10, align 4
-  %11 = load i32, i32* %a11, align 4
-  %12 = load i32, i32* %a12, align 4
-  call void (i32*, i32, i32, i32, i32, i32, i32, i32, i32, ...) @fn9(i32* %a1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12)
+  store i32 1, ptr %a1, align 4
+  store i32 2, ptr %a2, align 4
+  store i32 3, ptr %a3, align 4
+  store i32 4, ptr %a4, align 4
+  store i32 5, ptr %a5, align 4
+  store i32 6, ptr %a6, align 4
+  store i32 7, ptr %a7, align 4
+  store i32 8, ptr %a8, align 4
+  store i32 9, ptr %a9, align 4
+  store i32 10, ptr %a10, align 4
+  store i32 11, ptr %a11, align 4
+  store i32 12, ptr %a12, align 4
+  %1 = load i32, ptr %a1, align 4
+  %2 = load i32, ptr %a2, align 4
+  %3 = load i32, ptr %a3, align 4
+  %4 = load i32, ptr %a4, align 4
+  %5 = load i32, ptr %a5, align 4
+  %6 = load i32, ptr %a6, align 4
+  %7 = load i32, ptr %a7, align 4
+  %8 = load i32, ptr %a8, align 4
+  %9 = load i32, ptr %a9, align 4
+  %10 = load i32, ptr %a10, align 4
+  %11 = load i32, ptr %a11, align 4
+  %12 = load i32, ptr %a12, align 4
+  call void (ptr, i32, i32, i32, i32, i32, i32, i32, i32, ...) @fn9(ptr %a1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12)
   ret i32 0
 }
 
 ;rdar://13668483
 @.str = private unnamed_addr constant [4 x i8] c"fmt\00", align 1
-define void @foo(i8* %fmt, ...) nounwind {
+define void @foo(ptr %fmt, ...) nounwind {
 ; CHECK-LABEL: foo:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    sub sp, sp, #48
@@ -156,17 +155,16 @@ define void @foo(i8* %fmt, ...) nounwind {
 ; CHECK-NEXT:    str q0, [sp], #48
 ; CHECK-NEXT:    ret
 entry:
-  %fmt.addr = alloca i8*, align 8
-  %args = alloca i8*, align 8
+  %fmt.addr = alloca ptr, align 8
+  %args = alloca ptr, align 8
   %vc = alloca i32, align 4
   %vv = alloca <4 x i32>, align 16
-  store i8* %fmt, i8** %fmt.addr, align 8
-  %args1 = bitcast i8** %args to i8*
-  call void @llvm.va_start(i8* %args1)
-  %0 = va_arg i8** %args, i32
-  store i32 %0, i32* %vc, align 4
-  %1 = va_arg i8** %args, <4 x i32>
-  store <4 x i32> %1, <4 x i32>* %vv, align 16
+  store ptr %fmt, ptr %fmt.addr, align 8
+  call void @llvm.va_start(ptr %args)
+  %0 = va_arg ptr %args, i32
+  store i32 %0, ptr %vc, align 4
+  %1 = va_arg ptr %args, <4 x i32>
+  store <4 x i32> %1, ptr %vv, align 16
   ret void
 }
 
@@ -191,11 +189,11 @@ define void @bar(i32 %x, <4 x i32> %y) nounwind {
 entry:
   %x.addr = alloca i32, align 4
   %y.addr = alloca <4 x i32>, align 16
-  store i32 %x, i32* %x.addr, align 4
-  store <4 x i32> %y, <4 x i32>* %y.addr, align 16
-  %0 = load i32, i32* %x.addr, align 4
-  %1 = load <4 x i32>, <4 x i32>* %y.addr, align 16
-  call void (i8*, ...) @foo(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %0, <4 x i32> %1)
+  store i32 %x, ptr %x.addr, align 4
+  store <4 x i32> %y, ptr %y.addr, align 16
+  %0 = load i32, ptr %x.addr, align 4
+  %1 = load <4 x i32>, ptr %y.addr, align 16
+  call void (ptr, ...) @foo(ptr @.str, i32 %0, <4 x i32> %1)
   ret void
 }
 
@@ -203,7 +201,7 @@ entry:
 ; When passing 16-byte aligned small structs as vararg, make sure the caller
 ; side is 16-byte aligned on stack.
 %struct.s41 = type { i32, i16, i32, i16 }
-define void @foo2(i8* %fmt, ...) nounwind {
+define void @foo2(ptr %fmt, ...) nounwind {
 ; CHECK-LABEL: foo2:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    sub sp, sp, #48
@@ -218,29 +216,25 @@ define void @foo2(i8* %fmt, ...) nounwind {
 ; CHECK-NEXT:    str q0, [sp], #48
 ; CHECK-NEXT:    ret
 entry:
-  %fmt.addr = alloca i8*, align 8
-  %args = alloca i8*, align 8
+  %fmt.addr = alloca ptr, align 8
+  %args = alloca ptr, align 8
   %vc = alloca i32, align 4
   %vs = alloca %struct.s41, align 16
-  store i8* %fmt, i8** %fmt.addr, align 8
-  %args1 = bitcast i8** %args to i8*
-  call void @llvm.va_start(i8* %args1)
-  %0 = va_arg i8** %args, i32
-  store i32 %0, i32* %vc, align 4
-  %ap.cur = load i8*, i8** %args
-  %1 = getelementptr i8, i8* %ap.cur, i32 15
-  %2 = ptrtoint i8* %1 to i64
+  store ptr %fmt, ptr %fmt.addr, align 8
+  call void @llvm.va_start(ptr %args)
+  %0 = va_arg ptr %args, i32
+  store i32 %0, ptr %vc, align 4
+  %ap.cur = load ptr, ptr %args
+  %1 = getelementptr i8, ptr %ap.cur, i32 15
+  %2 = ptrtoint ptr %1 to i64
   %3 = and i64 %2, -16
-  %ap.align = inttoptr i64 %3 to i8*
-  %ap.next = getelementptr i8, i8* %ap.align, i32 16
-  store i8* %ap.next, i8** %args
-  %4 = bitcast i8* %ap.align to %struct.s41*
-  %5 = bitcast %struct.s41* %vs to i8*
-  %6 = bitcast %struct.s41* %4 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %5, i8* align 16 %6, i64 16, i1 false)
+  %ap.align = inttoptr i64 %3 to ptr
+  %ap.next = getelementptr i8, ptr %ap.align, i32 16
+  store ptr %ap.next, ptr %args
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %vs, ptr align 16 %ap.align, i64 16, i1 false)
   ret void
 }
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
 
 define void @bar2(i32 %x, i128 %s41.coerce) nounwind {
 ; CHECK-LABEL: bar2:
@@ -264,12 +258,10 @@ define void @bar2(i32 %x, i128 %s41.coerce) nounwind {
 entry:
   %x.addr = alloca i32, align 4
   %s41 = alloca %struct.s41, align 16
-  store i32 %x, i32* %x.addr, align 4
-  %0 = bitcast %struct.s41* %s41 to i128*
-  store i128 %s41.coerce, i128* %0, align 1
-  %1 = load i32, i32* %x.addr, align 4
-  %2 = bitcast %struct.s41* %s41 to i128*
-  %3 = load i128, i128* %2, align 1
-  call void (i8*, ...) @foo2(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %1, i128 %3)
+  store i32 %x, ptr %x.addr, align 4
+  store i128 %s41.coerce, ptr %s41, align 1
+  %0 = load i32, ptr %x.addr, align 4
+  %1 = load i128, ptr %s41, align 1
+  call void (ptr, ...) @foo2(ptr @.str, i32 %0, i128 %1)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-abi.ll b/llvm/test/CodeGen/AArch64/arm64-abi.ll
index ba17810e32a6c..4168fdfda0954 100644
--- a/llvm/test/CodeGen/AArch64/arm64-abi.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-abi.ll
@@ -75,7 +75,7 @@ declare double @ext([2 x float])
 ; rdar://12656141
 ; 16-byte vector should be aligned to 16 bytes when passed on the stack.
 ; A double argument will be passed on the stack, so the vector should be at sp+16.
-define double @fixed_4i(<4 x i32>* nocapture %in) nounwind {
+define double @fixed_4i(ptr nocapture %in) nounwind {
 entry:
 ; CHECK-LABEL: fixed_4i
 ; CHECK: str [[REG_1:q[0-9]+]], [sp, #16]
@@ -83,7 +83,7 @@ entry:
 ; FAST: sub sp, sp
 ; FAST: mov x[[ADDR:[0-9]+]], sp
 ; FAST: str [[REG_1:q[0-9]+]], [x[[ADDR]], #16]
-  %0 = load <4 x i32>, <4 x i32>* %in, align 16
+  %0 = load <4 x i32>, ptr %in, align 16
   %call = tail call double @args_vec_4i(double 3.000000e+00, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, double 3.000000e+00, <4 x i32> %0, i8 signext 3)
   ret double %call
 }
@@ -104,7 +104,7 @@ entry:
   %conv1 = fpext float %add to double
   %add2 = fadd double %conv1, %d7
   %add3 = fadd double %add2, %d8
-  store double %add3, double* @g_d, align 8
+  store double %add3, ptr @g_d, align 8
   ret void
 }
 
@@ -123,13 +123,13 @@ entry:
   %add3 = fadd double %conv2, %conv1
   %conv4 = sitofp i32 %i9 to double
   %add5 = fadd double %conv4, %add3
-  store double %add5, double* @g_d, align 8
+  store double %add5, ptr @g_d, align 8
   ret void
 }
 
 ; rdar://12648441
 ; Check alignment on stack for v64, f64, i64, f32, i32.
-define double @test3(<2 x i32>* nocapture %in) nounwind {
+define double @test3(ptr nocapture %in) nounwind {
 entry:
 ; CHECK-LABEL: test3
 ; CHECK: str [[REG_1:d[0-9]+]], [sp, #8]
@@ -137,7 +137,7 @@ entry:
 ; FAST: sub sp, sp, #{{[0-9]+}}
 ; FAST: mov x[[ADDR:[0-9]+]], sp
 ; FAST: str [[REG_1:d[0-9]+]], [x[[ADDR]], #8]
-  %0 = load <2 x i32>, <2 x i32>* %in, align 8
+  %0 = load <2 x i32>, ptr %in, align 8
   %call = tail call double @args_vec_2i(double 3.000000e+00, <2 x i32> %0,
           <2 x i32> %0, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0,
           <2 x i32> %0, float 3.000000e+00, <2 x i32> %0, i8 signext 3)
@@ -146,13 +146,13 @@ entry:
 declare double @args_vec_2i(double, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>,
                <2 x i32>, <2 x i32>, <2 x i32>, float, <2 x i32>, i8 signext)
 
-define double @test4(double* nocapture %in) nounwind {
+define double @test4(ptr nocapture %in) nounwind {
 entry:
 ; CHECK-LABEL: test4
 ; CHECK: str [[REG_1:d[0-9]+]], [sp, #8]
 ; CHECK: str [[REG_2:w[0-9]+]], [sp]
 ; CHECK: mov w0, #3
-  %0 = load double, double* %in, align 8
+  %0 = load double, ptr %in, align 8
   %call = tail call double @args_f64(double 3.000000e+00, double %0, double %0,
           double %0, double %0, double %0, double %0, double %0,
           float 3.000000e+00, double %0, i8 signext 3)
@@ -161,13 +161,13 @@ entry:
 declare double @args_f64(double, double, double, double, double, double, double,
                double, float, double, i8 signext)
 
-define i64 @test5(i64* nocapture %in) nounwind {
+define i64 @test5(ptr nocapture %in) nounwind {
 entry:
 ; CHECK-LABEL: test5
 ; CHECK: strb [[REG_3:w[0-9]+]], [sp, #16]
 ; CHECK: str [[REG_1:x[0-9]+]], [sp, #8]
 ; CHECK: str [[REG_2:w[0-9]+]], [sp]
-  %0 = load i64, i64* %in, align 8
+  %0 = load i64, ptr %in, align 8
   %call = tail call i64 @args_i64(i64 3, i64 %0, i64 %0, i64 %0, i64 %0, i64 %0,
                          i64 %0, i64 %0, i32 3, i64 %0, i8 signext 3)
   ret i64 %call
@@ -175,13 +175,13 @@ entry:
 declare i64 @args_i64(i64, i64, i64, i64, i64, i64, i64, i64, i32, i64,
              i8 signext)
 
-define i32 @test6(float* nocapture %in) nounwind {
+define i32 @test6(ptr nocapture %in) nounwind {
 entry:
 ; CHECK-LABEL: test6
 ; CHECK: strb [[REG_2:w[0-9]+]], [sp, #8]
 ; CHECK: str [[REG_1:s[0-9]+]], [sp, #4]
 ; CHECK: strh [[REG_3:w[0-9]+]], [sp]
-  %0 = load float, float* %in, align 4
+  %0 = load float, ptr %in, align 4
   %call = tail call i32 @args_f32(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6,
           i32 7, i32 8, float 1.0, float 2.0, float 3.0, float 4.0, float 5.0,
           float 6.0, float 7.0, float 8.0, i16 signext 3, float %0,
@@ -192,13 +192,13 @@ declare i32 @args_f32(i32, i32, i32, i32, i32, i32, i32, i32,
                       float, float, float, float, float, float, float, float,
                       i16 signext, float, i8 signext)
 
-define i32 @test7(i32* nocapture %in) nounwind {
+define i32 @test7(ptr nocapture %in) nounwind {
 entry:
 ; CHECK-LABEL: test7
 ; CHECK: strb [[REG_2:w[0-9]+]], [sp, #8]
 ; CHECK: str [[REG_1:w[0-9]+]], [sp, #4]
 ; CHECK: strh [[REG_3:w[0-9]+]], [sp]
-  %0 = load i32, i32* %in, align 4
+  %0 = load i32, ptr %in, align 4
   %call = tail call i32 @args_i32(i32 3, i32 %0, i32 %0, i32 %0, i32 %0, i32 %0,
                          i32 %0, i32 %0, i16 signext 3, i32 %0, i8 signext 4)
   ret i32 %call
@@ -206,7 +206,7 @@ entry:
 declare i32 @args_i32(i32, i32, i32, i32, i32, i32, i32, i32, i16 signext, i32,
              i8 signext)
 
-define i32 @test8(i32 %argc, i8** nocapture %argv) nounwind {
+define i32 @test8(i32 %argc, ptr nocapture %argv) nounwind {
 entry:
 ; CHECK-LABEL: test8
 ; CHECK: str w8, [sp]

diff  --git a/llvm/test/CodeGen/AArch64/arm64-abi_align.ll b/llvm/test/CodeGen/AArch64/arm64-abi_align.ll
index 66bd2ef5ef394..089e171e5a4a7 100644
--- a/llvm/test/CodeGen/AArch64/arm64-abi_align.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-abi_align.ll
@@ -58,8 +58,8 @@ entry:
 ; CHECK-LABEL: caller38
 ; CHECK: ldr x1,
 ; CHECK: ldr x2,
-  %0 = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 4
-  %1 = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 4
+  %0 = load i64, ptr @g38, align 4
+  %1 = load i64, ptr @g38_2, align 4
   %call = tail call i32 @f38(i32 3, i64 %0, i64 %1) #5
   ret i32 %call
 }
@@ -75,8 +75,8 @@ entry:
 ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #8]
 ; CHECK: mov w[[C:[0-9]+]], #9
 ; CHECK: str w[[C]], [sp]
-  %0 = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 4
-  %1 = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 4
+  %0 = load i64, ptr @g38, align 4
+  %1 = load i64, ptr @g38_2, align 4
   %call = tail call i32 @f38_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6,
                                    i32 7, i32 8, i32 9, i64 %0, i64 %1) #5
   ret i32 %call
@@ -111,8 +111,8 @@ entry:
 ; CHECK-LABEL: caller39
 ; CHECK: ldp x1, x2,
 ; CHECK: ldp x3, x4,
-  %0 = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 16
-  %1 = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 16
+  %0 = load i128, ptr @g39, align 16
+  %1 = load i128, ptr @g39_2, align 16
   %call = tail call i32 @f39(i32 3, i128 %0, i128 %1) #5
   ret i32 %call
 }
@@ -129,8 +129,8 @@ entry:
 ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16]
 ; CHECK: mov w[[C:[0-9]+]], #9
 ; CHECK: str w[[C]], [sp]
-  %0 = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 16
-  %1 = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 16
+  %0 = load i128, ptr @g39, align 16
+  %1 = load i128, ptr @g39_2, align 16
   %call = tail call i32 @f39_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6,
                                    i32 7, i32 8, i32 9, i128 %0, i128 %1) #5
   ret i32 %call
@@ -167,8 +167,8 @@ entry:
 ; CHECK-LABEL: caller40
 ; CHECK: ldp x1, x2,
 ; CHECK: ldp x3, x4,
-  %0 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4
-  %1 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4
+  %0 = load [2 x i64], ptr @g40, align 4
+  %1 = load [2 x i64], ptr @g40_2, align 4
   %call = tail call i32 @f40(i32 3, [2 x i64] %0, [2 x i64] %1) #5
   ret i32 %call
 }
@@ -185,8 +185,8 @@ entry:
 ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #8]
 ; CHECK: mov w[[C:[0-9]+]], #9
 ; CHECK: str w[[C]], [sp]
-  %0 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4
-  %1 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4
+  %0 = load [2 x i64], ptr @g40, align 4
+  %1 = load [2 x i64], ptr @g40_2, align 4
   %call = tail call i32 @f40_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6,
                          i32 7, i32 8, i32 9, [2 x i64] %0, [2 x i64] %1) #5
   ret i32 %call
@@ -221,8 +221,8 @@ entry:
 ; CHECK-LABEL: caller41
 ; CHECK: ldp x1, x2,
 ; CHECK: ldp x3, x4,
-  %0 = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16
-  %1 = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 16
+  %0 = load i128, ptr @g41, align 16
+  %1 = load i128, ptr @g41_2, align 16
   %call = tail call i32 @f41(i32 3, i128 %0, i128 %1) #5
   ret i32 %call
 }
@@ -239,15 +239,15 @@ entry:
 ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16]
 ; CHECK: mov w[[C:[0-9]+]], #9
 ; CHECK: str w[[C]], [sp]
-  %0 = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16
-  %1 = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 16
+  %0 = load i128, ptr @g41, align 16
+  %1 = load i128, ptr @g41_2, align 16
   %call = tail call i32 @f41_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6,
                             i32 7, i32 8, i32 9, i128 %0, i128 %1) #5
   ret i32 %call
 }
 
 ; structs with size of 22 bytes, passed indirectly in x1 and x2
-define i32 @f42(i32 %i, %struct.s42* nocapture %s1, %struct.s42* nocapture %s2) #2 {
+define i32 @f42(i32 %i, ptr nocapture %s1, ptr nocapture %s2) #2 {
 entry:
 ; CHECK-LABEL: f42
 ; CHECK: ldr w[[A:[0-9]+]], [x1]
@@ -259,15 +259,13 @@ entry:
 ; FAST: ldr w[[B:[0-9]+]], [x2]
 ; FAST: add w[[C:[0-9]+]], w[[A]], w0
 ; FAST: add {{w[0-9]+}}, w[[C]], w[[B]]
-  %i1 = getelementptr inbounds %struct.s42, %struct.s42* %s1, i64 0, i32 0
-  %0 = load i32, i32* %i1, align 4, !tbaa !0
-  %i2 = getelementptr inbounds %struct.s42, %struct.s42* %s2, i64 0, i32 0
-  %1 = load i32, i32* %i2, align 4, !tbaa !0
-  %s = getelementptr inbounds %struct.s42, %struct.s42* %s1, i64 0, i32 1
-  %2 = load i16, i16* %s, align 2, !tbaa !3
+  %0 = load i32, ptr %s1, align 4, !tbaa !0
+  %1 = load i32, ptr %s2, align 4, !tbaa !0
+  %s = getelementptr inbounds %struct.s42, ptr %s1, i64 0, i32 1
+  %2 = load i16, ptr %s, align 2, !tbaa !3
   %conv = sext i16 %2 to i32
-  %s5 = getelementptr inbounds %struct.s42, %struct.s42* %s2, i64 0, i32 1
-  %3 = load i16, i16* %s5, align 2, !tbaa !3
+  %s5 = getelementptr inbounds %struct.s42, ptr %s2, i64 0, i32 1
+  %3 = load i16, ptr %s5, align 2, !tbaa !3
   %conv6 = sext i16 %3 to i32
   %add = add i32 %0, %i
   %add3 = add i32 %add, %1
@@ -300,19 +298,17 @@ entry:
 ; FAST: bl _memcpy
   %tmp = alloca %struct.s42, align 4
   %tmp1 = alloca %struct.s42, align 4
-  %0 = bitcast %struct.s42* %tmp to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 bitcast (%struct.s42* @g42 to i8*), i64 24, i1 false), !tbaa.struct !4
-  %1 = bitcast %struct.s42* %tmp1 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 bitcast (%struct.s42* @g42_2 to i8*), i64 24, i1 false), !tbaa.struct !4
-  %call = call i32 @f42(i32 3, %struct.s42* %tmp, %struct.s42* %tmp1) #5
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmp, ptr align 4 @g42, i64 24, i1 false), !tbaa.struct !4
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmp1, ptr align 4 @g42_2, i64 24, i1 false), !tbaa.struct !4
+  %call = call i32 @f42(i32 3, ptr %tmp, ptr %tmp1) #5
   ret i32 %call
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) #4
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) #4
 
 declare i32 @f42_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6,
-                       i32 %i7, i32 %i8, i32 %i9, %struct.s42* nocapture %s1,
-                       %struct.s42* nocapture %s2) #2
+                       i32 %i7, i32 %i8, i32 %i9, ptr nocapture %s1,
+                       ptr nocapture %s2) #2
 
 define i32 @caller42_stack() #3 {
 entry:
@@ -349,18 +345,16 @@ entry:
 ; FAST: str {{x[0-9]+}}, [sp, #16]
   %tmp = alloca %struct.s42, align 4
   %tmp1 = alloca %struct.s42, align 4
-  %0 = bitcast %struct.s42* %tmp to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 bitcast (%struct.s42* @g42 to i8*), i64 24, i1 false), !tbaa.struct !4
-  %1 = bitcast %struct.s42* %tmp1 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 bitcast (%struct.s42* @g42_2 to i8*), i64 24, i1 false), !tbaa.struct !4
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmp, ptr align 4 @g42, i64 24, i1 false), !tbaa.struct !4
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmp1, ptr align 4 @g42_2, i64 24, i1 false), !tbaa.struct !4
   %call = call i32 @f42_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
-                       i32 8, i32 9, %struct.s42* %tmp, %struct.s42* %tmp1) #5
+                       i32 8, i32 9, ptr %tmp, ptr %tmp1) #5
   ret i32 %call
 }
 
 ; structs with size of 22 bytes, alignment of 16
 ; passed indirectly in x1 and x2
-define i32 @f43(i32 %i, %struct.s43* nocapture %s1, %struct.s43* nocapture %s2) #2 {
+define i32 @f43(i32 %i, ptr nocapture %s1, ptr nocapture %s2) #2 {
 entry:
 ; CHECK-LABEL: f43
 ; CHECK: ldr w[[A:[0-9]+]], [x1]
@@ -372,15 +366,13 @@ entry:
 ; FAST: ldr w[[B:[0-9]+]], [x2]
 ; FAST: add w[[C:[0-9]+]], w[[A]], w0
 ; FAST: add {{w[0-9]+}}, w[[C]], w[[B]]
-  %i1 = getelementptr inbounds %struct.s43, %struct.s43* %s1, i64 0, i32 0
-  %0 = load i32, i32* %i1, align 4, !tbaa !0
-  %i2 = getelementptr inbounds %struct.s43, %struct.s43* %s2, i64 0, i32 0
-  %1 = load i32, i32* %i2, align 4, !tbaa !0
-  %s = getelementptr inbounds %struct.s43, %struct.s43* %s1, i64 0, i32 1
-  %2 = load i16, i16* %s, align 2, !tbaa !3
+  %0 = load i32, ptr %s1, align 4, !tbaa !0
+  %1 = load i32, ptr %s2, align 4, !tbaa !0
+  %s = getelementptr inbounds %struct.s43, ptr %s1, i64 0, i32 1
+  %2 = load i16, ptr %s, align 2, !tbaa !3
   %conv = sext i16 %2 to i32
-  %s5 = getelementptr inbounds %struct.s43, %struct.s43* %s2, i64 0, i32 1
-  %3 = load i16, i16* %s5, align 2, !tbaa !3
+  %s5 = getelementptr inbounds %struct.s43, ptr %s2, i64 0, i32 1
+  %3 = load i16, ptr %s5, align 2, !tbaa !3
   %conv6 = sext i16 %3 to i32
   %add = add i32 %0, %i
   %add3 = add i32 %add, %1
@@ -415,17 +407,15 @@ entry:
 ; FAST: mov x2, sp
   %tmp = alloca %struct.s43, align 16
   %tmp1 = alloca %struct.s43, align 16
-  %0 = bitcast %struct.s43* %tmp to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.s43* @g43 to i8*), i64 32, i1 false), !tbaa.struct !4
-  %1 = bitcast %struct.s43* %tmp1 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %1, i8* align 16 bitcast (%struct.s43* @g43_2 to i8*), i64 32, i1 false), !tbaa.struct !4
-  %call = call i32 @f43(i32 3, %struct.s43* %tmp, %struct.s43* %tmp1) #5
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %tmp, ptr align 16 @g43, i64 32, i1 false), !tbaa.struct !4
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %tmp1, ptr align 16 @g43_2, i64 32, i1 false), !tbaa.struct !4
+  %call = call i32 @f43(i32 3, ptr %tmp, ptr %tmp1) #5
   ret i32 %call
 }
 
 declare i32 @f43_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6,
-                       i32 %i7, i32 %i8, i32 %i9, %struct.s43* nocapture %s1,
-                       %struct.s43* nocapture %s2) #2
+                       i32 %i7, i32 %i8, i32 %i9, ptr nocapture %s1,
+                       ptr nocapture %s2) #2
 
 define i32 @caller43_stack() #3 {
 entry:
@@ -464,12 +454,10 @@ entry:
 ; FAST: str x[[B]], [sp, #16]
   %tmp = alloca %struct.s43, align 16
   %tmp1 = alloca %struct.s43, align 16
-  %0 = bitcast %struct.s43* %tmp to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %0, i8* align 16 bitcast (%struct.s43* @g43 to i8*), i64 32, i1 false), !tbaa.struct !4
-  %1 = bitcast %struct.s43* %tmp1 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %1, i8* align 16 bitcast (%struct.s43* @g43_2 to i8*), i64 32, i1 false), !tbaa.struct !4
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %tmp, ptr align 16 @g43, i64 32, i1 false), !tbaa.struct !4
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %tmp1, ptr align 16 @g43_2, i64 32, i1 false), !tbaa.struct !4
   %call = call i32 @f43_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
-                       i32 8, i32 9, %struct.s43* %tmp, %struct.s43* %tmp1) #5
+                       i32 8, i32 9, ptr %tmp, ptr %tmp1) #5
   ret i32 %call
 }
 
@@ -492,7 +480,7 @@ entry:
 ; Load/Store opt is disabled with -O0, so the i128 is split.
 ; FAST: str {{x[0-9]+}}, [x[[ADDR]], #8]
 ; FAST: str {{x[0-9]+}}, [x[[ADDR]]]
-  %0 = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16
+  %0 = load i128, ptr @g41, align 16
   %call = tail call i32 @callee_i128_split(i32 1, i32 2, i32 3, i32 4, i32 5,
                                            i32 6, i32 7, i128 %0, i32 8) #5
   ret i32 %call
@@ -513,7 +501,7 @@ entry:
 ; FAST: mov x[[R0:[0-9]+]], sp
 ; FAST: mov w[[R1:[0-9]+]], #8
 ; FAST: str w[[R1]], [x[[R0]]]
-  %0 = load i64, i64* bitcast (%struct.s41* @g41 to i64*), align 16
+  %0 = load i64, ptr @g41, align 16
   %call = tail call i32 @callee_i64(i32 1, i32 2, i32 3, i32 4, i32 5,
                                     i32 6, i32 7, i64 %0, i32 8) #5
   ret i32 %call

diff  --git a/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll b/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
index da1f366757a83..d593272be1aa2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -O3 -mtriple arm64-apple-ios3 -aarch64-enable-gep-opt=false %s -o - | FileCheck %s
 ; <rdar://problem/13621857>
 
- at block = common global i8* null, align 8
+ at block = common global ptr null, align 8
 
 define i32 @fct(i32 %i1, i32 %i2) {
 ; CHECK: @fct
@@ -11,12 +11,12 @@ define i32 @fct(i32 %i1, i32 %i2) {
 ; _CHECK-NOT: , sxtw]
 entry:
   %idxprom = sext i32 %i1 to i64
-  %0 = load i8*, i8** @block, align 8
-  %arrayidx = getelementptr inbounds i8, i8* %0, i64 %idxprom
-  %1 = load i8, i8* %arrayidx, align 1
+  %0 = load ptr, ptr @block, align 8
+  %arrayidx = getelementptr inbounds i8, ptr %0, i64 %idxprom
+  %1 = load i8, ptr %arrayidx, align 1
   %idxprom1 = sext i32 %i2 to i64
-  %arrayidx2 = getelementptr inbounds i8, i8* %0, i64 %idxprom1
-  %2 = load i8, i8* %arrayidx2, align 1
+  %arrayidx2 = getelementptr inbounds i8, ptr %0, i64 %idxprom1
+  %2 = load i8, ptr %arrayidx2, align 1
   %cmp = icmp eq i8 %1, %2
   br i1 %cmp, label %if.end, label %if.then
 
@@ -29,11 +29,11 @@ if.end:                                           ; preds = %entry
   %inc = add nsw i32 %i1, 1
   %inc9 = add nsw i32 %i2, 1
   %idxprom10 = sext i32 %inc to i64
-  %arrayidx11 = getelementptr inbounds i8, i8* %0, i64 %idxprom10
-  %3 = load i8, i8* %arrayidx11, align 1
+  %arrayidx11 = getelementptr inbounds i8, ptr %0, i64 %idxprom10
+  %3 = load i8, ptr %arrayidx11, align 1
   %idxprom12 = sext i32 %inc9 to i64
-  %arrayidx13 = getelementptr inbounds i8, i8* %0, i64 %idxprom12
-  %4 = load i8, i8* %arrayidx13, align 1
+  %arrayidx13 = getelementptr inbounds i8, ptr %0, i64 %idxprom12
+  %4 = load i8, ptr %arrayidx13, align 1
   %cmp16 = icmp eq i8 %3, %4
   br i1 %cmp16, label %if.end23, label %if.then18
 
@@ -46,11 +46,11 @@ if.end23:                                         ; preds = %if.end
   %inc24 = add nsw i32 %i1, 2
   %inc25 = add nsw i32 %i2, 2
   %idxprom26 = sext i32 %inc24 to i64
-  %arrayidx27 = getelementptr inbounds i8, i8* %0, i64 %idxprom26
-  %5 = load i8, i8* %arrayidx27, align 1
+  %arrayidx27 = getelementptr inbounds i8, ptr %0, i64 %idxprom26
+  %5 = load i8, ptr %arrayidx27, align 1
   %idxprom28 = sext i32 %inc25 to i64
-  %arrayidx29 = getelementptr inbounds i8, i8* %0, i64 %idxprom28
-  %6 = load i8, i8* %arrayidx29, align 1
+  %arrayidx29 = getelementptr inbounds i8, ptr %0, i64 %idxprom28
+  %6 = load i8, ptr %arrayidx29, align 1
   %cmp32 = icmp eq i8 %5, %6
   br i1 %cmp32, label %return, label %if.then34
 
@@ -71,12 +71,12 @@ define i32 @fct1(i32 %i1, i32 %i2) optsize {
 ; CHECK: , sxtw]
 entry:
   %idxprom = sext i32 %i1 to i64
-  %0 = load i8*, i8** @block, align 8
-  %arrayidx = getelementptr inbounds i8, i8* %0, i64 %idxprom
-  %1 = load i8, i8* %arrayidx, align 1
+  %0 = load ptr, ptr @block, align 8
+  %arrayidx = getelementptr inbounds i8, ptr %0, i64 %idxprom
+  %1 = load i8, ptr %arrayidx, align 1
   %idxprom1 = sext i32 %i2 to i64
-  %arrayidx2 = getelementptr inbounds i8, i8* %0, i64 %idxprom1
-  %2 = load i8, i8* %arrayidx2, align 1
+  %arrayidx2 = getelementptr inbounds i8, ptr %0, i64 %idxprom1
+  %2 = load i8, ptr %arrayidx2, align 1
   %cmp = icmp eq i8 %1, %2
   br i1 %cmp, label %if.end, label %if.then
 
@@ -89,11 +89,11 @@ if.end:                                           ; preds = %entry
   %inc = add nsw i32 %i1, 1
   %inc9 = add nsw i32 %i2, 1
   %idxprom10 = sext i32 %inc to i64
-  %arrayidx11 = getelementptr inbounds i8, i8* %0, i64 %idxprom10
-  %3 = load i8, i8* %arrayidx11, align 1
+  %arrayidx11 = getelementptr inbounds i8, ptr %0, i64 %idxprom10
+  %3 = load i8, ptr %arrayidx11, align 1
   %idxprom12 = sext i32 %inc9 to i64
-  %arrayidx13 = getelementptr inbounds i8, i8* %0, i64 %idxprom12
-  %4 = load i8, i8* %arrayidx13, align 1
+  %arrayidx13 = getelementptr inbounds i8, ptr %0, i64 %idxprom12
+  %4 = load i8, ptr %arrayidx13, align 1
   %cmp16 = icmp eq i8 %3, %4
   br i1 %cmp16, label %if.end23, label %if.then18
 
@@ -106,11 +106,11 @@ if.end23:                                         ; preds = %if.end
   %inc24 = add nsw i32 %i1, 2
   %inc25 = add nsw i32 %i2, 2
   %idxprom26 = sext i32 %inc24 to i64
-  %arrayidx27 = getelementptr inbounds i8, i8* %0, i64 %idxprom26
-  %5 = load i8, i8* %arrayidx27, align 1
+  %arrayidx27 = getelementptr inbounds i8, ptr %0, i64 %idxprom26
+  %5 = load i8, ptr %arrayidx27, align 1
   %idxprom28 = sext i32 %inc25 to i64
-  %arrayidx29 = getelementptr inbounds i8, i8* %0, i64 %idxprom28
-  %6 = load i8, i8* %arrayidx29, align 1
+  %arrayidx29 = getelementptr inbounds i8, ptr %0, i64 %idxprom28
+  %6 = load i8, ptr %arrayidx29, align 1
   %cmp32 = icmp eq i8 %5, %6
   br i1 %cmp32, label %return, label %if.then34
 
@@ -126,7 +126,7 @@ return:                                           ; preds = %if.end23, %if.then3
 
 ; CHECK: @test
 ; CHECK-NOT: , uxtw #2]
-define i32 @test(i32* %array, i8 zeroext %c, i32 %arg) {
+define i32 @test(ptr %array, i8 zeroext %c, i32 %arg) {
 entry:
   %conv = zext i8 %c to i32
   %add = sub i32 0, %arg
@@ -135,9 +135,9 @@ entry:
 
 if.then:                                          ; preds = %entry
   %idxprom = zext i8 %c to i64
-  %arrayidx = getelementptr inbounds i32, i32* %array, i64 %idxprom
-  %0 = load volatile i32, i32* %arrayidx, align 4
-  %1 = load volatile i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %array, i64 %idxprom
+  %0 = load volatile i32, ptr %arrayidx, align 4
+  %1 = load volatile i32, ptr %arrayidx, align 4
   %add3 = add nsw i32 %1, %0
   br label %if.end
 
@@ -150,7 +150,7 @@ if.end:                                           ; preds = %entry, %if.then
 ; CHECK: @test2
 ; CHECK: , uxtw #2]
 ; CHECK: , uxtw #2]
-define i32 @test2(i32* %array, i8 zeroext %c, i32 %arg) optsize {
+define i32 @test2(ptr %array, i8 zeroext %c, i32 %arg) optsize {
 entry:
   %conv = zext i8 %c to i32
   %add = sub i32 0, %arg
@@ -159,9 +159,9 @@ entry:
 
 if.then:                                          ; preds = %entry
   %idxprom = zext i8 %c to i64
-  %arrayidx = getelementptr inbounds i32, i32* %array, i64 %idxprom
-  %0 = load volatile i32, i32* %arrayidx, align 4
-  %1 = load volatile i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %array, i64 %idxprom
+  %0 = load volatile i32, ptr %arrayidx, align 4
+  %1 = load volatile i32, ptr %arrayidx, align 4
   %add3 = add nsw i32 %1, %0
   br label %if.end
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll b/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
index 09fd578f8ccc1..3163ca0fb891b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
@@ -5,7 +5,7 @@
 ; way of the NEXT patterns.
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
 
- at block = common global i8* null, align 8
+ at block = common global ptr null, align 8
 
 define zeroext i8 @fullGtU(i32 %i1, i32 %i2) {
 ; CHECK-LABEL: fullGtU:
@@ -40,12 +40,12 @@ define zeroext i8 @fullGtU(i32 %i1, i32 %i2) {
 ; CHECK-NEXT:    ret
 entry:
   %idxprom = sext i32 %i1 to i64
-  %tmp = load i8*, i8** @block, align 8
-  %arrayidx = getelementptr inbounds i8, i8* %tmp, i64 %idxprom
-  %tmp1 = load i8, i8* %arrayidx, align 1
+  %tmp = load ptr, ptr @block, align 8
+  %arrayidx = getelementptr inbounds i8, ptr %tmp, i64 %idxprom
+  %tmp1 = load i8, ptr %arrayidx, align 1
   %idxprom1 = sext i32 %i2 to i64
-  %arrayidx2 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom1
-  %tmp2 = load i8, i8* %arrayidx2, align 1
+  %arrayidx2 = getelementptr inbounds i8, ptr %tmp, i64 %idxprom1
+  %tmp2 = load i8, ptr %arrayidx2, align 1
   %cmp = icmp eq i8 %tmp1, %tmp2
   br i1 %cmp, label %if.end, label %if.then
 
@@ -58,11 +58,11 @@ if.end:                                           ; preds = %entry
   %inc = add nsw i32 %i1, 1
   %inc10 = add nsw i32 %i2, 1
   %idxprom11 = sext i32 %inc to i64
-  %arrayidx12 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom11
-  %tmp3 = load i8, i8* %arrayidx12, align 1
+  %arrayidx12 = getelementptr inbounds i8, ptr %tmp, i64 %idxprom11
+  %tmp3 = load i8, ptr %arrayidx12, align 1
   %idxprom13 = sext i32 %inc10 to i64
-  %arrayidx14 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom13
-  %tmp4 = load i8, i8* %arrayidx14, align 1
+  %arrayidx14 = getelementptr inbounds i8, ptr %tmp, i64 %idxprom13
+  %tmp4 = load i8, ptr %arrayidx14, align 1
   %cmp17 = icmp eq i8 %tmp3, %tmp4
   br i1 %cmp17, label %if.end25, label %if.then19
 
@@ -75,11 +75,11 @@ if.end25:                                         ; preds = %if.end
   %inc26 = add nsw i32 %i1, 2
   %inc27 = add nsw i32 %i2, 2
   %idxprom28 = sext i32 %inc26 to i64
-  %arrayidx29 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom28
-  %tmp5 = load i8, i8* %arrayidx29, align 1
+  %arrayidx29 = getelementptr inbounds i8, ptr %tmp, i64 %idxprom28
+  %tmp5 = load i8, ptr %arrayidx29, align 1
   %idxprom30 = sext i32 %inc27 to i64
-  %arrayidx31 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom30
-  %tmp6 = load i8, i8* %arrayidx31, align 1
+  %arrayidx31 = getelementptr inbounds i8, ptr %tmp, i64 %idxprom30
+  %tmp6 = load i8, ptr %arrayidx31, align 1
   %cmp34 = icmp eq i8 %tmp5, %tmp6
   br i1 %cmp34, label %return, label %if.then36
 

diff --git a/llvm/test/CodeGen/AArch64/arm64-addrmode.ll b/llvm/test/CodeGen/AArch64/arm64-addrmode.ll
index 447095284cef7..cc9b47c049d56 100644
--- a/llvm/test/CodeGen/AArch64/arm64-addrmode.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-addrmode.ll
@@ -5,48 +5,48 @@
 @object = external hidden global i64, section "__DATA, __objc_ivar", align 8
 
 ; base + offset (imm9)
-define void @t1(i64* %object) {
+define void @t1(ptr %object) {
 ; CHECK-LABEL: t1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr xzr, [x0, #8]
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 1
-  %tmp = load volatile i64, i64* %incdec.ptr, align 8
+  %incdec.ptr = getelementptr inbounds i64, ptr %object, i64 1
+  %tmp = load volatile i64, ptr %incdec.ptr, align 8
   ret void
 }
 
 ; base + offset (> imm9)
-define void @t2(i64* %object) {
+define void @t2(ptr %object) {
 ; CHECK-LABEL: t2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub x8, x0, #264
 ; CHECK-NEXT:    ldr xzr, [x8]
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 -33
-  %tmp = load volatile i64, i64* %incdec.ptr, align 8
+  %incdec.ptr = getelementptr inbounds i64, ptr %object, i64 -33
+  %tmp = load volatile i64, ptr %incdec.ptr, align 8
   ret void
 }
 
 ; base + unsigned offset (> imm9 and <= imm12 * size of type in bytes)
-define void @t3(i64* %object) {
+define void @t3(ptr %object) {
 ; CHECK-LABEL: t3:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr xzr, [x0, #32760]
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 4095
-  %tmp = load volatile i64, i64* %incdec.ptr, align 8
+  %incdec.ptr = getelementptr inbounds i64, ptr %object, i64 4095
+  %tmp = load volatile i64, ptr %incdec.ptr, align 8
   ret void
 }
 
 ; base + unsigned offset (> imm12 * size of type in bytes)
-define void @t4(i64* %object) {
+define void @t4(ptr %object) {
 ; CHECK-LABEL: t4:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #32768
 ; CHECK-NEXT:    ldr xzr, [x0, x8]
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i64, i64* %object, i64 4096
-  %tmp = load volatile i64, i64* %incdec.ptr, align 8
+  %incdec.ptr = getelementptr inbounds i64, ptr %object, i64 4096
+  %tmp = load volatile i64, ptr %incdec.ptr, align 8
   ret void
 }
 
@@ -58,22 +58,22 @@ define void @t5(i64 %a) {
 ; CHECK-NEXT:    add x8, x8, :lo12:object
 ; CHECK-NEXT:    ldr xzr, [x8, x0, lsl #3]
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 %a
-  %tmp = load volatile i64, i64* %incdec.ptr, align 8
+  %incdec.ptr = getelementptr inbounds i64, ptr @object, i64 %a
+  %tmp = load volatile i64, ptr %incdec.ptr, align 8
   ret void
 }
 
 ; base + reg + imm
-define void @t6(i64 %a, i64* %object) {
+define void @t6(i64 %a, ptr %object) {
 ; CHECK-LABEL: t6:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #32768
 ; CHECK-NEXT:    add x9, x1, x0, lsl #3
 ; CHECK-NEXT:    ldr xzr, [x9, x8]
 ; CHECK-NEXT:    ret
-  %tmp1 = getelementptr inbounds i64, i64* %object, i64 %a
-  %incdec.ptr = getelementptr inbounds i64, i64* %tmp1, i64 4096
-  %tmp = load volatile i64, i64* %incdec.ptr, align 8
+  %tmp1 = getelementptr inbounds i64, ptr %object, i64 %a
+  %incdec.ptr = getelementptr inbounds i64, ptr %tmp1, i64 4096
+  %tmp = load volatile i64, ptr %incdec.ptr, align 8
   ret void
 }
 
@@ -85,8 +85,8 @@ define void @t7(i64 %a) {
 ; CHECK-NEXT:    ldr xzr, [x0, x8]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, 65535   ;0xffff
-  %2 = inttoptr i64 %1 to i64*
-  %3 = load volatile i64, i64* %2, align 8
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load volatile i64, ptr %2, align 8
   ret void
 }
 
@@ -97,8 +97,8 @@ define void @t8(i64 %a) {
 ; CHECK-NEXT:    ldr xzr, [x0, x8]
 ; CHECK-NEXT:    ret
   %1 = sub i64 %a, 4662   ;-4662 is 0xffffffffffffedca
-  %2 = inttoptr i64 %1 to i64*
-  %3 = load volatile i64, i64* %2, align 8
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load volatile i64, ptr %2, align 8
   ret void
 }
 
@@ -109,8 +109,8 @@ define void @t9(i64 %a) {
 ; CHECK-NEXT:    ldr xzr, [x0, x8]
 ; CHECK-NEXT:    ret
   %1 = add i64 -305463297, %a   ;-305463297 is 0xffffffffedcaffff
-  %2 = inttoptr i64 %1 to i64*
-  %3 = load volatile i64, i64* %2, align 8
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load volatile i64, ptr %2, align 8
   ret void
 }
 
@@ -121,8 +121,8 @@ define void @t10(i64 %a) {
 ; CHECK-NEXT:    ldr xzr, [x0, x8]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, 81909218222800896   ;0x123000000000000
-  %2 = inttoptr i64 %1 to i64*
-  %3 = load volatile i64, i64* %2, align 8
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load volatile i64, ptr %2, align 8
   ret void
 }
 
@@ -134,8 +134,8 @@ define void @t11(i64 %a) {
 ; CHECK-NEXT:    ldr xzr, [x0, x8]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, 19088743   ;0x1234567
-  %2 = inttoptr i64 %1 to i64*
-  %3 = load volatile i64, i64* %2, align 8
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load volatile i64, ptr %2, align 8
   ret void
 }
 
@@ -147,8 +147,8 @@ define void @t12(i64 %a) {
 ; CHECK-NEXT:    ldr xzr, [x8]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, 4095   ;0xfff
-  %2 = inttoptr i64 %1 to i64*
-  %3 = load volatile i64, i64* %2, align 8
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load volatile i64, ptr %2, align 8
   ret void
 }
 
@@ -159,8 +159,8 @@ define void @t13(i64 %a) {
 ; CHECK-NEXT:    ldr xzr, [x8]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, -4095   ;-0xfff
-  %2 = inttoptr i64 %1 to i64*
-  %3 = load volatile i64, i64* %2, align 8
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load volatile i64, ptr %2, align 8
   ret void
 }
 
@@ -171,8 +171,8 @@ define void @t14(i64 %a) {
 ; CHECK-NEXT:    ldr xzr, [x8]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, 1191936   ;0x123000
-  %2 = inttoptr i64 %1 to i64*
-  %3 = load volatile i64, i64* %2, align 8
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load volatile i64, ptr %2, align 8
   ret void
 }
 
@@ -183,8 +183,8 @@ define void @t15(i64 %a) {
 ; CHECK-NEXT:    ldr xzr, [x8]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, -1191936   ;0xFFFFFFFFFFEDD000
-  %2 = inttoptr i64 %1 to i64*
-  %3 = load volatile i64, i64* %2, align 8
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load volatile i64, ptr %2, align 8
   ret void
 }
 
@@ -194,8 +194,8 @@ define void @t16(i64 %a) {
 ; CHECK-NEXT:    ldr xzr, [x0, #28672]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, 28672   ;0x7000
-  %2 = inttoptr i64 %1 to i64*
-  %3 = load volatile i64, i64* %2, align 8
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load volatile i64, ptr %2, align 8
   ret void
 }
 
@@ -205,7 +205,7 @@ define void @t17(i64 %a) {
 ; CHECK-NEXT:    ldur xzr, [x0, #-256]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, -256   ;-0x100
-  %2 = inttoptr i64 %1 to i64*
-  %3 = load volatile i64, i64* %2, align 8
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load volatile i64, ptr %2, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll b/llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll
index 83ac21c165f97..0a91ada527d82 100644
--- a/llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll
@@ -5,7 +5,7 @@
 ; aligned.
 @T3_retval = common global <16 x float> zeroinitializer, align 16
 
-define void @test(<16 x float>* noalias sret(<16 x float>) %agg.result) nounwind ssp {
+define void @test(ptr noalias sret(<16 x float>) %agg.result) nounwind ssp {
 entry:
 ; CHECK: test
 ; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], [sp, #32]
@@ -13,9 +13,9 @@ entry:
 ; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], [[[BASE:x[0-9]+]], #32]
 ; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], [[[BASE]]]
  %retval = alloca <16 x float>, align 16
- %0 = load <16 x float>, <16 x float>* @T3_retval, align 16
- store <16 x float> %0, <16 x float>* %retval
- %1 = load <16 x float>, <16 x float>* %retval
- store <16 x float> %1, <16 x float>* %agg.result, align 16
+ %0 = load <16 x float>, ptr @T3_retval, align 16
+ store <16 x float> %0, ptr %retval
+ %1 = load <16 x float>, ptr %retval
+ store <16 x float> %1, ptr %agg.result, align 16
  ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll b/llvm/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll
index 0267acdda658b..58446f39c4a33 100644
--- a/llvm/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll
@@ -10,18 +10,17 @@ define i32 @foo(i32 %a) nounwind {
   %i = alloca i32, align 4
   %arr2 = alloca [32 x i32], align 4
   %j = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  %tmp = load i32, i32* %a.addr, align 4
+  store i32 %a, ptr %a.addr, align 4
+  %tmp = load i32, ptr %a.addr, align 4
   %tmp1 = zext i32 %tmp to i64
   %v = mul i64 4, %tmp1
   %vla = alloca i8, i64 %v, align 4
-  %tmp2 = bitcast i8* %vla to i32*
-  %tmp3 = load i32, i32* %a.addr, align 4
-  store i32 %tmp3, i32* %i, align 4
-  %tmp4 = load i32, i32* %a.addr, align 4
-  store i32 %tmp4, i32* %j, align 4
-  %tmp5 = load i32, i32* %j, align 4
-  store i32 %tmp5, i32* %retval
-  %x = load i32, i32* %retval
+  %tmp3 = load i32, ptr %a.addr, align 4
+  store i32 %tmp3, ptr %i, align 4
+  %tmp4 = load i32, ptr %a.addr, align 4
+  store i32 %tmp4, ptr %j, align 4
+  %tmp5 = load i32, ptr %j, align 4
+  store i32 %tmp5, ptr %retval
+  %x = load i32, ptr %retval
   ret i32 %x
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll b/llvm/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll
index f528c9cfabf4c..4bc3ba0f0a966 100644
--- a/llvm/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll
@@ -2,10 +2,10 @@
 ; ModuleID = 'and-cbz-extr-mr.bc'
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64-S128"
 
-define zeroext i1 @foo(i1 %IsEditable, i1 %isTextField, i8* %str1, i8* %str2, i8* %str3, i8* %str4, i8* %str5, i8* %str6, i8* %str7, i8* %str8, i8* %str9, i8* %str10, i8* %str11, i8* %str12, i8* %str13, i32 %int1, i8* %str14) unnamed_addr #0 align 2 {
+define zeroext i1 @foo(i1 %IsEditable, i1 %isTextField, ptr %str1, ptr %str2, ptr %str3, ptr %str4, ptr %str5, ptr %str6, ptr %str7, ptr %str8, ptr %str9, ptr %str10, ptr %str11, ptr %str12, ptr %str13, i32 %int1, ptr %str14) unnamed_addr #0 align 2 {
 ; CHECK: _foo:
 entry:
-  %tobool = icmp eq i8* %str14, null
+  %tobool = icmp eq ptr %str14, null
   br i1 %tobool, label %return, label %if.end
 
 ; CHECK: %if.end
@@ -16,19 +16,19 @@ if.end:                                           ; preds = %entry
   br i1 %tobool.i.i.i, label %if.end12, label %land.rhs.i, !prof !1
 
 land.rhs.i:                                       ; preds = %if.end
-  %cmp.i.i.i = icmp eq i8* %str12, %str13
+  %cmp.i.i.i = icmp eq ptr %str12, %str13
   br i1 %cmp.i.i.i, label %if.then3, label %lor.rhs.i.i.i
 
 lor.rhs.i.i.i:                                    ; preds = %land.rhs.i
-  %cmp.i13.i.i.i = icmp eq i8* %str10, %str11
+  %cmp.i13.i.i.i = icmp eq ptr %str10, %str11
   br i1 %cmp.i13.i.i.i, label %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit, label %if.end5
 
 _ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit: ; preds = %lor.rhs.i.i.i
-  %cmp.i.i.i.i = icmp eq i8* %str8, %str9
+  %cmp.i.i.i.i = icmp eq ptr %str8, %str9
   br i1 %cmp.i.i.i.i, label %if.then3, label %if.end5
 
 if.then3:                                         ; preds = %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit, %land.rhs.i
-  %tmp11 = load i8, i8* %str14, align 8
+  %tmp11 = load i8, ptr %str14, align 8
   %tmp12 = and i8 %tmp11, 2
   %tmp13 = icmp ne i8 %tmp12, 0
   br label %return
@@ -39,22 +39,22 @@ if.end5:                                          ; preds = %_ZNK7WebCore4Node10
   br i1 %tobool.i.i.i, label %if.end12, label %land.rhs.i19, !prof !1
 
 land.rhs.i19:                                     ; preds = %if.end5
-  %cmp.i.i.i18 = icmp eq i8* %str6, %str7
+  %cmp.i.i.i18 = icmp eq ptr %str6, %str7
   br i1 %cmp.i.i.i18, label %if.then7, label %lor.rhs.i.i.i23
 
 lor.rhs.i.i.i23:                                  ; preds = %land.rhs.i19
-  %cmp.i13.i.i.i22 = icmp eq i8* %str3, %str4
+  %cmp.i13.i.i.i22 = icmp eq ptr %str3, %str4
   br i1 %cmp.i13.i.i.i22, label %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit28, label %if.end12
 
 _ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit28: ; preds = %lor.rhs.i.i.i23
-  %cmp.i.i.i.i26 = icmp eq i8* %str1, %str2
+  %cmp.i.i.i.i26 = icmp eq ptr %str1, %str2
   br i1 %cmp.i.i.i.i26, label %if.then7, label %if.end12
 
 if.then7:                                         ; preds = %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit28, %land.rhs.i19
   br i1 %isTextField, label %if.then9, label %if.end12
 
 if.then9:                                         ; preds = %if.then7
-  %tmp23 = load i8, i8* %str5, align 8
+  %tmp23 = load i8, ptr %str5, align 8
   %tmp24 = and i8 %tmp23, 2
   %tmp25 = icmp ne i8 %tmp24, 0
   br label %return

diff --git a/llvm/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll b/llvm/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll
index 2bf13606ba7d0..9a5069b8973a9 100644
--- a/llvm/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-arm64-dead-def-elimination-flag.ll
@@ -5,7 +5,7 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 ; Function Attrs: nounwind ssp uwtable
 define i32 @test1() #0 {
   %tmp1 = alloca i8
-  %tmp2 = icmp eq i8* %tmp1, null
+  %tmp2 = icmp eq ptr %tmp1, null
   %tmp3 = zext i1 %tmp2 to i32
 
   ret i32 %tmp3

diff --git a/llvm/test/CodeGen/AArch64/arm64-assert-zext-sext.ll b/llvm/test/CodeGen/AArch64/arm64-assert-zext-sext.ll
index df4a9010dfa9c..9cbbabed34936 100644
--- a/llvm/test/CodeGen/AArch64/arm64-assert-zext-sext.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-assert-zext-sext.ll
@@ -4,7 +4,7 @@
 declare i32 @test(i32) local_unnamed_addr
 declare i32 @test1(i64) local_unnamed_addr
 
-define i32 @assertzext(i32 %n, i1 %a, i32* %b) local_unnamed_addr {
+define i32 @assertzext(i32 %n, i1 %a, ptr %b) local_unnamed_addr {
 ; CHECK-LABEL: assertzext:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
@@ -34,7 +34,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                     ; preds = %entry
-  store i32 0, i32* %b, align 4
+  store i32 0, ptr %b, align 4
   br label %if.end
 
 if.end:                      ; preds = %if.then, %entry

diff --git a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
index c5884aecc706e..37c61d0a4a0fb 100644
--- a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
@@ -5,7 +5,7 @@
 
 @var = global i128 0
 
-define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
+define i128 @val_compare_and_swap(ptr %p, i128 %oldval, i128 %newval) {
 ; NOOUTLINE-LABEL: val_compare_and_swap:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:  .LBB0_1: // =>This Inner Loop Header: Depth=1
@@ -51,12 +51,12 @@ define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
 ; LSE-NEXT:    mov x0, x2
 ; LSE-NEXT:    mov x1, x3
 ; LSE-NEXT:    ret
-  %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval acquire acquire
+  %pair = cmpxchg ptr %p, i128 %oldval, i128 %newval acquire acquire
   %val = extractvalue { i128, i1 } %pair, 0
   ret i128 %val
 }
 
-define i128 @val_compare_and_swap_seqcst(i128* %p, i128 %oldval, i128 %newval) {
+define i128 @val_compare_and_swap_seqcst(ptr %p, i128 %oldval, i128 %newval) {
 ; NOOUTLINE-LABEL: val_compare_and_swap_seqcst:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:  .LBB1_1: // =>This Inner Loop Header: Depth=1
@@ -102,12 +102,12 @@ define i128 @val_compare_and_swap_seqcst(i128* %p, i128 %oldval, i128 %newval) {
 ; LSE-NEXT:    mov x0, x2
 ; LSE-NEXT:    mov x1, x3
 ; LSE-NEXT:    ret
-  %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval seq_cst seq_cst
+  %pair = cmpxchg ptr %p, i128 %oldval, i128 %newval seq_cst seq_cst
   %val = extractvalue { i128, i1 } %pair, 0
   ret i128 %val
 }
 
-define i128 @val_compare_and_swap_release(i128* %p, i128 %oldval, i128 %newval) {
+define i128 @val_compare_and_swap_release(ptr %p, i128 %oldval, i128 %newval) {
 ; NOOUTLINE-LABEL: val_compare_and_swap_release:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:  .LBB2_1: // =>This Inner Loop Header: Depth=1
@@ -153,12 +153,12 @@ define i128 @val_compare_and_swap_release(i128* %p, i128 %oldval, i128 %newval)
 ; LSE-NEXT:    mov x0, x2
 ; LSE-NEXT:    mov x1, x3
 ; LSE-NEXT:    ret
-  %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval release monotonic
+  %pair = cmpxchg ptr %p, i128 %oldval, i128 %newval release monotonic
   %val = extractvalue { i128, i1 } %pair, 0
   ret i128 %val
 }
 
-define i128 @val_compare_and_swap_monotonic(i128* %p, i128 %oldval, i128 %newval) {
+define i128 @val_compare_and_swap_monotonic(ptr %p, i128 %oldval, i128 %newval) {
 ; NOOUTLINE-LABEL: val_compare_and_swap_monotonic:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:  .LBB3_1: // =>This Inner Loop Header: Depth=1
@@ -204,12 +204,12 @@ define i128 @val_compare_and_swap_monotonic(i128* %p, i128 %oldval, i128 %newval
 ; LSE-NEXT:    mov x0, x2
 ; LSE-NEXT:    mov x1, x3
 ; LSE-NEXT:    ret
-  %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval monotonic monotonic
+  %pair = cmpxchg ptr %p, i128 %oldval, i128 %newval monotonic monotonic
   %val = extractvalue { i128, i1 } %pair, 0
   ret i128 %val
 }
 
-define void @fetch_and_nand(i128* %p, i128 %bits) {
+define void @fetch_and_nand(ptr %p, i128 %bits) {
 ; NOOUTLINE-LABEL: fetch_and_nand:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:  .LBB4_1: // %atomicrmw.start
@@ -267,12 +267,12 @@ define void @fetch_and_nand(i128* %p, i128 %bits) {
 ; LSE-NEXT:    stp x4, x5, [x8]
 ; LSE-NEXT:    ret
 
-  %val = atomicrmw nand i128* %p, i128 %bits release
-  store i128 %val, i128* @var, align 16
+  %val = atomicrmw nand ptr %p, i128 %bits release
+  store i128 %val, ptr @var, align 16
   ret void
 }
 
-define void @fetch_and_or(i128* %p, i128 %bits) {
+define void @fetch_and_or(ptr %p, i128 %bits) {
 ; NOOUTLINE-LABEL: fetch_and_or:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:  .LBB5_1: // %atomicrmw.start
@@ -324,12 +324,12 @@ define void @fetch_and_or(i128* %p, i128 %bits) {
 ; LSE-NEXT:    stp x4, x5, [x8]
 ; LSE-NEXT:    ret
 
-  %val = atomicrmw or i128* %p, i128 %bits seq_cst
-  store i128 %val, i128* @var, align 16
+  %val = atomicrmw or ptr %p, i128 %bits seq_cst
+  store i128 %val, ptr @var, align 16
   ret void
 }
 
-define void @fetch_and_add(i128* %p, i128 %bits) {
+define void @fetch_and_add(ptr %p, i128 %bits) {
 ; NOOUTLINE-LABEL: fetch_and_add:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:  .LBB6_1: // %atomicrmw.start
@@ -380,12 +380,12 @@ define void @fetch_and_add(i128* %p, i128 %bits) {
 ; LSE-NEXT:    ldr x8, [x8, :got_lo12:var]
 ; LSE-NEXT:    stp x4, x5, [x8]
 ; LSE-NEXT:    ret
-  %val = atomicrmw add i128* %p, i128 %bits seq_cst
-  store i128 %val, i128* @var, align 16
+  %val = atomicrmw add ptr %p, i128 %bits seq_cst
+  store i128 %val, ptr @var, align 16
   ret void
 }
 
-define void @fetch_and_sub(i128* %p, i128 %bits) {
+define void @fetch_and_sub(ptr %p, i128 %bits) {
 ; NOOUTLINE-LABEL: fetch_and_sub:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:  .LBB7_1: // %atomicrmw.start
@@ -436,12 +436,12 @@ define void @fetch_and_sub(i128* %p, i128 %bits) {
 ; LSE-NEXT:    ldr x8, [x8, :got_lo12:var]
 ; LSE-NEXT:    stp x4, x5, [x8]
 ; LSE-NEXT:    ret
-  %val = atomicrmw sub i128* %p, i128 %bits seq_cst
-  store i128 %val, i128* @var, align 16
+  %val = atomicrmw sub ptr %p, i128 %bits seq_cst
+  store i128 %val, ptr @var, align 16
   ret void
 }
 
-define void @fetch_and_min(i128* %p, i128 %bits) {
+define void @fetch_and_min(ptr %p, i128 %bits) {
 ; NOOUTLINE-LABEL: fetch_and_min:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:  .LBB8_1: // %atomicrmw.start
@@ -498,12 +498,12 @@ define void @fetch_and_min(i128* %p, i128 %bits) {
 ; LSE-NEXT:    ldr x8, [x8, :got_lo12:var]
 ; LSE-NEXT:    stp x4, x5, [x8]
 ; LSE-NEXT:    ret
-  %val = atomicrmw min i128* %p, i128 %bits seq_cst
-  store i128 %val, i128* @var, align 16
+  %val = atomicrmw min ptr %p, i128 %bits seq_cst
+  store i128 %val, ptr @var, align 16
   ret void
 }
 
-define void @fetch_and_max(i128* %p, i128 %bits) {
+define void @fetch_and_max(ptr %p, i128 %bits) {
 ; NOOUTLINE-LABEL: fetch_and_max:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:  .LBB9_1: // %atomicrmw.start
@@ -560,12 +560,12 @@ define void @fetch_and_max(i128* %p, i128 %bits) {
 ; LSE-NEXT:    ldr x8, [x8, :got_lo12:var]
 ; LSE-NEXT:    stp x4, x5, [x8]
 ; LSE-NEXT:    ret
-  %val = atomicrmw max i128* %p, i128 %bits seq_cst
-  store i128 %val, i128* @var, align 16
+  %val = atomicrmw max ptr %p, i128 %bits seq_cst
+  store i128 %val, ptr @var, align 16
   ret void
 }
 
-define void @fetch_and_umin(i128* %p, i128 %bits) {
+define void @fetch_and_umin(ptr %p, i128 %bits) {
 ; NOOUTLINE-LABEL: fetch_and_umin:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:  .LBB10_1: // %atomicrmw.start
@@ -622,12 +622,12 @@ define void @fetch_and_umin(i128* %p, i128 %bits) {
 ; LSE-NEXT:    ldr x8, [x8, :got_lo12:var]
 ; LSE-NEXT:    stp x4, x5, [x8]
 ; LSE-NEXT:    ret
-  %val = atomicrmw umin i128* %p, i128 %bits seq_cst
-  store i128 %val, i128* @var, align 16
+  %val = atomicrmw umin ptr %p, i128 %bits seq_cst
+  store i128 %val, ptr @var, align 16
   ret void
 }
 
-define void @fetch_and_umax(i128* %p, i128 %bits) {
+define void @fetch_and_umax(ptr %p, i128 %bits) {
 ; NOOUTLINE-LABEL: fetch_and_umax:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:  .LBB11_1: // %atomicrmw.start
@@ -684,12 +684,12 @@ define void @fetch_and_umax(i128* %p, i128 %bits) {
 ; LSE-NEXT:    ldr x8, [x8, :got_lo12:var]
 ; LSE-NEXT:    stp x4, x5, [x8]
 ; LSE-NEXT:    ret
-  %val = atomicrmw umax i128* %p, i128 %bits seq_cst
-  store i128 %val, i128* @var, align 16
+  %val = atomicrmw umax ptr %p, i128 %bits seq_cst
+  store i128 %val, ptr @var, align 16
   ret void
 }
 
-define i128 @atomic_load_seq_cst(i128* %p) {
+define i128 @atomic_load_seq_cst(ptr %p) {
 ; NOOUTLINE-LABEL: atomic_load_seq_cst:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:    mov x8, x0
@@ -720,11 +720,11 @@ define i128 @atomic_load_seq_cst(i128* %p) {
 ; LSE-NEXT:    mov x0, x2
 ; LSE-NEXT:    mov x1, x3
 ; LSE-NEXT:    ret
-   %r = load atomic i128, i128* %p seq_cst, align 16
+   %r = load atomic i128, ptr %p seq_cst, align 16
    ret i128 %r
 }
 
-define i128 @atomic_load_relaxed(i64, i64, i128* %p) {
+define i128 @atomic_load_relaxed(i64, i64, ptr %p) {
 ; NOOUTLINE-LABEL: atomic_load_relaxed:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:  .LBB13_1: // %atomicrmw.start
@@ -751,12 +751,12 @@ define i128 @atomic_load_relaxed(i64, i64, i128* %p) {
 ; LSE-NEXT:    mov x1, #0
 ; LSE-NEXT:    casp x0, x1, x0, x1, [x2]
 ; LSE-NEXT:    ret
-    %r = load atomic i128, i128* %p monotonic, align 16
+    %r = load atomic i128, ptr %p monotonic, align 16
     ret i128 %r
 }
 
 
-define void @atomic_store_seq_cst(i128 %in, i128* %p) {
+define void @atomic_store_seq_cst(i128 %in, ptr %p) {
 ; NOOUTLINE-LABEL: atomic_store_seq_cst:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:  .LBB14_1: // %atomicrmw.start
@@ -794,11 +794,11 @@ define void @atomic_store_seq_cst(i128 %in, i128* %p) {
 ; LSE-NEXT:    b.ne .LBB14_1
 ; LSE-NEXT:  // %bb.2: // %atomicrmw.end
 ; LSE-NEXT:    ret
-   store atomic i128 %in, i128* %p seq_cst, align 16
+   store atomic i128 %in, ptr %p seq_cst, align 16
    ret void
 }
 
-define void @atomic_store_release(i128 %in, i128* %p) {
+define void @atomic_store_release(i128 %in, ptr %p) {
 ; NOOUTLINE-LABEL: atomic_store_release:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:  .LBB15_1: // %atomicrmw.start
@@ -836,11 +836,11 @@ define void @atomic_store_release(i128 %in, i128* %p) {
 ; LSE-NEXT:    b.ne .LBB15_1
 ; LSE-NEXT:  // %bb.2: // %atomicrmw.end
 ; LSE-NEXT:    ret
-   store atomic i128 %in, i128* %p release, align 16
+   store atomic i128 %in, ptr %p release, align 16
    ret void
 }
 
-define void @atomic_store_relaxed(i128 %in, i128* %p) {
+define void @atomic_store_relaxed(i128 %in, ptr %p) {
 ; NOOUTLINE-LABEL: atomic_store_relaxed:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:  .LBB16_1: // %atomicrmw.start
@@ -878,13 +878,13 @@ define void @atomic_store_relaxed(i128 %in, i128* %p) {
 ; LSE-NEXT:    b.ne .LBB16_1
 ; LSE-NEXT:  // %bb.2: // %atomicrmw.end
 ; LSE-NEXT:    ret
-   store atomic i128 %in, i128* %p unordered, align 16
+   store atomic i128 %in, ptr %p unordered, align 16
    ret void
 }
 
 ; Since we store the original value to ensure no tearing for the unsuccessful
 ; case, the register used must not be xzr.
-define void @cmpxchg_dead(i128* %ptr, i128 %desired, i128 %new) {
+define void @cmpxchg_dead(ptr %ptr, i128 %desired, i128 %new) {
 ; NOOUTLINE-LABEL: cmpxchg_dead:
 ; NOOUTLINE:       // %bb.0:
 ; NOOUTLINE-NEXT:  .LBB17_1: // =>This Inner Loop Header: Depth=1
@@ -927,6 +927,6 @@ define void @cmpxchg_dead(i128* %ptr, i128 %desired, i128 %new) {
 ; LSE-NEXT:    // kill: def $x2 killed $x2 killed $x2_x3 def $x2_x3
 ; LSE-NEXT:    casp x2, x3, x4, x5, [x0]
 ; LSE-NEXT:    ret
-  cmpxchg i128* %ptr, i128 %desired, i128 %new monotonic monotonic
+  cmpxchg ptr %ptr, i128 %desired, i128 %new monotonic monotonic
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-atomic.ll b/llvm/test/CodeGen/AArch64/arm64-atomic.ll
index cfdbcf9bfac99..739fc8bbcaf07 100644
--- a/llvm/test/CodeGen/AArch64/arm64-atomic.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-atomic.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -asm-verbose=false -verify-machineinstrs -mcpu=cyclone | FileCheck -enable-var-scope %s
 ; RUN: llc < %s -mtriple=arm64-eabi -asm-verbose=false -verify-machineinstrs -mcpu=cyclone -mattr=+outline-atomics | FileCheck -enable-var-scope %s -check-prefix=OUTLINE-ATOMICS
 
-define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) #0 {
+define i32 @val_compare_and_swap(ptr %p, i32 %cmp, i32 %new) #0 {
 ; OUTLINE-ATOMICS: bl __aarch64_cas4_acq
 ; CHECK-LABEL: val_compare_and_swap:
 ; CHECK-NEXT: mov    x[[ADDR:[0-9]+]], x0
@@ -15,12 +15,12 @@ define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) #0 {
 ; CHECK-NEXT: [[FAILBB]]:
 ; CHECK-NEXT: clrex
 ; CHECK-NEXT: ret
-  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire
+  %pair = cmpxchg ptr %p, i32 %cmp, i32 %new acquire acquire
   %val = extractvalue { i32, i1 } %pair, 0
   ret i32 %val
 }
 
-define i32 @val_compare_and_swap_from_load(i32* %p, i32 %cmp, i32* %pnew) #0 {
+define i32 @val_compare_and_swap_from_load(ptr %p, i32 %cmp, ptr %pnew) #0 {
 ; OUTLINE-ATOMICS: bl __aarch64_cas4_acq
 ; CHECK-LABEL: val_compare_and_swap_from_load:
 ; CHECK-NEXT: ldr    [[NEW:w[0-9]+]], [x2]
@@ -36,13 +36,13 @@ define i32 @val_compare_and_swap_from_load(i32* %p, i32 %cmp, i32* %pnew) #0 {
 ; CHECK-NEXT: clrex
 ; CHECK-NEXT: mov    x0, x[[RESULT]]
 ; CHECK-NEXT: ret
-  %new = load i32, i32* %pnew
-  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire
+  %new = load i32, ptr %pnew
+  %pair = cmpxchg ptr %p, i32 %cmp, i32 %new acquire acquire
   %val = extractvalue { i32, i1 } %pair, 0
   ret i32 %val
 }
 
-define i32 @val_compare_and_swap_rel(i32* %p, i32 %cmp, i32 %new) #0 {
+define i32 @val_compare_and_swap_rel(ptr %p, i32 %cmp, i32 %new) #0 {
 ; OUTLINE-ATOMICS: bl __aarch64_cas4_acq_rel
 ; CHECK-LABEL: val_compare_and_swap_rel:
 ; CHECK-NEXT: mov    x[[ADDR:[0-9]+]], x0
@@ -56,12 +56,12 @@ define i32 @val_compare_and_swap_rel(i32* %p, i32 %cmp, i32 %new) #0 {
 ; CHECK-NEXT: [[FAILBB]]:
 ; CHECK-NEXT: clrex
 ; CHECK-NEXT: ret
-  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acq_rel monotonic
+  %pair = cmpxchg ptr %p, i32 %cmp, i32 %new acq_rel monotonic
   %val = extractvalue { i32, i1 } %pair, 0
   ret i32 %val
 }
 
-define i64 @val_compare_and_swap_64(i64* %p, i64 %cmp, i64 %new) #0 {
+define i64 @val_compare_and_swap_64(ptr %p, i64 %cmp, i64 %new) #0 {
 ; OUTLINE-ATOMICS: bl __aarch64_cas8_relax
 ; CHECK-LABEL: val_compare_and_swap_64:
 ; CHECK-NEXT: mov    x[[ADDR:[0-9]+]], x0
@@ -75,12 +75,12 @@ define i64 @val_compare_and_swap_64(i64* %p, i64 %cmp, i64 %new) #0 {
 ; CHECK-NEXT: [[FAILBB]]:
 ; CHECK-NEXT: clrex
 ; CHECK-NEXT: ret
-  %pair = cmpxchg i64* %p, i64 %cmp, i64 %new monotonic monotonic
+  %pair = cmpxchg ptr %p, i64 %cmp, i64 %new monotonic monotonic
   %val = extractvalue { i64, i1 } %pair, 0
   ret i64 %val
 }
 
-define i32 @fetch_and_nand(i32* %p) #0 {
+define i32 @fetch_and_nand(ptr %p) #0 {
 ; CHECK-LABEL: fetch_and_nand:
 ; CHECK: [[TRYBB:.?LBB[0-9_]+]]:
 ; CHECK: ldxr   w[[DEST_REG:[0-9]+]], [x0]
@@ -90,11 +90,11 @@ define i32 @fetch_and_nand(i32* %p) #0 {
 ; CHECK: stlxr   [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x0]
 ; CHECK: cbnz   [[SCRATCH_REG]], [[TRYBB]]
 ; CHECK: mov    x0, x[[DEST_REG]]
-  %val = atomicrmw nand i32* %p, i32 7 release
+  %val = atomicrmw nand ptr %p, i32 7 release
   ret i32 %val
 }
 
-define i64 @fetch_and_nand_64(i64* %p) #0 {
+define i64 @fetch_and_nand_64(ptr %p) #0 {
 ; CHECK-LABEL: fetch_and_nand_64:
 ; CHECK: mov    x[[ADDR:[0-9]+]], x0
 ; CHECK: [[TRYBB:.?LBB[0-9_]+]]:
@@ -104,11 +104,11 @@ define i64 @fetch_and_nand_64(i64* %p) #0 {
 ; CHECK: stlxr   [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x[[ADDR]]]
 ; CHECK: cbnz   [[SCRATCH_REG]], [[TRYBB]]
 
-  %val = atomicrmw nand i64* %p, i64 7 acq_rel
+  %val = atomicrmw nand ptr %p, i64 7 acq_rel
   ret i64 %val
 }
 
-define i32 @fetch_and_or(i32* %p) #0 {
+define i32 @fetch_and_or(ptr %p) #0 {
 ; OUTLINE-ATOMICS: bl __aarch64_ldset4_acq_rel
 ; CHECK-LABEL: fetch_and_or:
 ; CHECK: mov   [[OLDVAL_REG:w[0-9]+]], #5
@@ -119,11 +119,11 @@ define i32 @fetch_and_or(i32* %p) #0 {
 ; CHECK: stlxr [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x0]
 ; CHECK: cbnz   [[SCRATCH_REG]], [[TRYBB]]
 ; CHECK: mov    x0, x[[DEST_REG]]
-  %val = atomicrmw or i32* %p, i32 5 seq_cst
+  %val = atomicrmw or ptr %p, i32 5 seq_cst
   ret i32 %val
 }
 
-define i64 @fetch_and_or_64(i64* %p) #0 {
+define i64 @fetch_and_or_64(ptr %p) #0 {
 ; OUTLINE-ATOMICS: bl __aarch64_ldset8_relax
 ; CHECK: fetch_and_or_64:
 ; CHECK: mov    x[[ADDR:[0-9]+]], x0
@@ -132,7 +132,7 @@ define i64 @fetch_and_or_64(i64* %p) #0 {
 ; CHECK: orr    [[SCRATCH2_REG:x[0-9]+]], [[DEST_REG]], #0x7
 ; CHECK: stxr   [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x[[ADDR]]]
 ; CHECK: cbnz   [[SCRATCH_REG]], [[TRYBB]]
-  %val = atomicrmw or i64* %p, i64 7 monotonic
+  %val = atomicrmw or ptr %p, i64 7 monotonic
   ret i64 %val
 }
 
@@ -157,31 +157,31 @@ define void @seq_cst_fence() #0 {
    ; CHECK: dmb ish{{$}}
 }
 
-define i32 @atomic_load(i32* %p) #0 {
-   %r = load atomic i32, i32* %p seq_cst, align 4
+define i32 @atomic_load(ptr %p) #0 {
+   %r = load atomic i32, ptr %p seq_cst, align 4
    ret i32 %r
    ; CHECK-LABEL: atomic_load:
    ; CHECK: ldar
 }
 
-define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) #0 {
+define i8 @atomic_load_relaxed_8(ptr %p, i32 %off32) #0 {
 ; CHECK-LABEL: atomic_load_relaxed_8:
-  %ptr_unsigned = getelementptr i8, i8* %p, i32 4095
-  %val_unsigned = load atomic i8, i8* %ptr_unsigned monotonic, align 1
+  %ptr_unsigned = getelementptr i8, ptr %p, i32 4095
+  %val_unsigned = load atomic i8, ptr %ptr_unsigned monotonic, align 1
 ; CHECK: ldrb {{w[0-9]+}}, [x0, #4095]
 
-  %ptr_regoff = getelementptr i8, i8* %p, i32 %off32
-  %val_regoff = load atomic i8, i8* %ptr_regoff unordered, align 1
+  %ptr_regoff = getelementptr i8, ptr %p, i32 %off32
+  %val_regoff = load atomic i8, ptr %ptr_regoff unordered, align 1
   %tot1 = add i8 %val_unsigned, %val_regoff
 ; CHECK: ldrb {{w[0-9]+}}, [x0, w1, sxtw]
 
-  %ptr_unscaled = getelementptr i8, i8* %p, i32 -256
-  %val_unscaled = load atomic i8, i8* %ptr_unscaled monotonic, align 1
+  %ptr_unscaled = getelementptr i8, ptr %p, i32 -256
+  %val_unscaled = load atomic i8, ptr %ptr_unscaled monotonic, align 1
   %tot2 = add i8 %tot1, %val_unscaled
 ; CHECK: ldurb {{w[0-9]+}}, [x0, #-256]
 
-  %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
-  %val_random = load atomic i8, i8* %ptr_random unordered, align 1
+  %ptr_random = getelementptr i8, ptr %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
+  %val_random = load atomic i8, ptr %ptr_random unordered, align 1
   %tot3 = add i8 %tot2, %val_random
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
 ; CHECK: ldrb {{w[0-9]+}}, [x[[ADDR]]]
@@ -189,24 +189,24 @@ define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) #0 {
   ret i8 %tot3
 }
 
-define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) #0 {
+define i16 @atomic_load_relaxed_16(ptr %p, i32 %off32) #0 {
 ; CHECK-LABEL: atomic_load_relaxed_16:
-  %ptr_unsigned = getelementptr i16, i16* %p, i32 4095
-  %val_unsigned = load atomic i16, i16* %ptr_unsigned monotonic, align 2
+  %ptr_unsigned = getelementptr i16, ptr %p, i32 4095
+  %val_unsigned = load atomic i16, ptr %ptr_unsigned monotonic, align 2
 ; CHECK: ldrh {{w[0-9]+}}, [x0, #8190]
 
-  %ptr_regoff = getelementptr i16, i16* %p, i32 %off32
-  %val_regoff = load atomic i16, i16* %ptr_regoff unordered, align 2
+  %ptr_regoff = getelementptr i16, ptr %p, i32 %off32
+  %val_regoff = load atomic i16, ptr %ptr_regoff unordered, align 2
   %tot1 = add i16 %val_unsigned, %val_regoff
 ; CHECK: ldrh {{w[0-9]+}}, [x0, w1, sxtw #1]
 
-  %ptr_unscaled = getelementptr i16, i16* %p, i32 -128
-  %val_unscaled = load atomic i16, i16* %ptr_unscaled monotonic, align 2
+  %ptr_unscaled = getelementptr i16, ptr %p, i32 -128
+  %val_unscaled = load atomic i16, ptr %ptr_unscaled monotonic, align 2
   %tot2 = add i16 %tot1, %val_unscaled
 ; CHECK: ldurh {{w[0-9]+}}, [x0, #-256]
 
-  %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
-  %val_random = load atomic i16, i16* %ptr_random unordered, align 2
+  %ptr_random = getelementptr i16, ptr %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
+  %val_random = load atomic i16, ptr %ptr_random unordered, align 2
   %tot3 = add i16 %tot2, %val_random
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
 ; CHECK: ldrh {{w[0-9]+}}, [x[[ADDR]]]
@@ -214,24 +214,24 @@ define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) #0 {
   ret i16 %tot3
 }
 
-define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) #0 {
+define i32 @atomic_load_relaxed_32(ptr %p, i32 %off32) #0 {
 ; CHECK-LABEL: atomic_load_relaxed_32:
-  %ptr_unsigned = getelementptr i32, i32* %p, i32 4095
-  %val_unsigned = load atomic i32, i32* %ptr_unsigned monotonic, align 4
+  %ptr_unsigned = getelementptr i32, ptr %p, i32 4095
+  %val_unsigned = load atomic i32, ptr %ptr_unsigned monotonic, align 4
 ; CHECK: ldr {{w[0-9]+}}, [x0, #16380]
 
-  %ptr_regoff = getelementptr i32, i32* %p, i32 %off32
-  %val_regoff = load atomic i32, i32* %ptr_regoff unordered, align 4
+  %ptr_regoff = getelementptr i32, ptr %p, i32 %off32
+  %val_regoff = load atomic i32, ptr %ptr_regoff unordered, align 4
   %tot1 = add i32 %val_unsigned, %val_regoff
 ; CHECK: ldr {{w[0-9]+}}, [x0, w1, sxtw #2]
 
-  %ptr_unscaled = getelementptr i32, i32* %p, i32 -64
-  %val_unscaled = load atomic i32, i32* %ptr_unscaled monotonic, align 4
+  %ptr_unscaled = getelementptr i32, ptr %p, i32 -64
+  %val_unscaled = load atomic i32, ptr %ptr_unscaled monotonic, align 4
   %tot2 = add i32 %tot1, %val_unscaled
 ; CHECK: ldur {{w[0-9]+}}, [x0, #-256]
 
-  %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
-  %val_random = load atomic i32, i32* %ptr_random unordered, align 4
+  %ptr_random = getelementptr i32, ptr %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
+  %val_random = load atomic i32, ptr %ptr_random unordered, align 4
   %tot3 = add i32 %tot2, %val_random
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
 ; CHECK: ldr {{w[0-9]+}}, [x[[ADDR]]]
@@ -239,24 +239,24 @@ define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) #0 {
   ret i32 %tot3
 }
 
-define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) #0 {
+define i64 @atomic_load_relaxed_64(ptr %p, i32 %off32) #0 {
 ; CHECK-LABEL: atomic_load_relaxed_64:
-  %ptr_unsigned = getelementptr i64, i64* %p, i32 4095
-  %val_unsigned = load atomic i64, i64* %ptr_unsigned monotonic, align 8
+  %ptr_unsigned = getelementptr i64, ptr %p, i32 4095
+  %val_unsigned = load atomic i64, ptr %ptr_unsigned monotonic, align 8
 ; CHECK: ldr {{x[0-9]+}}, [x0, #32760]
 
-  %ptr_regoff = getelementptr i64, i64* %p, i32 %off32
-  %val_regoff = load atomic i64, i64* %ptr_regoff unordered, align 8
+  %ptr_regoff = getelementptr i64, ptr %p, i32 %off32
+  %val_regoff = load atomic i64, ptr %ptr_regoff unordered, align 8
   %tot1 = add i64 %val_unsigned, %val_regoff
 ; CHECK: ldr {{x[0-9]+}}, [x0, w1, sxtw #3]
 
-  %ptr_unscaled = getelementptr i64, i64* %p, i32 -32
-  %val_unscaled = load atomic i64, i64* %ptr_unscaled monotonic, align 8
+  %ptr_unscaled = getelementptr i64, ptr %p, i32 -32
+  %val_unscaled = load atomic i64, ptr %ptr_unscaled monotonic, align 8
   %tot2 = add i64 %tot1, %val_unscaled
 ; CHECK: ldur {{x[0-9]+}}, [x0, #-256]
 
-  %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
-  %val_random = load atomic i64, i64* %ptr_random unordered, align 8
+  %ptr_random = getelementptr i64, ptr %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
+  %val_random = load atomic i64, ptr %ptr_random unordered, align 8
   %tot3 = add i64 %tot2, %val_random
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
 ; CHECK: ldr {{x[0-9]+}}, [x[[ADDR]]]
@@ -265,96 +265,96 @@ define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) #0 {
 }
 
 
-define void @atomc_store(i32* %p) #0 {
-   store atomic i32 4, i32* %p seq_cst, align 4
+define void @atomc_store(ptr %p) #0 {
+   store atomic i32 4, ptr %p seq_cst, align 4
    ret void
    ; CHECK-LABEL: atomc_store:
    ; CHECK: stlr
 }
 
-define void @atomic_store_relaxed_8(i8* %p, i32 %off32, i8 %val) #0 {
+define void @atomic_store_relaxed_8(ptr %p, i32 %off32, i8 %val) #0 {
 ; CHECK-LABEL: atomic_store_relaxed_8:
-  %ptr_unsigned = getelementptr i8, i8* %p, i32 4095
-  store atomic i8 %val, i8* %ptr_unsigned monotonic, align 1
+  %ptr_unsigned = getelementptr i8, ptr %p, i32 4095
+  store atomic i8 %val, ptr %ptr_unsigned monotonic, align 1
 ; CHECK: strb {{w[0-9]+}}, [x0, #4095]
 
-  %ptr_regoff = getelementptr i8, i8* %p, i32 %off32
-  store atomic i8 %val, i8* %ptr_regoff unordered, align 1
+  %ptr_regoff = getelementptr i8, ptr %p, i32 %off32
+  store atomic i8 %val, ptr %ptr_regoff unordered, align 1
 ; CHECK: strb {{w[0-9]+}}, [x0, w1, sxtw]
 
-  %ptr_unscaled = getelementptr i8, i8* %p, i32 -256
-  store atomic i8 %val, i8* %ptr_unscaled monotonic, align 1
+  %ptr_unscaled = getelementptr i8, ptr %p, i32 -256
+  store atomic i8 %val, ptr %ptr_unscaled monotonic, align 1
 ; CHECK: sturb {{w[0-9]+}}, [x0, #-256]
 
-  %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
-  store atomic i8 %val, i8* %ptr_random unordered, align 1
+  %ptr_random = getelementptr i8, ptr %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
+  store atomic i8 %val, ptr %ptr_random unordered, align 1
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
 ; CHECK: strb {{w[0-9]+}}, [x[[ADDR]]]
 
   ret void
 }
 
-define void @atomic_store_relaxed_16(i16* %p, i32 %off32, i16 %val) #0 {
+define void @atomic_store_relaxed_16(ptr %p, i32 %off32, i16 %val) #0 {
 ; CHECK-LABEL: atomic_store_relaxed_16:
-  %ptr_unsigned = getelementptr i16, i16* %p, i32 4095
-  store atomic i16 %val, i16* %ptr_unsigned monotonic, align 2
+  %ptr_unsigned = getelementptr i16, ptr %p, i32 4095
+  store atomic i16 %val, ptr %ptr_unsigned monotonic, align 2
 ; CHECK: strh {{w[0-9]+}}, [x0, #8190]
 
-  %ptr_regoff = getelementptr i16, i16* %p, i32 %off32
-  store atomic i16 %val, i16* %ptr_regoff unordered, align 2
+  %ptr_regoff = getelementptr i16, ptr %p, i32 %off32
+  store atomic i16 %val, ptr %ptr_regoff unordered, align 2
 ; CHECK: strh {{w[0-9]+}}, [x0, w1, sxtw #1]
 
-  %ptr_unscaled = getelementptr i16, i16* %p, i32 -128
-  store atomic i16 %val, i16* %ptr_unscaled monotonic, align 2
+  %ptr_unscaled = getelementptr i16, ptr %p, i32 -128
+  store atomic i16 %val, ptr %ptr_unscaled monotonic, align 2
 ; CHECK: sturh {{w[0-9]+}}, [x0, #-256]
 
-  %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
-  store atomic i16 %val, i16* %ptr_random unordered, align 2
+  %ptr_random = getelementptr i16, ptr %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
+  store atomic i16 %val, ptr %ptr_random unordered, align 2
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
 ; CHECK: strh {{w[0-9]+}}, [x[[ADDR]]]
 
   ret void
 }
 
-define void @atomic_store_relaxed_32(i32* %p, i32 %off32, i32 %val) #0 {
+define void @atomic_store_relaxed_32(ptr %p, i32 %off32, i32 %val) #0 {
 ; CHECK-LABEL: atomic_store_relaxed_32:
-  %ptr_unsigned = getelementptr i32, i32* %p, i32 4095
-  store atomic i32 %val, i32* %ptr_unsigned monotonic, align 4
+  %ptr_unsigned = getelementptr i32, ptr %p, i32 4095
+  store atomic i32 %val, ptr %ptr_unsigned monotonic, align 4
 ; CHECK: str {{w[0-9]+}}, [x0, #16380]
 
-  %ptr_regoff = getelementptr i32, i32* %p, i32 %off32
-  store atomic i32 %val, i32* %ptr_regoff unordered, align 4
+  %ptr_regoff = getelementptr i32, ptr %p, i32 %off32
+  store atomic i32 %val, ptr %ptr_regoff unordered, align 4
 ; CHECK: str {{w[0-9]+}}, [x0, w1, sxtw #2]
 
-  %ptr_unscaled = getelementptr i32, i32* %p, i32 -64
-  store atomic i32 %val, i32* %ptr_unscaled monotonic, align 4
+  %ptr_unscaled = getelementptr i32, ptr %p, i32 -64
+  store atomic i32 %val, ptr %ptr_unscaled monotonic, align 4
 ; CHECK: stur {{w[0-9]+}}, [x0, #-256]
 
-  %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
-  store atomic i32 %val, i32* %ptr_random unordered, align 4
+  %ptr_random = getelementptr i32, ptr %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
+  store atomic i32 %val, ptr %ptr_random unordered, align 4
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
 ; CHECK: str {{w[0-9]+}}, [x[[ADDR]]]
 
   ret void
 }
 
-define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) #0 {
+define void @atomic_store_relaxed_64(ptr %p, i32 %off32, i64 %val) #0 {
 ; OUTLINE-ATOMICS: bl __aarch64_ldadd4_acq_rel
 ; CHECK-LABEL: atomic_store_relaxed_64:
-  %ptr_unsigned = getelementptr i64, i64* %p, i32 4095
-  store atomic i64 %val, i64* %ptr_unsigned monotonic, align 8
+  %ptr_unsigned = getelementptr i64, ptr %p, i32 4095
+  store atomic i64 %val, ptr %ptr_unsigned monotonic, align 8
 ; CHECK: str {{x[0-9]+}}, [x0, #32760]
 
-  %ptr_regoff = getelementptr i64, i64* %p, i32 %off32
-  store atomic i64 %val, i64* %ptr_regoff unordered, align 8
+  %ptr_regoff = getelementptr i64, ptr %p, i32 %off32
+  store atomic i64 %val, ptr %ptr_regoff unordered, align 8
 ; CHECK: str {{x[0-9]+}}, [x0, w1, sxtw #3]
 
-  %ptr_unscaled = getelementptr i64, i64* %p, i32 -32
-  store atomic i64 %val, i64* %ptr_unscaled monotonic, align 8
+  %ptr_unscaled = getelementptr i64, ptr %p, i32 -32
+  store atomic i64 %val, ptr %ptr_unscaled monotonic, align 8
 ; CHECK: stur {{x[0-9]+}}, [x0, #-256]
 
-  %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
-  store atomic i64 %val, i64* %ptr_random unordered, align 8
+  %ptr_random = getelementptr i64, ptr %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
+  store atomic i64 %val, ptr %ptr_random unordered, align 8
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
 ; CHECK: str {{x[0-9]+}}, [x[[ADDR]]]
 
@@ -371,13 +371,13 @@ define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) #0 {
 
 define i32 @next_id() nounwind optsize ssp align 2 {
 entry:
-  %0 = atomicrmw add i32* getelementptr inbounds (%"class.X::Atomic", %"class.X::Atomic"* @counter, i64 0, i32 0, i32 0), i32 1 seq_cst
+  %0 = atomicrmw add ptr @counter, i32 1 seq_cst
   %add.i = add i32 %0, 1
   %tobool = icmp eq i32 %add.i, 0
   br i1 %tobool, label %if.else, label %return
 
 if.else:                                          ; preds = %entry
-  %1 = atomicrmw add i32* getelementptr inbounds (%"class.X::Atomic", %"class.X::Atomic"* @counter, i64 0, i32 0, i32 0), i32 1 seq_cst
+  %1 = atomicrmw add ptr @counter, i32 1 seq_cst
   %add.i2 = add i32 %1, 1
   br label %return
 

diff --git a/llvm/test/CodeGen/AArch64/arm64-bcc.ll b/llvm/test/CodeGen/AArch64/arm64-bcc.ll
index 66d2f52ab969e..08e7e9f57b641 100644
--- a/llvm/test/CodeGen/AArch64/arm64-bcc.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-bcc.ll
@@ -24,13 +24,12 @@ entry:
 
 ; Checks for compfail when optimizing csincr-cbz sequence
 
-define { i64, i1 } @foo(i64* , %Sstruct* , i1, i64) {
+define { i64, i1 } @foo(ptr , ptr , i1, i64) {
 entry:
   %.sroa.0 = alloca i72, align 16
-  %.count.value = getelementptr inbounds %Sstruct, %Sstruct* %1, i64 0, i32 0, i32 0
-  %4 = load i64, i64* %.count.value, align 8
-  %.repeatedValue.value = getelementptr inbounds %Sstruct, %Sstruct* %1, i64 0, i32 1, i32 0
-  %5 = load i32, i32* %.repeatedValue.value, align 8
+  %4 = load i64, ptr %1, align 8
+  %.repeatedValue.value = getelementptr inbounds %Sstruct, ptr %1, i64 0, i32 1, i32 0
+  %5 = load i32, ptr %.repeatedValue.value, align 8
   %6 = icmp eq i64 %4, 0
   br label %7
 

diff --git a/llvm/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll b/llvm/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll
index 5cd96d09b14d0..f5aa4c666a568 100644
--- a/llvm/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll
@@ -2,1322 +2,1321 @@
 ; RUN: llc -mtriple aarch64_be < %s -aarch64-enable-ldst-opt=false -O0 -fast-isel=true -o - | FileCheck %s
 
 ; CHECK-LABEL: test_i64_f64:
-define void @test_i64_f64(double* %p, i64* %q) {
+define void @test_i64_f64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: str
-    %1 = load double, double* %p
+    %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = bitcast double %2 to i64
     %4 = add i64 %3, %3
-    store i64 %4, i64* %q
+    store i64 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_i64_v1i64:
-define void @test_i64_v1i64(<1 x i64>* %p, i64* %q) {
+define void @test_i64_v1i64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: str
-    %1 = load <1 x i64>, <1 x i64>* %p
+    %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = bitcast <1 x i64> %2 to i64
     %4 = add i64 %3, %3
-    store i64 %4, i64* %q
+    store i64 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_i64_v2f32:
-define void @test_i64_v2f32(<2 x float>* %p, i64* %q) {
+define void @test_i64_v2f32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2s }
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: str
-    %1 = load <2 x float>, <2 x float>* %p
+    %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = bitcast <2 x float> %2 to i64
     %4 = add i64 %3, %3
-    store i64 %4, i64* %q
+    store i64 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_i64_v2i32:
-define void @test_i64_v2i32(<2 x i32>* %p, i64* %q) {
+define void @test_i64_v2i32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2s }
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: str
-    %1 = load <2 x i32>, <2 x i32>* %p
+    %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = bitcast <2 x i32> %2 to i64
     %4 = add i64 %3, %3
-    store i64 %4, i64* %q
+    store i64 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_i64_v4f16:
-define void @test_i64_v4f16(<4 x half>* %p, i64* %q) {
+define void @test_i64_v4f16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4h }
 ; CHECK-NOT: rev
 ; CHECK: fadd
 ; CHECK: rev64 v{{[0-9]+}}.4h
 ; CHECK: str
-    %1 = load <4 x half>, <4 x half>* %p
+    %1 = load <4 x half>, ptr %p
     %2 = fadd <4 x half> %1, %1
     %3 = bitcast <4 x half> %2 to i64
     %4 = add i64 %3, %3
-    store i64 %4, i64* %q
+    store i64 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_i64_v4i16:
-define void @test_i64_v4i16(<4 x i16>* %p, i64* %q) {
+define void @test_i64_v4i16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4h }
 ; CHECK: rev64 v{{[0-9]+}}.4h
 ; CHECK: str
-    %1 = load <4 x i16>, <4 x i16>* %p
+    %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = bitcast <4 x i16> %2 to i64
     %4 = add i64 %3, %3
-    store i64 %4, i64* %q
+    store i64 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_i64_v8i8:
-define void @test_i64_v8i8(<8 x i8>* %p, i64* %q) {
+define void @test_i64_v8i8(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.8b }
 ; CHECK: rev64 v{{[0-9]+}}.8b
 ; CHECK: str
-    %1 = load <8 x i8>, <8 x i8>* %p
+    %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = bitcast <8 x i8> %2 to i64
     %4 = add i64 %3, %3
-    store i64 %4, i64* %q
+    store i64 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f64_i64:
-define void @test_f64_i64(i64* %p, double* %q) {
+define void @test_f64_i64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: str
-    %1 = load i64, i64* %p
+    %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = bitcast i64 %2 to double
     %4 = fadd double %3, %3
-    store double %4, double* %q
+    store double %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f64_v1i64:
-define void @test_f64_v1i64(<1 x i64>* %p, double* %q) {
+define void @test_f64_v1i64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: str
-    %1 = load <1 x i64>, <1 x i64>* %p
+    %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = bitcast <1 x i64> %2 to double
     %4 = fadd double %3, %3
-    store double %4, double* %q
+    store double %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f64_v2f32:
-define void @test_f64_v2f32(<2 x float>* %p, double* %q) {
+define void @test_f64_v2f32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2s }
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: str
-    %1 = load <2 x float>, <2 x float>* %p
+    %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = bitcast <2 x float> %2 to double
     %4 = fadd double %3, %3
-    store double %4, double* %q
+    store double %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f64_v2i32:
-define void @test_f64_v2i32(<2 x i32>* %p, double* %q) {
+define void @test_f64_v2i32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2s }
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: str
-    %1 = load <2 x i32>, <2 x i32>* %p
+    %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = bitcast <2 x i32> %2 to double
     %4 = fadd double %3, %3
-    store double %4, double* %q
+    store double %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f64_v4i16:
-define void @test_f64_v4i16(<4 x i16>* %p, double* %q) {
+define void @test_f64_v4i16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4h }
 ; CHECK: rev64 v{{[0-9]+}}.4h
 ; CHECK: str
-    %1 = load <4 x i16>, <4 x i16>* %p
+    %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = bitcast <4 x i16> %2 to double
     %4 = fadd double %3, %3
-    store double %4, double* %q
+    store double %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f64_v4f16:
-define void @test_f64_v4f16(<4 x half>* %p, double* %q) {
+define void @test_f64_v4f16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4h }
 ; CHECK-NOT: rev
 ; CHECK: fadd
 ; CHECK: rev64 v{{[0-9]+}}.4h
 ; CHECK: fadd
 ; CHECK: str
-    %1 = load <4 x half>, <4 x half>* %p
+    %1 = load <4 x half>, ptr %p
     %2 = fadd <4 x half> %1, %1
     %3 = bitcast <4 x half> %2 to double
     %4 = fadd double %3, %3
-    store double %4, double* %q
+    store double %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f64_v8i8:
-define void @test_f64_v8i8(<8 x i8>* %p, double* %q) {
+define void @test_f64_v8i8(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.8b }
 ; CHECK: rev64 v{{[0-9]+}}.8b
 ; CHECK: str
-    %1 = load <8 x i8>, <8 x i8>* %p
+    %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = bitcast <8 x i8> %2 to double
     %4 = fadd double %3, %3
-    store double %4, double* %q
+    store double %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v1i64_i64:
-define void @test_v1i64_i64(i64* %p, <1 x i64>* %q) {
+define void @test_v1i64_i64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: str
-    %1 = load i64, i64* %p
+    %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = bitcast i64 %2 to <1 x i64>
     %4 = add <1 x i64> %3, %3
-    store <1 x i64> %4, <1 x i64>* %q
+    store <1 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v1i64_f64:
-define void @test_v1i64_f64(double* %p, <1 x i64>* %q) {
+define void @test_v1i64_f64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: str
-    %1 = load double, double* %p
+    %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = bitcast double %2 to <1 x i64>
     %4 = add <1 x i64> %3, %3
-    store <1 x i64> %4, <1 x i64>* %q
+    store <1 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v1i64_v2f32:
-define void @test_v1i64_v2f32(<2 x float>* %p, <1 x i64>* %q) {
+define void @test_v1i64_v2f32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2s }
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: str
-    %1 = load <2 x float>, <2 x float>* %p
+    %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = bitcast <2 x float> %2 to <1 x i64>
     %4 = add <1 x i64> %3, %3
-    store <1 x i64> %4, <1 x i64>* %q
+    store <1 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v1i64_v2i32:
-define void @test_v1i64_v2i32(<2 x i32>* %p, <1 x i64>* %q) {
+define void @test_v1i64_v2i32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2s }
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: str
-    %1 = load <2 x i32>, <2 x i32>* %p
+    %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = bitcast <2 x i32> %2 to <1 x i64>
     %4 = add <1 x i64> %3, %3
-    store <1 x i64> %4, <1 x i64>* %q
+    store <1 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v1i64_v4f16:
-define void @test_v1i64_v4f16(<4 x half>* %p, <1 x i64>* %q) {
+define void @test_v1i64_v4f16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4h }
 ; CHECK-NOT: rev
 ; CHECK: fadd
 ; CHECK: rev64 v{{[0-9]+}}.4h
 ; CHECK: str
-    %1 = load <4 x half>, <4 x half>* %p
+    %1 = load <4 x half>, ptr %p
     %2 = fadd <4 x half> %1, %1
     %3 = bitcast <4 x half> %2 to <1 x i64>
     %4 = add <1 x i64> %3, %3
-    store <1 x i64> %4, <1 x i64>* %q
+    store <1 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v1i64_v4i16:
-define void @test_v1i64_v4i16(<4 x i16>* %p, <1 x i64>* %q) {
+define void @test_v1i64_v4i16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4h }
 ; CHECK: rev64 v{{[0-9]+}}.4h
 ; CHECK: str
-    %1 = load <4 x i16>, <4 x i16>* %p
+    %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = bitcast <4 x i16> %2 to <1 x i64>
     %4 = add <1 x i64> %3, %3
-    store <1 x i64> %4, <1 x i64>* %q
+    store <1 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v1i64_v8i8:
-define void @test_v1i64_v8i8(<8 x i8>* %p, <1 x i64>* %q) {
+define void @test_v1i64_v8i8(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.8b }
 ; CHECK: rev64 v{{[0-9]+}}.8b
 ; CHECK: str
-    %1 = load <8 x i8>, <8 x i8>* %p
+    %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = bitcast <8 x i8> %2 to <1 x i64>
     %4 = add <1 x i64> %3, %3
-    store <1 x i64> %4, <1 x i64>* %q
+    store <1 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f32_i64:
-define void @test_v2f32_i64(i64* %p, <2 x float>* %q) {
+define void @test_v2f32_i64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: st1 { v{{[0-9]+}}.2s }
-    %1 = load i64, i64* %p
+    %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = bitcast i64 %2 to <2 x float>
     %4 = fadd <2 x float> %3, %3
-    store <2 x float> %4, <2 x float>* %q
+    store <2 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f32_f64:
-define void @test_v2f32_f64(double* %p, <2 x float>* %q) {
+define void @test_v2f32_f64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: st1 { v{{[0-9]+}}.2s }
-    %1 = load double, double* %p
+    %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = bitcast double %2 to <2 x float>
     %4 = fadd <2 x float> %3, %3
-    store <2 x float> %4, <2 x float>* %q
+    store <2 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f32_v1i64:
-define void @test_v2f32_v1i64(<1 x i64>* %p, <2 x float>* %q) {
+define void @test_v2f32_v1i64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: st1 { v{{[0-9]+}}.2s }
-    %1 = load <1 x i64>, <1 x i64>* %p
+    %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = bitcast <1 x i64> %2 to <2 x float>
     %4 = fadd <2 x float> %3, %3
-    store <2 x float> %4, <2 x float>* %q
+    store <2 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f32_v2i32:
-define void @test_v2f32_v2i32(<2 x i32>* %p, <2 x float>* %q) {
+define void @test_v2f32_v2i32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2s }
 ; CHECK: st1 { v{{[0-9]+}}.2s }
-    %1 = load <2 x i32>, <2 x i32>* %p
+    %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = bitcast <2 x i32> %2 to <2 x float>
     %4 = fadd <2 x float> %3, %3
-    store <2 x float> %4, <2 x float>* %q
+    store <2 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f32_v4i16:
-define void @test_v2f32_v4i16(<4 x i16>* %p, <2 x float>* %q) {
+define void @test_v2f32_v4i16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4h }
 ; CHECK: rev32 v{{[0-9]+}}.4h
 ; CHECK: st1 { v{{[0-9]+}}.2s }
-    %1 = load <4 x i16>, <4 x i16>* %p
+    %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = bitcast <4 x i16> %2 to <2 x float>
     %4 = fadd <2 x float> %3, %3
-    store <2 x float> %4, <2 x float>* %q
+    store <2 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f32_v4f16:
-define void @test_v2f32_v4f16(<4 x half>* %p, <2 x float>* %q) {
+define void @test_v2f32_v4f16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4h }
 ; CHECK-NOT: rev
 ; CHECK: fadd
 ; CHECK: rev32 v{{[0-9]+}}.4h
 ; CHECK: st1 { v{{[0-9]+}}.2s }
-    %1 = load <4 x half>, <4 x half>* %p
+    %1 = load <4 x half>, ptr %p
     %2 = fadd <4 x half> %1, %1
     %3 = bitcast <4 x half> %2 to <2 x float>
     %4 = fadd <2 x float> %3, %3
-    store <2 x float> %4, <2 x float>* %q
+    store <2 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f32_v8i8:
-define void @test_v2f32_v8i8(<8 x i8>* %p, <2 x float>* %q) {
+define void @test_v2f32_v8i8(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.8b }
 ; CHECK: rev32 v{{[0-9]+}}.8b
 ; CHECK: st1 { v{{[0-9]+}}.2s }
-    %1 = load <8 x i8>, <8 x i8>* %p
+    %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = bitcast <8 x i8> %2 to <2 x float>
     %4 = fadd <2 x float> %3, %3
-    store <2 x float> %4, <2 x float>* %q
+    store <2 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i32_i64:
-define void @test_v2i32_i64(i64* %p, <2 x i32>* %q) {
+define void @test_v2i32_i64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: st1 { v{{[0-9]+}}.2s }
-    %1 = load i64, i64* %p
+    %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = bitcast i64 %2 to <2 x i32>
     %4 = add <2 x i32> %3, %3
-    store <2 x i32> %4, <2 x i32>* %q
+    store <2 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i32_f64:
-define void @test_v2i32_f64(double* %p, <2 x i32>* %q) {
+define void @test_v2i32_f64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: st1 { v{{[0-9]+}}.2s }
-    %1 = load double, double* %p
+    %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = bitcast double %2 to <2 x i32>
     %4 = add <2 x i32> %3, %3
-    store <2 x i32> %4, <2 x i32>* %q
+    store <2 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i32_v1i64:
-define void @test_v2i32_v1i64(<1 x i64>* %p, <2 x i32>* %q) {
+define void @test_v2i32_v1i64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: st1 { v{{[0-9]+}}.2s }
-    %1 = load <1 x i64>, <1 x i64>* %p
+    %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = bitcast <1 x i64> %2 to <2 x i32>
     %4 = add <2 x i32> %3, %3
-    store <2 x i32> %4, <2 x i32>* %q
+    store <2 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i32_v2f32:
-define void @test_v2i32_v2f32(<2 x float>* %p, <2 x i32>* %q) {
+define void @test_v2i32_v2f32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2s }
 ; CHECK: st1 { v{{[0-9]+}}.2s }
-    %1 = load <2 x float>, <2 x float>* %p
+    %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = bitcast <2 x float> %2 to <2 x i32>
     %4 = add <2 x i32> %3, %3
-    store <2 x i32> %4, <2 x i32>* %q
+    store <2 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i32_v4i16:
-define void @test_v2i32_v4i16(<4 x i16>* %p, <2 x i32>* %q) {
+define void @test_v2i32_v4i16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4h }
 ; CHECK: rev32 v{{[0-9]+}}.4h
 ; CHECK: st1 { v{{[0-9]+}}.2s }
-    %1 = load <4 x i16>, <4 x i16>* %p
+    %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = bitcast <4 x i16> %2 to <2 x i32>
     %4 = add <2 x i32> %3, %3
-    store <2 x i32> %4, <2 x i32>* %q
+    store <2 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i32_v8i8:
-define void @test_v2i32_v8i8(<8 x i8>* %p, <2 x i32>* %q) {
+define void @test_v2i32_v8i8(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.8b }
 ; CHECK: rev32 v{{[0-9]+}}.8b
 ; CHECK: st1 { v{{[0-9]+}}.2s }
-    %1 = load <8 x i8>, <8 x i8>* %p
+    %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = bitcast <8 x i8> %2 to <2 x i32>
     %4 = add <2 x i32> %3, %3
-    store <2 x i32> %4, <2 x i32>* %q
+    store <2 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i16_i64:
-define void @test_v4i16_i64(i64* %p, <4 x i16>* %q) {
+define void @test_v4i16_i64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: rev64 v{{[0-9]+}}.4h
 ; CHECK: st1 { v{{[0-9]+}}.4h }
-    %1 = load i64, i64* %p
+    %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = bitcast i64 %2 to <4 x i16>
     %4 = add <4 x i16> %3, %3
-    store <4 x i16> %4, <4 x i16>* %q
+    store <4 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i16_f64:
-define void @test_v4i16_f64(double* %p, <4 x i16>* %q) {
+define void @test_v4i16_f64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: rev64 v{{[0-9]+}}.4h
 ; CHECK: st1 { v{{[0-9]+}}.4h }
-    %1 = load double, double* %p
+    %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = bitcast double %2 to <4 x i16>
     %4 = add <4 x i16> %3, %3
-    store <4 x i16> %4, <4 x i16>* %q
+    store <4 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i16_v1i64:
-define void @test_v4i16_v1i64(<1 x i64>* %p, <4 x i16>* %q) {
+define void @test_v4i16_v1i64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: rev64 v{{[0-9]+}}.4h
 ; CHECK: st1 { v{{[0-9]+}}.4h }
-    %1 = load <1 x i64>, <1 x i64>* %p
+    %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = bitcast <1 x i64> %2 to <4 x i16>
     %4 = add <4 x i16> %3, %3
-    store <4 x i16> %4, <4 x i16>* %q
+    store <4 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i16_v2f32:
-define void @test_v4i16_v2f32(<2 x float>* %p, <4 x i16>* %q) {
+define void @test_v4i16_v2f32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2s }
 ; CHECK: rev32 v{{[0-9]+}}.4h
 ; CHECK: st1 { v{{[0-9]+}}.4h }
-    %1 = load <2 x float>, <2 x float>* %p
+    %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = bitcast <2 x float> %2 to <4 x i16>
     %4 = add <4 x i16> %3, %3
-    store <4 x i16> %4, <4 x i16>* %q
+    store <4 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i16_v2i32:
-define void @test_v4i16_v2i32(<2 x i32>* %p, <4 x i16>* %q) {
+define void @test_v4i16_v2i32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2s }
 ; CHECK: rev32 v{{[0-9]+}}.4h
 ; CHECK: st1 { v{{[0-9]+}}.4h }
-    %1 = load <2 x i32>, <2 x i32>* %p
+    %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = bitcast <2 x i32> %2 to <4 x i16>
     %4 = add <4 x i16> %3, %3
-    store <4 x i16> %4, <4 x i16>* %q
+    store <4 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i16_v4f16:
-define void @test_v4i16_v4f16(<4 x half>* %p, <4 x i16>* %q) {
+define void @test_v4i16_v4f16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4h }
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.4h }
-    %1 = load <4 x half>, <4 x half>* %p
+    %1 = load <4 x half>, ptr %p
     %2 = fadd <4 x half> %1, %1
     %3 = bitcast <4 x half> %2 to <4 x i16>
     %4 = add <4 x i16> %3, %3
-    store <4 x i16> %4, <4 x i16>* %q
+    store <4 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i16_v8i8:
-define void @test_v4i16_v8i8(<8 x i8>* %p, <4 x i16>* %q) {
+define void @test_v4i16_v8i8(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.8b }
 ; CHECK: rev16 v{{[0-9]+}}.8b
 ; CHECK: st1 { v{{[0-9]+}}.4h }
-    %1 = load <8 x i8>, <8 x i8>* %p
+    %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = bitcast <8 x i8> %2 to <4 x i16>
     %4 = add <4 x i16> %3, %3
-    store <4 x i16> %4, <4 x i16>* %q
+    store <4 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f16_i64:
-define void @test_v4f16_i64(i64* %p, <4 x half>* %q) {
+define void @test_v4f16_i64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: rev64 v{{[0-9]+}}.4h
 ; CHECK: fadd
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.4h }
-    %1 = load i64, i64* %p
+    %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = bitcast i64 %2 to <4 x half>
     %4 = fadd <4 x half> %3, %3
-    store <4 x half> %4, <4 x half>* %q
+    store <4 x half> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f16_f64:
-define void @test_v4f16_f64(double* %p, <4 x half>* %q) {
+define void @test_v4f16_f64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: rev64 v{{[0-9]+}}.4h
 ; CHECK: fadd
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.4h }
-    %1 = load double, double* %p
+    %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = bitcast double %2 to <4 x half>
     %4 = fadd <4 x half> %3, %3
-    store <4 x half> %4, <4 x half>* %q
+    store <4 x half> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f16_v1i64:
-define void @test_v4f16_v1i64(<1 x i64>* %p, <4 x half>* %q) {
+define void @test_v4f16_v1i64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: rev64 v{{[0-9]+}}.4h
 ; CHECK: fadd
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.4h }
-    %1 = load <1 x i64>, <1 x i64>* %p
+    %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = bitcast <1 x i64> %2 to <4 x half>
     %4 = fadd <4 x half> %3, %3
-    store <4 x half> %4, <4 x half>* %q
+    store <4 x half> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f16_v2f32:
-define void @test_v4f16_v2f32(<2 x float>* %p, <4 x half>* %q) {
+define void @test_v4f16_v2f32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2s }
 ; CHECK: rev32 v{{[0-9]+}}.4h
 ; CHECK: fadd
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.4h }
-    %1 = load <2 x float>, <2 x float>* %p
+    %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = bitcast <2 x float> %2 to <4 x half>
     %4 = fadd <4 x half> %3, %3
-    store <4 x half> %4, <4 x half>* %q
+    store <4 x half> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f16_v2i32:
-define void @test_v4f16_v2i32(<2 x i32>* %p, <4 x half>* %q) {
+define void @test_v4f16_v2i32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2s }
 ; CHECK: rev32 v{{[0-9]+}}.4h
 ; CHECK: fadd
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.4h }
-    %1 = load <2 x i32>, <2 x i32>* %p
+    %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = bitcast <2 x i32> %2 to <4 x half>
     %4 = fadd <4 x half> %3, %3
-    store <4 x half> %4, <4 x half>* %q
+    store <4 x half> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f16_v4i16:
-define void @test_v4f16_v4i16(<4 x i16>* %p, <4 x half>* %q) {
+define void @test_v4f16_v4i16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4h }
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.4h }
-    %1 = load <4 x i16>, <4 x i16>* %p
+    %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = bitcast <4 x i16> %2 to <4 x half>
     %4 = fadd <4 x half> %3, %3
-    store <4 x half> %4, <4 x half>* %q
+    store <4 x half> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f16_v8i8:
-define void @test_v4f16_v8i8(<8 x i8>* %p, <4 x half>* %q) {
+define void @test_v4f16_v8i8(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.8b }
 ; CHECK: rev16 v{{[0-9]+}}.8b
 ; CHECK: fadd
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.4h }
-    %1 = load <8 x i8>, <8 x i8>* %p
+    %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = bitcast <8 x i8> %2 to <4 x half>
     %4 = fadd <4 x half> %3, %3
-    store <4 x half> %4, <4 x half>* %q
+    store <4 x half> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i8_i64:
-define void @test_v8i8_i64(i64* %p, <8 x i8>* %q) {
+define void @test_v8i8_i64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: rev64 v{{[0-9]+}}.8b
 ; CHECK: st1 { v{{[0-9]+}}.8b }
-    %1 = load i64, i64* %p
+    %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = bitcast i64 %2 to <8 x i8>
     %4 = add <8 x i8> %3, %3
-    store <8 x i8> %4, <8 x i8>* %q
+    store <8 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i8_f64:
-define void @test_v8i8_f64(double* %p, <8 x i8>* %q) {
+define void @test_v8i8_f64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: rev64 v{{[0-9]+}}.8b
 ; CHECK: st1 { v{{[0-9]+}}.8b }
-    %1 = load double, double* %p
+    %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = bitcast double %2 to <8 x i8>
     %4 = add <8 x i8> %3, %3
-    store <8 x i8> %4, <8 x i8>* %q
+    store <8 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i8_v1i64:
-define void @test_v8i8_v1i64(<1 x i64>* %p, <8 x i8>* %q) {
+define void @test_v8i8_v1i64(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: rev64 v{{[0-9]+}}.8b
 ; CHECK: st1 { v{{[0-9]+}}.8b }
-    %1 = load <1 x i64>, <1 x i64>* %p
+    %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = bitcast <1 x i64> %2 to <8 x i8>
     %4 = add <8 x i8> %3, %3
-    store <8 x i8> %4, <8 x i8>* %q
+    store <8 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i8_v2f32:
-define void @test_v8i8_v2f32(<2 x float>* %p, <8 x i8>* %q) {
+define void @test_v8i8_v2f32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2s }
 ; CHECK: rev32 v{{[0-9]+}}.8b
 ; CHECK: st1 { v{{[0-9]+}}.8b }
-    %1 = load <2 x float>, <2 x float>* %p
+    %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = bitcast <2 x float> %2 to <8 x i8>
     %4 = add <8 x i8> %3, %3
-    store <8 x i8> %4, <8 x i8>* %q
+    store <8 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i8_v2i32:
-define void @test_v8i8_v2i32(<2 x i32>* %p, <8 x i8>* %q) {
+define void @test_v8i8_v2i32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2s }
 ; CHECK: rev32 v{{[0-9]+}}.8b
 ; CHECK: st1 { v{{[0-9]+}}.8b }
-    %1 = load <2 x i32>, <2 x i32>* %p
+    %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = bitcast <2 x i32> %2 to <8 x i8>
     %4 = add <8 x i8> %3, %3
-    store <8 x i8> %4, <8 x i8>* %q
+    store <8 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i8_v4i16:
-define void @test_v8i8_v4i16(<4 x i16>* %p, <8 x i8>* %q) {
+define void @test_v8i8_v4i16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4h }
 ; CHECK: rev16 v{{[0-9]+}}.8b
 ; CHECK: st1 { v{{[0-9]+}}.8b }
-    %1 = load <4 x i16>, <4 x i16>* %p
+    %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = bitcast <4 x i16> %2 to <8 x i8>
     %4 = add <8 x i8> %3, %3
-    store <8 x i8> %4, <8 x i8>* %q
+    store <8 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f128_v2f64:
-define void @test_f128_v2f64(<2 x double>* %p, fp128* %q) {
+define void @test_f128_v2f64(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2d }
 ; CHECK: ext
 ; CHECK: str
-    %1 = load <2 x double>, <2 x double>* %p
+    %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = bitcast <2 x double> %2 to fp128
     %4 = fadd fp128 %3, %3
-    store fp128 %4, fp128* %q
+    store fp128 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f128_v2i64:
-define void @test_f128_v2i64(<2 x i64>* %p, fp128* %q) {
+define void @test_f128_v2i64(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2d }
 ; CHECK: ext
 ; CHECK: str
-    %1 = load <2 x i64>, <2 x i64>* %p
+    %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = bitcast <2 x i64> %2 to fp128
     %4 = fadd fp128 %3, %3
-    store fp128 %4, fp128* %q
+    store fp128 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f128_v4f32:
-define void @test_f128_v4f32(<4 x float>* %p, fp128* %q) {
+define void @test_f128_v4f32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4s }
 ; CHECK-NOT: rev
 ; CHECK: fadd
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
 ; CHECK: str q
-    %1 = load <4 x float>, <4 x float>* %p
+    %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = bitcast <4 x float> %2 to fp128
     %4 = fadd fp128 %3, %3
-    store fp128 %4, fp128* %q
+    store fp128 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f128_v4i32:
-define void @test_f128_v4i32(<4 x i32>* %p, fp128* %q) {
+define void @test_f128_v4i32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4s }
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
 ; CHECK: str
-    %1 = load <4 x i32>, <4 x i32>* %p
+    %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = bitcast <4 x i32> %2 to fp128
     %4 = fadd fp128 %3, %3
-    store fp128 %4, fp128* %q
+    store fp128 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f128_v8i16:
-define void @test_f128_v8i16(<8 x i16>* %p, fp128* %q) {
+define void @test_f128_v8i16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.8h }
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: ext
 ; CHECK: str
-    %1 = load <8 x i16>, <8 x i16>* %p
+    %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = bitcast <8 x i16> %2 to fp128
     %4 = fadd fp128 %3, %3
-    store fp128 %4, fp128* %q
+    store fp128 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f128_v16i8:
-define void @test_f128_v16i8(<16 x i8>* %p, fp128* %q) {
+define void @test_f128_v16i8(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.16b }
 ; CHECK: ext
 ; CHECK: str q
-    %1 = load <16 x i8>, <16 x i8>* %p
+    %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = bitcast <16 x i8> %2 to fp128
     %4 = fadd fp128 %3, %3
-    store fp128 %4, fp128* %q
+    store fp128 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f64_f128:
-define void @test_v2f64_f128(fp128* %p, <2 x double>* %q) {
+define void @test_v2f64_f128(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: ext
 ; CHECK: st1 { v{{[0-9]+}}.2d }
-    %1 = load fp128, fp128* %p
+    %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = bitcast fp128 %2 to <2 x double>
     %4 = fadd <2 x double> %3, %3
-    store <2 x double> %4, <2 x double>* %q
+    store <2 x double> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f64_v2i64:
-define void @test_v2f64_v2i64(<2 x i64>* %p, <2 x double>* %q) {
+define void @test_v2f64_v2i64(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2d }
 ; CHECK: st1 { v{{[0-9]+}}.2d }
-    %1 = load <2 x i64>, <2 x i64>* %p
+    %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = bitcast <2 x i64> %2 to <2 x double>
     %4 = fadd <2 x double> %3, %3
-    store <2 x double> %4, <2 x double>* %q
+    store <2 x double> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f64_v4f32:
-define void @test_v2f64_v4f32(<4 x float>* %p, <2 x double>* %q) {
+define void @test_v2f64_v4f32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4s }
 ; CHECK-NOT: rev
 ; CHECK: fadd
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: st1 { v{{[0-9]+}}.2d }
-    %1 = load <4 x float>, <4 x float>* %p
+    %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = bitcast <4 x float> %2 to <2 x double>
     %4 = fadd <2 x double> %3, %3
-    store <2 x double> %4, <2 x double>* %q
+    store <2 x double> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f64_v4i32:
-define void @test_v2f64_v4i32(<4 x i32>* %p, <2 x double>* %q) {
+define void @test_v2f64_v4i32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4s }
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: st1 { v{{[0-9]+}}.2d }
-    %1 = load <4 x i32>, <4 x i32>* %p
+    %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = bitcast <4 x i32> %2 to <2 x double>
     %4 = fadd <2 x double> %3, %3
-    store <2 x double> %4, <2 x double>* %q
+    store <2 x double> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f64_v8i16:
-define void @test_v2f64_v8i16(<8 x i16>* %p, <2 x double>* %q) {
+define void @test_v2f64_v8i16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.8h }
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: st1 { v{{[0-9]+}}.2d }
-    %1 = load <8 x i16>, <8 x i16>* %p
+    %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = bitcast <8 x i16> %2 to <2 x double>
     %4 = fadd <2 x double> %3, %3
-    store <2 x double> %4, <2 x double>* %q
+    store <2 x double> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f64_v16i8:
-define void @test_v2f64_v16i8(<16 x i8>* %p, <2 x double>* %q) {
+define void @test_v2f64_v16i8(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.16b }
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: st1 { v{{[0-9]+}}.2d }
-    %1 = load <16 x i8>, <16 x i8>* %p
+    %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = bitcast <16 x i8> %2 to <2 x double>
     %4 = fadd <2 x double> %3, %3
-    store <2 x double> %4, <2 x double>* %q
+    store <2 x double> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i64_f128:
-define void @test_v2i64_f128(fp128* %p, <2 x i64>* %q) {
+define void @test_v2i64_f128(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: ext
 ; CHECK: st1 { v{{[0-9]+}}.2d }
-    %1 = load fp128, fp128* %p
+    %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = bitcast fp128 %2 to <2 x i64>
     %4 = add <2 x i64> %3, %3
-    store <2 x i64> %4, <2 x i64>* %q
+    store <2 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i64_v2f64:
-define void @test_v2i64_v2f64(<2 x double>* %p, <2 x i64>* %q) {
+define void @test_v2i64_v2f64(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2d }
 ; CHECK: st1 { v{{[0-9]+}}.2d }
-    %1 = load <2 x double>, <2 x double>* %p
+    %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = bitcast <2 x double> %2 to <2 x i64>
     %4 = add <2 x i64> %3, %3
-    store <2 x i64> %4, <2 x i64>* %q
+    store <2 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i64_v4f32:
-define void @test_v2i64_v4f32(<4 x float>* %p, <2 x i64>* %q) {
+define void @test_v2i64_v4f32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4s }
 ; CHECK-NOT: rev
 ; CHECK: fadd
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: add
 ; CHECK: st1 { v{{[0-9]+}}.2d }
-    %1 = load <4 x float>, <4 x float>* %p
+    %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = bitcast <4 x float> %2 to <2 x i64>
     %4 = add <2 x i64> %3, %3
-    store <2 x i64> %4, <2 x i64>* %q
+    store <2 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i64_v4i32:
-define void @test_v2i64_v4i32(<4 x i32>* %p, <2 x i64>* %q) {
+define void @test_v2i64_v4i32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4s }
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: st1 { v{{[0-9]+}}.2d }
-    %1 = load <4 x i32>, <4 x i32>* %p
+    %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = bitcast <4 x i32> %2 to <2 x i64>
     %4 = add <2 x i64> %3, %3
-    store <2 x i64> %4, <2 x i64>* %q
+    store <2 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i64_v8i16:
-define void @test_v2i64_v8i16(<8 x i16>* %p, <2 x i64>* %q) {
+define void @test_v2i64_v8i16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.8h }
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: st1 { v{{[0-9]+}}.2d }
-    %1 = load <8 x i16>, <8 x i16>* %p
+    %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = bitcast <8 x i16> %2 to <2 x i64>
     %4 = add <2 x i64> %3, %3
-    store <2 x i64> %4, <2 x i64>* %q
+    store <2 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i64_v16i8:
-define void @test_v2i64_v16i8(<16 x i8>* %p, <2 x i64>* %q) {
+define void @test_v2i64_v16i8(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.16b }
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: st1 { v{{[0-9]+}}.2d }
-    %1 = load <16 x i8>, <16 x i8>* %p
+    %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = bitcast <16 x i8> %2 to <2 x i64>
     %4 = add <2 x i64> %3, %3
-    store <2 x i64> %4, <2 x i64>* %q
+    store <2 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f32_f128:
-define void @test_v4f32_f128(fp128* %p, <4 x float>* %q) {
+define void @test_v4f32_f128(ptr %p, ptr %q) {
 ; CHECK: ldr q
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.4s }
-    %1 = load fp128, fp128* %p
+    %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = bitcast fp128 %2 to <4 x float>
     %4 = fadd <4 x float> %3, %3
-    store <4 x float> %4, <4 x float>* %q
+    store <4 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f32_v2f64:
-define void @test_v4f32_v2f64(<2 x double>* %p, <4 x float>* %q) {
+define void @test_v4f32_v2f64(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2d }
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.4s }
-    %1 = load <2 x double>, <2 x double>* %p
+    %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = bitcast <2 x double> %2 to <4 x float>
     %4 = fadd <4 x float> %3, %3
-    store <4 x float> %4, <4 x float>* %q
+    store <4 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f32_v2i64:
-define void @test_v4f32_v2i64(<2 x i64>* %p, <4 x float>* %q) {
+define void @test_v4f32_v2i64(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2d }
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: fadd
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.4s }
-    %1 = load <2 x i64>, <2 x i64>* %p
+    %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = bitcast <2 x i64> %2 to <4 x float>
     %4 = fadd <4 x float> %3, %3
-    store <4 x float> %4, <4 x float>* %q
+    store <4 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f32_v4i32:
-define void @test_v4f32_v4i32(<4 x i32>* %p, <4 x float>* %q) {
+define void @test_v4f32_v4i32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4s }
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.4s }
-    %1 = load <4 x i32>, <4 x i32>* %p
+    %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = bitcast <4 x i32> %2 to <4 x float>
     %4 = fadd <4 x float> %3, %3
-    store <4 x float> %4, <4 x float>* %q
+    store <4 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f32_v8i16:
-define void @test_v4f32_v8i16(<8 x i16>* %p, <4 x float>* %q) {
+define void @test_v4f32_v8i16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.8h }
 ; CHECK: rev32 v{{[0-9]+}}.8h
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.4s }
-    %1 = load <8 x i16>, <8 x i16>* %p
+    %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = bitcast <8 x i16> %2 to <4 x float>
     %4 = fadd <4 x float> %3, %3
-    store <4 x float> %4, <4 x float>* %q
+    store <4 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f32_v16i8:
-define void @test_v4f32_v16i8(<16 x i8>* %p, <4 x float>* %q) {
+define void @test_v4f32_v16i8(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.16b }
 ; CHECK: rev32 v{{[0-9]+}}.16b
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.4s }
-    %1 = load <16 x i8>, <16 x i8>* %p
+    %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = bitcast <16 x i8> %2 to <4 x float>
     %4 = fadd <4 x float> %3, %3
-    store <4 x float> %4, <4 x float>* %q
+    store <4 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i32_f128:
-define void @test_v4i32_f128(fp128* %p, <4 x i32>* %q) {
+define void @test_v4i32_f128(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
 ; CHECK: st1 { v{{[0-9]+}}.4s }
-    %1 = load fp128, fp128* %p
+    %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = bitcast fp128 %2 to <4 x i32>
     %4 = add <4 x i32> %3, %3
-    store <4 x i32> %4, <4 x i32>* %q
+    store <4 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i32_v2f64:
-define void @test_v4i32_v2f64(<2 x double>* %p, <4 x i32>* %q) {
+define void @test_v4i32_v2f64(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2d }
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: st1 { v{{[0-9]+}}.4s }
-    %1 = load <2 x double>, <2 x double>* %p
+    %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = bitcast <2 x double> %2 to <4 x i32>
     %4 = add <4 x i32> %3, %3
-    store <4 x i32> %4, <4 x i32>* %q
+    store <4 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i32_v2i64:
-define void @test_v4i32_v2i64(<2 x i64>* %p, <4 x i32>* %q) {
+define void @test_v4i32_v2i64(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2d }
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: st1 { v{{[0-9]+}}.4s }
-    %1 = load <2 x i64>, <2 x i64>* %p
+    %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = bitcast <2 x i64> %2 to <4 x i32>
     %4 = add <4 x i32> %3, %3
-    store <4 x i32> %4, <4 x i32>* %q
+    store <4 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i32_v4f32:
-define void @test_v4i32_v4f32(<4 x float>* %p, <4 x i32>* %q) {
+define void @test_v4i32_v4f32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4s }
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.4s }
-    %1 = load <4 x float>, <4 x float>* %p
+    %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = bitcast <4 x float> %2 to <4 x i32>
     %4 = add <4 x i32> %3, %3
-    store <4 x i32> %4, <4 x i32>* %q
+    store <4 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i32_v8i16:
-define void @test_v4i32_v8i16(<8 x i16>* %p, <4 x i32>* %q) {
+define void @test_v4i32_v8i16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.8h }
 ; CHECK: rev32 v{{[0-9]+}}.8h
 ; CHECK: st1 { v{{[0-9]+}}.4s }
-    %1 = load <8 x i16>, <8 x i16>* %p
+    %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = bitcast <8 x i16> %2 to <4 x i32>
     %4 = add <4 x i32> %3, %3
-    store <4 x i32> %4, <4 x i32>* %q
+    store <4 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i32_v16i8:
-define void @test_v4i32_v16i8(<16 x i8>* %p, <4 x i32>* %q) {
+define void @test_v4i32_v16i8(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.16b }
 ; CHECK: rev32 v{{[0-9]+}}.16b
 ; CHECK: st1 { v{{[0-9]+}}.4s }
-    %1 = load <16 x i8>, <16 x i8>* %p
+    %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = bitcast <16 x i8> %2 to <4 x i32>
     %4 = add <4 x i32> %3, %3
-    store <4 x i32> %4, <4 x i32>* %q
+    store <4 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i16_f128:
-define void @test_v8i16_f128(fp128* %p, <8 x i16>* %q) {
+define void @test_v8i16_f128(ptr %p, ptr %q) {
 ; CHECK: ldr
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: ext
 ; CHECK: st1 { v{{[0-9]+}}.8h }
-    %1 = load fp128, fp128* %p
+    %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = bitcast fp128 %2 to <8 x i16>
     %4 = add <8 x i16> %3, %3
-    store <8 x i16> %4, <8 x i16>* %q
+    store <8 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i16_v2f64:
-define void @test_v8i16_v2f64(<2 x double>* %p, <8 x i16>* %q) {
+define void @test_v8i16_v2f64(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2d }
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: st1 { v{{[0-9]+}}.8h }
-    %1 = load <2 x double>, <2 x double>* %p
+    %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = bitcast <2 x double> %2 to <8 x i16>
     %4 = add <8 x i16> %3, %3
-    store <8 x i16> %4, <8 x i16>* %q
+    store <8 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i16_v2i64:
-define void @test_v8i16_v2i64(<2 x i64>* %p, <8 x i16>* %q) {
+define void @test_v8i16_v2i64(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2d }
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: st1 { v{{[0-9]+}}.8h }
-    %1 = load <2 x i64>, <2 x i64>* %p
+    %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = bitcast <2 x i64> %2 to <8 x i16>
     %4 = add <8 x i16> %3, %3
-    store <8 x i16> %4, <8 x i16>* %q
+    store <8 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i16_v4f32:
-define void @test_v8i16_v4f32(<4 x float>* %p, <8 x i16>* %q) {
+define void @test_v8i16_v4f32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4s }
 ; CHECK: rev32 v{{[0-9]+}}.8h
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.8h }
-    %1 = load <4 x float>, <4 x float>* %p
+    %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = bitcast <4 x float> %2 to <8 x i16>
     %4 = add <8 x i16> %3, %3
-    store <8 x i16> %4, <8 x i16>* %q
+    store <8 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i16_v4i32:
-define void @test_v8i16_v4i32(<4 x i32>* %p, <8 x i16>* %q) {
+define void @test_v8i16_v4i32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4s }
 ; CHECK: rev32 v{{[0-9]+}}.8h
 ; CHECK: st1 { v{{[0-9]+}}.8h }
-    %1 = load <4 x i32>, <4 x i32>* %p
+    %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = bitcast <4 x i32> %2 to <8 x i16>
     %4 = add <8 x i16> %3, %3
-    store <8 x i16> %4, <8 x i16>* %q
+    store <8 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i16_v8f16:
-define void @test_v8i16_v8f16(<8 x half>* %p, <8 x i16>* %q) {
+define void @test_v8i16_v8f16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.8h }
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.8h }
-    %1 = load <8 x half>, <8 x half>* %p
+    %1 = load <8 x half>, ptr %p
     %2 = fadd <8 x half> %1, %1
     %3 = bitcast <8 x half> %2 to <8 x i16>
     %4 = add <8 x i16> %3, %3
-    store <8 x i16> %4, <8 x i16>* %q
+    store <8 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i16_v16i8:
-define void @test_v8i16_v16i8(<16 x i8>* %p, <8 x i16>* %q) {
+define void @test_v8i16_v16i8(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.16b }
 ; CHECK: rev16 v{{[0-9]+}}.16b
 ; CHECK: st1 { v{{[0-9]+}}.8h }
-    %1 = load <16 x i8>, <16 x i8>* %p
+    %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = bitcast <16 x i8> %2 to <8 x i16>
     %4 = add <8 x i16> %3, %3
-    store <8 x i16> %4, <8 x i16>* %q
+    store <8 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v16i8_f128:
-define void @test_v16i8_f128(fp128* %p, <16 x i8>* %q) {
+define void @test_v16i8_f128(ptr %p, ptr %q) {
 ; CHECK: ldr q
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: ext
 ; CHECK: st1 { v{{[0-9]+}}.16b }
-    %1 = load fp128, fp128* %p
+    %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = bitcast fp128 %2 to <16 x i8>
     %4 = add <16 x i8> %3, %3
-    store <16 x i8> %4, <16 x i8>* %q
+    store <16 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v16i8_v2f64:
-define void @test_v16i8_v2f64(<2 x double>* %p, <16 x i8>* %q) {
+define void @test_v16i8_v2f64(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2d }
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: st1 { v{{[0-9]+}}.16b }
-    %1 = load <2 x double>, <2 x double>* %p
+    %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = bitcast <2 x double> %2 to <16 x i8>
     %4 = add <16 x i8> %3, %3
-    store <16 x i8> %4, <16 x i8>* %q
+    store <16 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v16i8_v2i64:
-define void @test_v16i8_v2i64(<2 x i64>* %p, <16 x i8>* %q) {
+define void @test_v16i8_v2i64(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.2d }
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: st1 { v{{[0-9]+}}.16b }
-    %1 = load <2 x i64>, <2 x i64>* %p
+    %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = bitcast <2 x i64> %2 to <16 x i8>
     %4 = add <16 x i8> %3, %3
-    store <16 x i8> %4, <16 x i8>* %q
+    store <16 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v16i8_v4f32:
-define void @test_v16i8_v4f32(<4 x float>* %p, <16 x i8>* %q) {
+define void @test_v16i8_v4f32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4s }
 ; CHECK: rev32 v{{[0-9]+}}.16b
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.16b }
-    %1 = load <4 x float>, <4 x float>* %p
+    %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = bitcast <4 x float> %2 to <16 x i8>
     %4 = add <16 x i8> %3, %3
-    store <16 x i8> %4, <16 x i8>* %q
+    store <16 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v16i8_v4i32:
-define void @test_v16i8_v4i32(<4 x i32>* %p, <16 x i8>* %q) {
+define void @test_v16i8_v4i32(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.4s }
 ; CHECK: rev32 v{{[0-9]+}}.16b
 ; CHECK: st1 { v{{[0-9]+}}.16b }
-    %1 = load <4 x i32>, <4 x i32>* %p
+    %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = bitcast <4 x i32> %2 to <16 x i8>
     %4 = add <16 x i8> %3, %3
-    store <16 x i8> %4, <16 x i8>* %q
+    store <16 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v16i8_v8f16:
-define void @test_v16i8_v8f16(<8 x half>* %p, <16 x i8>* %q) {
+define void @test_v16i8_v8f16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.8h }
 ; CHECK: rev16 v{{[0-9]+}}.16b
 ; CHECK-NOT: rev
 ; CHECK: st1 { v{{[0-9]+}}.16b }
-    %1 = load <8 x half>, <8 x half>* %p
+    %1 = load <8 x half>, ptr %p
     %2 = fadd <8 x half> %1, %1
     %3 = bitcast <8 x half> %2 to <16 x i8>
     %4 = add <16 x i8> %3, %3
-    store <16 x i8> %4, <16 x i8>* %q
+    store <16 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v16i8_v8i16:
-define void @test_v16i8_v8i16(<8 x i16>* %p, <16 x i8>* %q) {
+define void @test_v16i8_v8i16(ptr %p, ptr %q) {
 ; CHECK: ld1 { v{{[0-9]+}}.8h }
 ; CHECK: rev16 v{{[0-9]+}}.16b
 ; CHECK: st1 { v{{[0-9]+}}.16b }
-    %1 = load <8 x i16>, <8 x i16>* %p
+    %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = bitcast <8 x i16> %2 to <16 x i8>
     %4 = add <16 x i8> %3, %3
-    store <16 x i8> %4, <16 x i8>* %q
+    store <16 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f16_struct:
 %struct.struct1 = type { half, half, half, half }
-define %struct.struct1 @test_v4f16_struct(%struct.struct1* %ret) {
+define %struct.struct1 @test_v4f16_struct(ptr %ret) {
 entry:
 ; CHECK: ld1 { {{v[0-9]+}}.4h }
 ; CHECK-NOT: rev
-  %0 = bitcast %struct.struct1* %ret to <4 x half>*
-  %1 = load <4 x half>, <4 x half>* %0, align 2
-  %2 = extractelement <4 x half> %1, i32 0
-  %.fca.0.insert = insertvalue %struct.struct1 undef, half %2, 0
+  %0 = load <4 x half>, ptr %ret, align 2
+  %1 = extractelement <4 x half> %0, i32 0
+  %.fca.0.insert = insertvalue %struct.struct1 undef, half %1, 0
   ret %struct.struct1 %.fca.0.insert
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-big-endian-eh.ll b/llvm/test/CodeGen/AArch64/arm64-big-endian-eh.ll
index b387209d5132a..c6f955f05837d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-big-endian-eh.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-big-endian-eh.ll
@@ -14,16 +14,16 @@
 ; }
 ;}
 
-define void @_Z4testii(i32 %a, i32 %b) #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @_Z4testii(i32 %a, i32 %b) #0 personality ptr @__gxx_personality_v0 {
 entry:
   invoke void @_Z3fooi(i32 %a)
           to label %try.cont unwind label %lpad
 
 lpad:                                             ; preds = %entry
-  %0 = landingpad { i8*, i32 }
-          catch i8* null
-  %1 = extractvalue { i8*, i32 } %0, 0
-  %2 = tail call i8* @__cxa_begin_catch(i8* %1) #2
+  %0 = landingpad { ptr, i32 }
+          catch ptr null
+  %1 = extractvalue { ptr, i32 } %0, 0
+  %2 = tail call ptr @__cxa_begin_catch(ptr %1) #2
   invoke void @_Z3fooi(i32 %b)
           to label %invoke.cont2 unwind label %lpad1
 
@@ -35,19 +35,19 @@ try.cont:                                         ; preds = %entry, %invoke.cont
   ret void
 
 lpad1:                                            ; preds = %lpad
-  %3 = landingpad { i8*, i32 }
+  %3 = landingpad { ptr, i32 }
           cleanup
   invoke void @__cxa_end_catch()
           to label %eh.resume unwind label %terminate.lpad
 
 eh.resume:                                        ; preds = %lpad1
-  resume { i8*, i32 } %3
+  resume { ptr, i32 } %3
 
 terminate.lpad:                                   ; preds = %lpad1
-  %4 = landingpad { i8*, i32 }
-          catch i8* null
-  %5 = extractvalue { i8*, i32 } %4, 0
-  tail call void @__clang_call_terminate(i8* %5) #3
+  %4 = landingpad { ptr, i32 }
+          catch ptr null
+  %5 = extractvalue { ptr, i32 } %4, 0
+  tail call void @__clang_call_terminate(ptr %5) #3
   unreachable
 }
 
@@ -55,13 +55,13 @@ declare void @_Z3fooi(i32) #0
 
 declare i32 @__gxx_personality_v0(...)
 
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
 
 declare void @__cxa_end_catch()
 
 ; Function Attrs: noinline noreturn nounwind
-define linkonce_odr hidden void @__clang_call_terminate(i8*) #1 {
-  %2 = tail call i8* @__cxa_begin_catch(i8* %0) #2
+define linkonce_odr hidden void @__clang_call_terminate(ptr) #1 {
+  %2 = tail call ptr @__cxa_begin_catch(ptr %0) #2
   tail call void @_ZSt9terminatev() #3
   unreachable
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-big-endian-varargs.ll b/llvm/test/CodeGen/AArch64/arm64-big-endian-varargs.ll
index e5e16848a4b0c..0e424f1e506f1 100644
--- a/llvm/test/CodeGen/AArch64/arm64-big-endian-varargs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-big-endian-varargs.ll
@@ -5,10 +5,10 @@
 target datalayout = "E-m:e-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64_be-arm-none-eabi"
 
-%struct.__va_list = type { i8*, i8*, i8*, i32, i32 }
+%struct.__va_list = type { ptr, ptr, ptr, i32, i32 }
 
-declare void @llvm.va_start(i8*) nounwind
-declare void @llvm.va_end(i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind
+declare void @llvm.va_end(ptr) nounwind
 
 define double @callee(i32 %a, ...) {
 ; CHECK: stp
@@ -19,40 +19,37 @@ define double @callee(i32 %a, ...) {
 ; CHECK: stp
 entry:
   %vl = alloca %struct.__va_list, align 8
-  %vl1 = bitcast %struct.__va_list* %vl to i8*
-  call void @llvm.va_start(i8* %vl1)
-  %vr_offs_p = getelementptr inbounds %struct.__va_list, %struct.__va_list* %vl, i64 0, i32 4
-  %vr_offs = load i32, i32* %vr_offs_p, align 4
+  call void @llvm.va_start(ptr %vl)
+  %vr_offs_p = getelementptr inbounds %struct.__va_list, ptr %vl, i64 0, i32 4
+  %vr_offs = load i32, ptr %vr_offs_p, align 4
   %0 = icmp sgt i32 %vr_offs, -1
   br i1 %0, label %vaarg.on_stack, label %vaarg.maybe_reg
 
 vaarg.maybe_reg:                                  ; preds = %entry
   %new_reg_offs = add i32 %vr_offs, 16
-  store i32 %new_reg_offs, i32* %vr_offs_p, align 4
+  store i32 %new_reg_offs, ptr %vr_offs_p, align 4
   %inreg = icmp slt i32 %new_reg_offs, 1
   br i1 %inreg, label %vaarg.in_reg, label %vaarg.on_stack
 
 vaarg.in_reg:                                     ; preds = %vaarg.maybe_reg
-  %reg_top_p = getelementptr inbounds %struct.__va_list, %struct.__va_list* %vl, i64 0, i32 2
-  %reg_top = load i8*, i8** %reg_top_p, align 8
+  %reg_top_p = getelementptr inbounds %struct.__va_list, ptr %vl, i64 0, i32 2
+  %reg_top = load ptr, ptr %reg_top_p, align 8
   %1 = sext i32 %vr_offs to i64
-  %2 = getelementptr i8, i8* %reg_top, i64 %1
-  %3 = ptrtoint i8* %2 to i64
+  %2 = getelementptr i8, ptr %reg_top, i64 %1
+  %3 = ptrtoint ptr %2 to i64
   %align_be = add i64 %3, 8
-  %4 = inttoptr i64 %align_be to i8*
+  %4 = inttoptr i64 %align_be to ptr
   br label %vaarg.end
 
 vaarg.on_stack:                                   ; preds = %vaarg.maybe_reg, %entry
-  %stack_p = getelementptr inbounds %struct.__va_list, %struct.__va_list* %vl, i64 0, i32 0
-  %stack = load i8*, i8** %stack_p, align 8
-  %new_stack = getelementptr i8, i8* %stack, i64 8
-  store i8* %new_stack, i8** %stack_p, align 8
+  %stack = load ptr, ptr %vl, align 8
+  %new_stack = getelementptr i8, ptr %stack, i64 8
+  store ptr %new_stack, ptr %vl, align 8
   br label %vaarg.end
 
 vaarg.end:                                        ; preds = %vaarg.on_stack, %vaarg.in_reg
-  %.sink = phi i8* [ %4, %vaarg.in_reg ], [ %stack, %vaarg.on_stack ]
-  %5 = bitcast i8* %.sink to double*
-  %6 = load double, double* %5, align 8
-  call void @llvm.va_end(i8* %vl1)
-  ret double %6
+  %.sink = phi ptr [ %4, %vaarg.in_reg ], [ %stack, %vaarg.on_stack ]
+  %5 = load double, ptr %.sink, align 8
+  call void @llvm.va_end(ptr %vl)
+  ret double %5
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll b/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll
index a1dec896d34a2..f1dccae36b21c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll
@@ -7,1266 +7,1266 @@
 
 ; CHECK-LABEL: test_i64_f64:
 declare i64 @test_i64_f64_helper(double %p)
-define void @test_i64_f64(double* %p, i64* %q) {
+define void @test_i64_f64(ptr %p, ptr %q) {
 ; CHECK-NOT: rev
-    %1 = load double, double* %p
+    %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = call i64 @test_i64_f64_helper(double %2)
     br label %return_bb
 return_bb:
     %4 = add i64 %3, %3
-    store i64 %4, i64* %q
+    store i64 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_i64_v1i64:
 declare i64 @test_i64_v1i64_helper(<1 x i64> %p)
-define void @test_i64_v1i64(<1 x i64>* %p, i64* %q) {
+define void @test_i64_v1i64(ptr %p, ptr %q) {
 ; CHECK-NOT: rev
-    %1 = load <1 x i64>, <1 x i64>* %p
+    %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = call i64 @test_i64_v1i64_helper(<1 x i64> %2)
     br label %return_bb
 return_bb:
     %4 = add i64 %3, %3
-    store i64 %4, i64* %q
+    store i64 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_i64_v2f32:
 declare i64 @test_i64_v2f32_helper(<2 x float> %p)
-define void @test_i64_v2f32(<2 x float>* %p, i64* %q) {
+define void @test_i64_v2f32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load <2 x float>, <2 x float>* %p
+    %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = call i64 @test_i64_v2f32_helper(<2 x float> %2)
     br label %return_bb
 return_bb:
     %4 = add i64 %3, %3
-    store i64 %4, i64* %q
+    store i64 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_i64_v2i32:
 declare i64 @test_i64_v2i32_helper(<2 x i32> %p)
-define void @test_i64_v2i32(<2 x i32>* %p, i64* %q) {
+define void @test_i64_v2i32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load <2 x i32>, <2 x i32>* %p
+    %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = call i64 @test_i64_v2i32_helper(<2 x i32> %2)
     br label %return_bb
 return_bb:
     %4 = add i64 %3, %3
-    store i64 %4, i64* %q
+    store i64 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_i64_v4i16:
 declare i64 @test_i64_v4i16_helper(<4 x i16> %p)
-define void @test_i64_v4i16(<4 x i16>* %p, i64* %q) {
+define void @test_i64_v4i16(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4h
-    %1 = load <4 x i16>, <4 x i16>* %p
+    %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = call i64 @test_i64_v4i16_helper(<4 x i16> %2)
     br label %return_bb
 return_bb:
     %4 = add i64 %3, %3
-    store i64 %4, i64* %q
+    store i64 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_i64_v8i8:
 declare i64 @test_i64_v8i8_helper(<8 x i8> %p)
-define void @test_i64_v8i8(<8 x i8>* %p, i64* %q) {
+define void @test_i64_v8i8(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.8b
-    %1 = load <8 x i8>, <8 x i8>* %p
+    %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = call i64 @test_i64_v8i8_helper(<8 x i8> %2)
     br label %return_bb
 return_bb:
     %4 = add i64 %3, %3
-    store i64 %4, i64* %q
+    store i64 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f64_i64:
 declare double @test_f64_i64_helper(i64 %p)
-define void @test_f64_i64(i64* %p, double* %q) {
+define void @test_f64_i64(ptr %p, ptr %q) {
 ; CHECK-NOT: rev
-    %1 = load i64, i64* %p
+    %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = call double @test_f64_i64_helper(i64 %2)
     br label %return_bb
 return_bb:
     %4 = fadd double %3, %3
-    store double %4, double* %q
+    store double %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f64_v1i64:
 declare double @test_f64_v1i64_helper(<1 x i64> %p)
-define void @test_f64_v1i64(<1 x i64>* %p, double* %q) {
+define void @test_f64_v1i64(ptr %p, ptr %q) {
 ; CHECK-NOT: rev
-    %1 = load <1 x i64>, <1 x i64>* %p
+    %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = call double @test_f64_v1i64_helper(<1 x i64> %2)
     br label %return_bb
 return_bb:
     %4 = fadd double %3, %3
-    store double %4, double* %q
+    store double %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f64_v2f32:
 declare double @test_f64_v2f32_helper(<2 x float> %p)
-define void @test_f64_v2f32(<2 x float>* %p, double* %q) {
+define void @test_f64_v2f32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load <2 x float>, <2 x float>* %p
+    %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = call double @test_f64_v2f32_helper(<2 x float> %2)
     br label %return_bb
 return_bb:
     %4 = fadd double %3, %3
-    store double %4, double* %q
+    store double %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f64_v2i32:
 declare double @test_f64_v2i32_helper(<2 x i32> %p)
-define void @test_f64_v2i32(<2 x i32>* %p, double* %q) {
+define void @test_f64_v2i32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load <2 x i32>, <2 x i32>* %p
+    %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = call double @test_f64_v2i32_helper(<2 x i32> %2)
     br label %return_bb
 return_bb:
     %4 = fadd double %3, %3
-    store double %4, double* %q
+    store double %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f64_v4i16:
 declare double @test_f64_v4i16_helper(<4 x i16> %p)
-define void @test_f64_v4i16(<4 x i16>* %p, double* %q) {
+define void @test_f64_v4i16(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4h
-    %1 = load <4 x i16>, <4 x i16>* %p
+    %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = call double @test_f64_v4i16_helper(<4 x i16> %2)
     br label %return_bb
 return_bb:
     %4 = fadd double %3, %3
-    store double %4, double* %q
+    store double %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f64_v8i8:
 declare double @test_f64_v8i8_helper(<8 x i8> %p)
-define void @test_f64_v8i8(<8 x i8>* %p, double* %q) {
+define void @test_f64_v8i8(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.8b
-    %1 = load <8 x i8>, <8 x i8>* %p
+    %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = call double @test_f64_v8i8_helper(<8 x i8> %2)
     br label %return_bb
 return_bb:
     %4 = fadd double %3, %3
-    store double %4, double* %q
+    store double %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v1i64_i64:
 declare <1 x i64> @test_v1i64_i64_helper(i64 %p)
-define void @test_v1i64_i64(i64* %p, <1 x i64>* %q) {
+define void @test_v1i64_i64(ptr %p, ptr %q) {
 ; CHECK-NOT: rev
-    %1 = load i64, i64* %p
+    %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = call <1 x i64> @test_v1i64_i64_helper(i64 %2)
     br label %return_bb
 return_bb:
     %4 = add <1 x i64> %3, %3
-    store <1 x i64> %4, <1 x i64>* %q
+    store <1 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v1i64_f64:
 declare <1 x i64> @test_v1i64_f64_helper(double %p)
-define void @test_v1i64_f64(double* %p, <1 x i64>* %q) {
+define void @test_v1i64_f64(ptr %p, ptr %q) {
 ; CHECK-NOT: rev
-    %1 = load double, double* %p
+    %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = call <1 x i64> @test_v1i64_f64_helper(double %2)
     br label %return_bb
 return_bb:
     %4 = add <1 x i64> %3, %3
-    store <1 x i64> %4, <1 x i64>* %q
+    store <1 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v1i64_v2f32:
 declare <1 x i64> @test_v1i64_v2f32_helper(<2 x float> %p)
-define void @test_v1i64_v2f32(<2 x float>* %p, <1 x i64>* %q) {
+define void @test_v1i64_v2f32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load <2 x float>, <2 x float>* %p
+    %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = call <1 x i64> @test_v1i64_v2f32_helper(<2 x float> %2)
     br label %return_bb
 return_bb:
     %4 = add <1 x i64> %3, %3
-    store <1 x i64> %4, <1 x i64>* %q
+    store <1 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v1i64_v2i32:
 declare <1 x i64> @test_v1i64_v2i32_helper(<2 x i32> %p)
-define void @test_v1i64_v2i32(<2 x i32>* %p, <1 x i64>* %q) {
+define void @test_v1i64_v2i32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load <2 x i32>, <2 x i32>* %p
+    %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = call <1 x i64> @test_v1i64_v2i32_helper(<2 x i32> %2)
     br label %return_bb
 return_bb:
     %4 = add <1 x i64> %3, %3
-    store <1 x i64> %4, <1 x i64>* %q
+    store <1 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v1i64_v4i16:
 declare <1 x i64> @test_v1i64_v4i16_helper(<4 x i16> %p)
-define void @test_v1i64_v4i16(<4 x i16>* %p, <1 x i64>* %q) {
+define void @test_v1i64_v4i16(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4h
-    %1 = load <4 x i16>, <4 x i16>* %p
+    %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = call <1 x i64> @test_v1i64_v4i16_helper(<4 x i16> %2)
     br label %return_bb
 return_bb:
     %4 = add <1 x i64> %3, %3
-    store <1 x i64> %4, <1 x i64>* %q
+    store <1 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v1i64_v8i8:
 declare <1 x i64> @test_v1i64_v8i8_helper(<8 x i8> %p)
-define void @test_v1i64_v8i8(<8 x i8>* %p, <1 x i64>* %q) {
+define void @test_v1i64_v8i8(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.8b
-    %1 = load <8 x i8>, <8 x i8>* %p
+    %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = call <1 x i64> @test_v1i64_v8i8_helper(<8 x i8> %2)
     br label %return_bb
 return_bb:
     %4 = add <1 x i64> %3, %3
-    store <1 x i64> %4, <1 x i64>* %q
+    store <1 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f32_i64:
 declare <2 x float> @test_v2f32_i64_helper(i64 %p)
-define void @test_v2f32_i64(i64* %p, <2 x float>* %q) {
+define void @test_v2f32_i64(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load i64, i64* %p
+    %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = call <2 x float> @test_v2f32_i64_helper(i64 %2)
     br label %return_bb
 return_bb:
     %4 = fadd <2 x float> %3, %3
-    store <2 x float> %4, <2 x float>* %q
+    store <2 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f32_f64:
 declare <2 x float> @test_v2f32_f64_helper(double %p)
-define void @test_v2f32_f64(double* %p, <2 x float>* %q) {
+define void @test_v2f32_f64(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load double, double* %p
+    %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = call <2 x float> @test_v2f32_f64_helper(double %2)
     br label %return_bb
 return_bb:
     %4 = fadd <2 x float> %3, %3
-    store <2 x float> %4, <2 x float>* %q
+    store <2 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f32_v1i64:
 declare <2 x float> @test_v2f32_v1i64_helper(<1 x i64> %p)
-define void @test_v2f32_v1i64(<1 x i64>* %p, <2 x float>* %q) {
+define void @test_v2f32_v1i64(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load <1 x i64>, <1 x i64>* %p
+    %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = call <2 x float> @test_v2f32_v1i64_helper(<1 x i64> %2)
     br label %return_bb
 return_bb:
     %4 = fadd <2 x float> %3, %3
-    store <2 x float> %4, <2 x float>* %q
+    store <2 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f32_v2i32:
 declare <2 x float> @test_v2f32_v2i32_helper(<2 x i32> %p)
-define void @test_v2f32_v2i32(<2 x i32>* %p, <2 x float>* %q) {
+define void @test_v2f32_v2i32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load <2 x i32>, <2 x i32>* %p
+    %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = call <2 x float> @test_v2f32_v2i32_helper(<2 x i32> %2)
     br label %return_bb
 return_bb:
     %4 = fadd <2 x float> %3, %3
-    store <2 x float> %4, <2 x float>* %q
+    store <2 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f32_v4i16:
 declare <2 x float> @test_v2f32_v4i16_helper(<4 x i16> %p)
-define void @test_v2f32_v4i16(<4 x i16>* %p, <2 x float>* %q) {
+define void @test_v2f32_v4i16(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4h
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load <4 x i16>, <4 x i16>* %p
+    %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = call <2 x float> @test_v2f32_v4i16_helper(<4 x i16> %2)
     br label %return_bb
 return_bb:
     %4 = fadd <2 x float> %3, %3
-    store <2 x float> %4, <2 x float>* %q
+    store <2 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f32_v8i8:
 declare <2 x float> @test_v2f32_v8i8_helper(<8 x i8> %p)
-define void @test_v2f32_v8i8(<8 x i8>* %p, <2 x float>* %q) {
+define void @test_v2f32_v8i8(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.8b
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load <8 x i8>, <8 x i8>* %p
+    %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = call <2 x float> @test_v2f32_v8i8_helper(<8 x i8> %2)
     br label %return_bb
 return_bb:
     %4 = fadd <2 x float> %3, %3
-    store <2 x float> %4, <2 x float>* %q
+    store <2 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i32_i64:
 declare <2 x i32> @test_v2i32_i64_helper(i64 %p)
-define void @test_v2i32_i64(i64* %p, <2 x i32>* %q) {
+define void @test_v2i32_i64(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load i64, i64* %p
+    %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = call <2 x i32> @test_v2i32_i64_helper(i64 %2)
     br label %return_bb
 return_bb:
     %4 = add <2 x i32> %3, %3
-    store <2 x i32> %4, <2 x i32>* %q
+    store <2 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i32_f64:
 declare <2 x i32> @test_v2i32_f64_helper(double %p)
-define void @test_v2i32_f64(double* %p, <2 x i32>* %q) {
+define void @test_v2i32_f64(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load double, double* %p
+    %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = call <2 x i32> @test_v2i32_f64_helper(double %2)
     br label %return_bb
 return_bb:
     %4 = add <2 x i32> %3, %3
-    store <2 x i32> %4, <2 x i32>* %q
+    store <2 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i32_v1i64:
 declare <2 x i32> @test_v2i32_v1i64_helper(<1 x i64> %p)
-define void @test_v2i32_v1i64(<1 x i64>* %p, <2 x i32>* %q) {
+define void @test_v2i32_v1i64(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load <1 x i64>, <1 x i64>* %p
+    %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = call <2 x i32> @test_v2i32_v1i64_helper(<1 x i64> %2)
     br label %return_bb
 return_bb:
     %4 = add <2 x i32> %3, %3
-    store <2 x i32> %4, <2 x i32>* %q
+    store <2 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i32_v2f32:
 declare <2 x i32> @test_v2i32_v2f32_helper(<2 x float> %p)
-define void @test_v2i32_v2f32(<2 x float>* %p, <2 x i32>* %q) {
+define void @test_v2i32_v2f32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load <2 x float>, <2 x float>* %p
+    %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = call <2 x i32> @test_v2i32_v2f32_helper(<2 x float> %2)
     br label %return_bb
 return_bb:
     %4 = add <2 x i32> %3, %3
-    store <2 x i32> %4, <2 x i32>* %q
+    store <2 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i32_v4i16:
 declare <2 x i32> @test_v2i32_v4i16_helper(<4 x i16> %p)
-define void @test_v2i32_v4i16(<4 x i16>* %p, <2 x i32>* %q) {
+define void @test_v2i32_v4i16(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4h
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load <4 x i16>, <4 x i16>* %p
+    %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = call <2 x i32> @test_v2i32_v4i16_helper(<4 x i16> %2)
     br label %return_bb
 return_bb:
     %4 = add <2 x i32> %3, %3
-    store <2 x i32> %4, <2 x i32>* %q
+    store <2 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i32_v8i8:
 declare <2 x i32> @test_v2i32_v8i8_helper(<8 x i8> %p)
-define void @test_v2i32_v8i8(<8 x i8>* %p, <2 x i32>* %q) {
+define void @test_v2i32_v8i8(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.8b
 ; CHECK: rev64 v{{[0-9]+}}.2s
-    %1 = load <8 x i8>, <8 x i8>* %p
+    %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = call <2 x i32> @test_v2i32_v8i8_helper(<8 x i8> %2)
     br label %return_bb
 return_bb:
     %4 = add <2 x i32> %3, %3
-    store <2 x i32> %4, <2 x i32>* %q
+    store <2 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i16_i64:
 declare <4 x i16> @test_v4i16_i64_helper(i64 %p)
-define void @test_v4i16_i64(i64* %p, <4 x i16>* %q) {
+define void @test_v4i16_i64(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4h
-    %1 = load i64, i64* %p
+    %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = call <4 x i16> @test_v4i16_i64_helper(i64 %2)
     br label %return_bb
 return_bb:
     %4 = add <4 x i16> %3, %3
-    store <4 x i16> %4, <4 x i16>* %q
+    store <4 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i16_f64:
 declare <4 x i16> @test_v4i16_f64_helper(double %p)
-define void @test_v4i16_f64(double* %p, <4 x i16>* %q) {
+define void @test_v4i16_f64(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4h
-    %1 = load double, double* %p
+    %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = call <4 x i16> @test_v4i16_f64_helper(double %2)
     br label %return_bb
 return_bb:
     %4 = add <4 x i16> %3, %3
-    store <4 x i16> %4, <4 x i16>* %q
+    store <4 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i16_v1i64:
 declare <4 x i16> @test_v4i16_v1i64_helper(<1 x i64> %p)
-define void @test_v4i16_v1i64(<1 x i64>* %p, <4 x i16>* %q) {
+define void @test_v4i16_v1i64(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4h
-    %1 = load <1 x i64>, <1 x i64>* %p
+    %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = call <4 x i16> @test_v4i16_v1i64_helper(<1 x i64> %2)
     br label %return_bb
 return_bb:
     %4 = add <4 x i16> %3, %3
-    store <4 x i16> %4, <4 x i16>* %q
+    store <4 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i16_v2f32:
 declare <4 x i16> @test_v4i16_v2f32_helper(<2 x float> %p)
-define void @test_v4i16_v2f32(<2 x float>* %p, <4 x i16>* %q) {
+define void @test_v4i16_v2f32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: rev64 v{{[0-9]+}}.4h
-    %1 = load <2 x float>, <2 x float>* %p
+    %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = call <4 x i16> @test_v4i16_v2f32_helper(<2 x float> %2)
     br label %return_bb
 return_bb:
     %4 = add <4 x i16> %3, %3
-    store <4 x i16> %4, <4 x i16>* %q
+    store <4 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i16_v2i32:
 declare <4 x i16> @test_v4i16_v2i32_helper(<2 x i32> %p)
-define void @test_v4i16_v2i32(<2 x i32>* %p, <4 x i16>* %q) {
+define void @test_v4i16_v2i32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: rev64 v{{[0-9]+}}.4h
-    %1 = load <2 x i32>, <2 x i32>* %p
+    %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = call <4 x i16> @test_v4i16_v2i32_helper(<2 x i32> %2)
     br label %return_bb
 return_bb:
     %4 = add <4 x i16> %3, %3
-    store <4 x i16> %4, <4 x i16>* %q
+    store <4 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i16_v8i8:
 declare <4 x i16> @test_v4i16_v8i8_helper(<8 x i8> %p)
-define void @test_v4i16_v8i8(<8 x i8>* %p, <4 x i16>* %q) {
+define void @test_v4i16_v8i8(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.8b
 ; CHECK: rev64 v{{[0-9]+}}.4h
-    %1 = load <8 x i8>, <8 x i8>* %p
+    %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = call <4 x i16> @test_v4i16_v8i8_helper(<8 x i8> %2)
     br label %return_bb
 return_bb:
     %4 = add <4 x i16> %3, %3
-    store <4 x i16> %4, <4 x i16>* %q
+    store <4 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i8_i64:
 declare <8 x i8> @test_v8i8_i64_helper(i64 %p)
-define void @test_v8i8_i64(i64* %p, <8 x i8>* %q) {
+define void @test_v8i8_i64(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.8b
-    %1 = load i64, i64* %p
+    %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = call <8 x i8> @test_v8i8_i64_helper(i64 %2)
     br label %return_bb
 return_bb:
     %4 = add <8 x i8> %3, %3
-    store <8 x i8> %4, <8 x i8>* %q
+    store <8 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i8_f64:
 declare <8 x i8> @test_v8i8_f64_helper(double %p)
-define void @test_v8i8_f64(double* %p, <8 x i8>* %q) {
+define void @test_v8i8_f64(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.8b
-    %1 = load double, double* %p
+    %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = call <8 x i8> @test_v8i8_f64_helper(double %2)
     br label %return_bb
 return_bb:
     %4 = add <8 x i8> %3, %3
-    store <8 x i8> %4, <8 x i8>* %q
+    store <8 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i8_v1i64:
 declare <8 x i8> @test_v8i8_v1i64_helper(<1 x i64> %p)
-define void @test_v8i8_v1i64(<1 x i64>* %p, <8 x i8>* %q) {
+define void @test_v8i8_v1i64(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.8b
-    %1 = load <1 x i64>, <1 x i64>* %p
+    %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = call <8 x i8> @test_v8i8_v1i64_helper(<1 x i64> %2)
     br label %return_bb
 return_bb:
     %4 = add <8 x i8> %3, %3
-    store <8 x i8> %4, <8 x i8>* %q
+    store <8 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i8_v2f32:
 declare <8 x i8> @test_v8i8_v2f32_helper(<2 x float> %p)
-define void @test_v8i8_v2f32(<2 x float>* %p, <8 x i8>* %q) {
+define void @test_v8i8_v2f32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: rev64 v{{[0-9]+}}.8b
-    %1 = load <2 x float>, <2 x float>* %p
+    %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = call <8 x i8> @test_v8i8_v2f32_helper(<2 x float> %2)
     br label %return_bb
 return_bb:
     %4 = add <8 x i8> %3, %3
-    store <8 x i8> %4, <8 x i8>* %q
+    store <8 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i8_v2i32:
 declare <8 x i8> @test_v8i8_v2i32_helper(<2 x i32> %p)
-define void @test_v8i8_v2i32(<2 x i32>* %p, <8 x i8>* %q) {
+define void @test_v8i8_v2i32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.2s
 ; CHECK: rev64 v{{[0-9]+}}.8b
-    %1 = load <2 x i32>, <2 x i32>* %p
+    %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = call <8 x i8> @test_v8i8_v2i32_helper(<2 x i32> %2)
     br label %return_bb
 return_bb:
     %4 = add <8 x i8> %3, %3
-    store <8 x i8> %4, <8 x i8>* %q
+    store <8 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i8_v4i16:
 declare <8 x i8> @test_v8i8_v4i16_helper(<4 x i16> %p)
-define void @test_v8i8_v4i16(<4 x i16>* %p, <8 x i8>* %q) {
+define void @test_v8i8_v4i16(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4h
 ; CHECK: rev64 v{{[0-9]+}}.8b
-    %1 = load <4 x i16>, <4 x i16>* %p
+    %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = call <8 x i8> @test_v8i8_v4i16_helper(<4 x i16> %2)
     br label %return_bb
 return_bb:
     %4 = add <8 x i8> %3, %3
-    store <8 x i8> %4, <8 x i8>* %q
+    store <8 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f128_v2f64:
 declare fp128 @test_f128_v2f64_helper(<2 x double> %p)
-define void @test_f128_v2f64(<2 x double>* %p, fp128* %q) {
+define void @test_f128_v2f64(ptr %p, ptr %q) {
 ; CHECK: ext
-    %1 = load <2 x double>, <2 x double>* %p
+    %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = call fp128 @test_f128_v2f64_helper(<2 x double> %2)
     br label %return_bb
 return_bb:
     %4 = fadd fp128 %3, %3
-    store fp128 %4, fp128* %q
+    store fp128 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f128_v2i64:
 declare fp128 @test_f128_v2i64_helper(<2 x i64> %p)
-define void @test_f128_v2i64(<2 x i64>* %p, fp128* %q) {
+define void @test_f128_v2i64(ptr %p, ptr %q) {
 ; CHECK: ext
-    %1 = load <2 x i64>, <2 x i64>* %p
+    %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = call fp128 @test_f128_v2i64_helper(<2 x i64> %2)
     br label %return_bb
 return_bb:
     %4 = fadd fp128 %3, %3
-    store fp128 %4, fp128* %q
+    store fp128 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f128_v4f32:
 declare fp128 @test_f128_v4f32_helper(<4 x float> %p)
-define void @test_f128_v4f32(<4 x float>* %p, fp128* %q) {
+define void @test_f128_v4f32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
-    %1 = load <4 x float>, <4 x float>* %p
+    %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = call fp128 @test_f128_v4f32_helper(<4 x float> %2)
     br label %return_bb
 return_bb:
     %4 = fadd fp128 %3, %3
-    store fp128 %4, fp128* %q
+    store fp128 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f128_v4i32:
 declare fp128 @test_f128_v4i32_helper(<4 x i32> %p)
-define void @test_f128_v4i32(<4 x i32>* %p, fp128* %q) {
+define void @test_f128_v4i32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
-    %1 = load <4 x i32>, <4 x i32>* %p
+    %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = call fp128 @test_f128_v4i32_helper(<4 x i32> %2)
     br label %return_bb
 return_bb:
     %4 = fadd fp128 %3, %3
-    store fp128 %4, fp128* %q
+    store fp128 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f128_v8i16:
 declare fp128 @test_f128_v8i16_helper(<8 x i16> %p)
-define void @test_f128_v8i16(<8 x i16>* %p, fp128* %q) {
+define void @test_f128_v8i16(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: ext
-    %1 = load <8 x i16>, <8 x i16>* %p
+    %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = call fp128 @test_f128_v8i16_helper(<8 x i16> %2)
     br label %return_bb
 return_bb:
     %4 = fadd fp128 %3, %3
-    store fp128 %4, fp128* %q
+    store fp128 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_f128_v16i8:
 declare fp128 @test_f128_v16i8_helper(<16 x i8> %p)
-define void @test_f128_v16i8(<16 x i8>* %p, fp128* %q) {
+define void @test_f128_v16i8(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: ext
-    %1 = load <16 x i8>, <16 x i8>* %p
+    %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = call fp128 @test_f128_v16i8_helper(<16 x i8> %2)
     br label %return_bb
 return_bb:
     %4 = fadd fp128 %3, %3
-    store fp128 %4, fp128* %q
+    store fp128 %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f64_f128:
 declare <2 x double> @test_v2f64_f128_helper(fp128 %p)
-define void @test_v2f64_f128(fp128* %p, <2 x double>* %q) {
+define void @test_v2f64_f128(ptr %p, ptr %q) {
 ; CHECK: ext
-    %1 = load fp128, fp128* %p
+    %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = call <2 x double> @test_v2f64_f128_helper(fp128 %2)
     br label %return_bb
 return_bb:
     %4 = fadd <2 x double> %3, %3
-    store <2 x double> %4, <2 x double>* %q
+    store <2 x double> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f64_v2i64:
 declare <2 x double> @test_v2f64_v2i64_helper(<2 x i64> %p)
-define void @test_v2f64_v2i64(<2 x i64>* %p, <2 x double>* %q) {
+define void @test_v2f64_v2i64(ptr %p, ptr %q) {
 ; CHECK: ext
 ; CHECK: ext
-    %1 = load <2 x i64>, <2 x i64>* %p
+    %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = call <2 x double> @test_v2f64_v2i64_helper(<2 x i64> %2)
     br label %return_bb
 return_bb:
     %4 = fadd <2 x double> %3, %3
-    store <2 x double> %4, <2 x double>* %q
+    store <2 x double> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f64_v4f32:
 declare <2 x double> @test_v2f64_v4f32_helper(<4 x float> %p)
-define void @test_v2f64_v4f32(<4 x float>* %p, <2 x double>* %q) {
+define void @test_v2f64_v4f32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
 ; CHECK: ext
-    %1 = load <4 x float>, <4 x float>* %p
+    %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = call <2 x double> @test_v2f64_v4f32_helper(<4 x float> %2)
     br label %return_bb
 return_bb:
     %4 = fadd <2 x double> %3, %3
-    store <2 x double> %4, <2 x double>* %q
+    store <2 x double> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f64_v4i32:
 declare <2 x double> @test_v2f64_v4i32_helper(<4 x i32> %p)
-define void @test_v2f64_v4i32(<4 x i32>* %p, <2 x double>* %q) {
+define void @test_v2f64_v4i32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
 ; CHECK: ext
-    %1 = load <4 x i32>, <4 x i32>* %p
+    %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = call <2 x double> @test_v2f64_v4i32_helper(<4 x i32> %2)
     br label %return_bb
 return_bb:
     %4 = fadd <2 x double> %3, %3
-    store <2 x double> %4, <2 x double>* %q
+    store <2 x double> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f64_v8i16:
 declare <2 x double> @test_v2f64_v8i16_helper(<8 x i16> %p)
-define void @test_v2f64_v8i16(<8 x i16>* %p, <2 x double>* %q) {
+define void @test_v2f64_v8i16(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: ext
 ; CHECK: ext
-    %1 = load <8 x i16>, <8 x i16>* %p
+    %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = call <2 x double> @test_v2f64_v8i16_helper(<8 x i16> %2)
     br label %return_bb
 return_bb:
     %4 = fadd <2 x double> %3, %3
-    store <2 x double> %4, <2 x double>* %q
+    store <2 x double> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2f64_v16i8:
 declare <2 x double> @test_v2f64_v16i8_helper(<16 x i8> %p)
-define void @test_v2f64_v16i8(<16 x i8>* %p, <2 x double>* %q) {
+define void @test_v2f64_v16i8(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: ext
 ; CHECK: ext
-    %1 = load <16 x i8>, <16 x i8>* %p
+    %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = call <2 x double> @test_v2f64_v16i8_helper(<16 x i8> %2)
     br label %return_bb
 return_bb:
     %4 = fadd <2 x double> %3, %3
-    store <2 x double> %4, <2 x double>* %q
+    store <2 x double> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i64_f128:
 declare <2 x i64> @test_v2i64_f128_helper(fp128 %p)
-define void @test_v2i64_f128(fp128* %p, <2 x i64>* %q) {
+define void @test_v2i64_f128(ptr %p, ptr %q) {
 ; CHECK: ext
-    %1 = load fp128, fp128* %p
+    %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = call <2 x i64> @test_v2i64_f128_helper(fp128 %2)
     br label %return_bb
 return_bb:
     %4 = add <2 x i64> %3, %3
-    store <2 x i64> %4, <2 x i64>* %q
+    store <2 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i64_v2f64:
 declare <2 x i64> @test_v2i64_v2f64_helper(<2 x double> %p)
-define void @test_v2i64_v2f64(<2 x double>* %p, <2 x i64>* %q) {
+define void @test_v2i64_v2f64(ptr %p, ptr %q) {
 ; CHECK: ext
 ; CHECK: ext
-    %1 = load <2 x double>, <2 x double>* %p
+    %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = call <2 x i64> @test_v2i64_v2f64_helper(<2 x double> %2)
     br label %return_bb
 return_bb:
     %4 = add <2 x i64> %3, %3
-    store <2 x i64> %4, <2 x i64>* %q
+    store <2 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i64_v4f32:
 declare <2 x i64> @test_v2i64_v4f32_helper(<4 x float> %p)
-define void @test_v2i64_v4f32(<4 x float>* %p, <2 x i64>* %q) {
+define void @test_v2i64_v4f32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
 ; CHECK: ext
-    %1 = load <4 x float>, <4 x float>* %p
+    %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = call <2 x i64> @test_v2i64_v4f32_helper(<4 x float> %2)
     br label %return_bb
 return_bb:
     %4 = add <2 x i64> %3, %3
-    store <2 x i64> %4, <2 x i64>* %q
+    store <2 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i64_v4i32:
 declare <2 x i64> @test_v2i64_v4i32_helper(<4 x i32> %p)
-define void @test_v2i64_v4i32(<4 x i32>* %p, <2 x i64>* %q) {
+define void @test_v2i64_v4i32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
 ; CHECK: ext
-    %1 = load <4 x i32>, <4 x i32>* %p
+    %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = call <2 x i64> @test_v2i64_v4i32_helper(<4 x i32> %2)
     br label %return_bb
 return_bb:
     %4 = add <2 x i64> %3, %3
-    store <2 x i64> %4, <2 x i64>* %q
+    store <2 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i64_v8i16:
 declare <2 x i64> @test_v2i64_v8i16_helper(<8 x i16> %p)
-define void @test_v2i64_v8i16(<8 x i16>* %p, <2 x i64>* %q) {
+define void @test_v2i64_v8i16(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: ext
 ; CHECK: ext
-    %1 = load <8 x i16>, <8 x i16>* %p
+    %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = call <2 x i64> @test_v2i64_v8i16_helper(<8 x i16> %2)
     br label %return_bb
 return_bb:
     %4 = add <2 x i64> %3, %3
-    store <2 x i64> %4, <2 x i64>* %q
+    store <2 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v2i64_v16i8:
 declare <2 x i64> @test_v2i64_v16i8_helper(<16 x i8> %p)
-define void @test_v2i64_v16i8(<16 x i8>* %p, <2 x i64>* %q) {
+define void @test_v2i64_v16i8(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: ext
 ; CHECK: ext
-    %1 = load <16 x i8>, <16 x i8>* %p
+    %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = call <2 x i64> @test_v2i64_v16i8_helper(<16 x i8> %2)
     br label %return_bb
 return_bb:
     %4 = add <2 x i64> %3, %3
-    store <2 x i64> %4, <2 x i64>* %q
+    store <2 x i64> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f32_f128:
 declare <4 x float> @test_v4f32_f128_helper(fp128 %p)
-define void @test_v4f32_f128(fp128* %p, <4 x float>* %q) {
+define void @test_v4f32_f128(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
-    %1 = load fp128, fp128* %p
+    %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = call <4 x float> @test_v4f32_f128_helper(fp128 %2)
     br label %return_bb
 return_bb:
     %4 = fadd <4 x float> %3, %3
-    store <4 x float> %4, <4 x float>* %q
+    store <4 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f32_v2f64:
 declare <4 x float> @test_v4f32_v2f64_helper(<2 x double> %p)
-define void @test_v4f32_v2f64(<2 x double>* %p, <4 x float>* %q) {
+define void @test_v4f32_v2f64(ptr %p, ptr %q) {
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
-    %1 = load <2 x double>, <2 x double>* %p
+    %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = call <4 x float> @test_v4f32_v2f64_helper(<2 x double> %2)
     br label %return_bb
 return_bb:
     %4 = fadd <4 x float> %3, %3
-    store <4 x float> %4, <4 x float>* %q
+    store <4 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f32_v2i64:
 declare <4 x float> @test_v4f32_v2i64_helper(<2 x i64> %p)
-define void @test_v4f32_v2i64(<2 x i64>* %p, <4 x float>* %q) {
+define void @test_v4f32_v2i64(ptr %p, ptr %q) {
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
-    %1 = load <2 x i64>, <2 x i64>* %p
+    %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = call <4 x float> @test_v4f32_v2i64_helper(<2 x i64> %2)
     br label %return_bb
 return_bb:
     %4 = fadd <4 x float> %3, %3
-    store <4 x float> %4, <4 x float>* %q
+    store <4 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f32_v4i32:
 declare <4 x float> @test_v4f32_v4i32_helper(<4 x i32> %p)
-define void @test_v4f32_v4i32(<4 x i32>* %p, <4 x float>* %q) {
+define void @test_v4f32_v4i32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
-    %1 = load <4 x i32>, <4 x i32>* %p
+    %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = call <4 x float> @test_v4f32_v4i32_helper(<4 x i32> %2)
     br label %return_bb
 return_bb:
     %4 = fadd <4 x float> %3, %3
-    store <4 x float> %4, <4 x float>* %q
+    store <4 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f32_v8i16:
 declare <4 x float> @test_v4f32_v8i16_helper(<8 x i16> %p)
-define void @test_v4f32_v8i16(<8 x i16>* %p, <4 x float>* %q) {
+define void @test_v4f32_v8i16(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
-    %1 = load <8 x i16>, <8 x i16>* %p
+    %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = call <4 x float> @test_v4f32_v8i16_helper(<8 x i16> %2)
     br label %return_bb
 return_bb:
     %4 = fadd <4 x float> %3, %3
-    store <4 x float> %4, <4 x float>* %q
+    store <4 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4f32_v16i8:
 declare <4 x float> @test_v4f32_v16i8_helper(<16 x i8> %p)
-define void @test_v4f32_v16i8(<16 x i8>* %p, <4 x float>* %q) {
+define void @test_v4f32_v16i8(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
-    %1 = load <16 x i8>, <16 x i8>* %p
+    %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = call <4 x float> @test_v4f32_v16i8_helper(<16 x i8> %2)
     br label %return_bb
 return_bb:
     %4 = fadd <4 x float> %3, %3
-    store <4 x float> %4, <4 x float>* %q
+    store <4 x float> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i32_f128:
 declare <4 x i32> @test_v4i32_f128_helper(fp128 %p)
-define void @test_v4i32_f128(fp128* %p, <4 x i32>* %q) {
+define void @test_v4i32_f128(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
-    %1 = load fp128, fp128* %p
+    %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = call <4 x i32> @test_v4i32_f128_helper(fp128 %2)
     br label %return_bb
 return_bb:
     %4 = add <4 x i32> %3, %3
-    store <4 x i32> %4, <4 x i32>* %q
+    store <4 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i32_v2f64:
 declare <4 x i32> @test_v4i32_v2f64_helper(<2 x double> %p)
-define void @test_v4i32_v2f64(<2 x double>* %p, <4 x i32>* %q) {
+define void @test_v4i32_v2f64(ptr %p, ptr %q) {
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
-    %1 = load <2 x double>, <2 x double>* %p
+    %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = call <4 x i32> @test_v4i32_v2f64_helper(<2 x double> %2)
     br label %return_bb
 return_bb:
     %4 = add <4 x i32> %3, %3
-    store <4 x i32> %4, <4 x i32>* %q
+    store <4 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i32_v2i64:
 declare <4 x i32> @test_v4i32_v2i64_helper(<2 x i64> %p)
-define void @test_v4i32_v2i64(<2 x i64>* %p, <4 x i32>* %q) {
+define void @test_v4i32_v2i64(ptr %p, ptr %q) {
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
-    %1 = load <2 x i64>, <2 x i64>* %p
+    %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = call <4 x i32> @test_v4i32_v2i64_helper(<2 x i64> %2)
     br label %return_bb
 return_bb:
     %4 = add <4 x i32> %3, %3
-    store <4 x i32> %4, <4 x i32>* %q
+    store <4 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i32_v4f32:
 declare <4 x i32> @test_v4i32_v4f32_helper(<4 x float> %p)
-define void @test_v4i32_v4f32(<4 x float>* %p, <4 x i32>* %q) {
+define void @test_v4i32_v4f32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
-    %1 = load <4 x float>, <4 x float>* %p
+    %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = call <4 x i32> @test_v4i32_v4f32_helper(<4 x float> %2)
     br label %return_bb
 return_bb:
     %4 = add <4 x i32> %3, %3
-    store <4 x i32> %4, <4 x i32>* %q
+    store <4 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i32_v8i16:
 declare <4 x i32> @test_v4i32_v8i16_helper(<8 x i16> %p)
-define void @test_v4i32_v8i16(<8 x i16>* %p, <4 x i32>* %q) {
+define void @test_v4i32_v8i16(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
-    %1 = load <8 x i16>, <8 x i16>* %p
+    %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = call <4 x i32> @test_v4i32_v8i16_helper(<8 x i16> %2)
     br label %return_bb
 return_bb:
     %4 = add <4 x i32> %3, %3
-    store <4 x i32> %4, <4 x i32>* %q
+    store <4 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v4i32_v16i8:
 declare <4 x i32> @test_v4i32_v16i8_helper(<16 x i8> %p)
-define void @test_v4i32_v16i8(<16 x i8>* %p, <4 x i32>* %q) {
+define void @test_v4i32_v16i8(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
-    %1 = load <16 x i8>, <16 x i8>* %p
+    %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = call <4 x i32> @test_v4i32_v16i8_helper(<16 x i8> %2)
     br label %return_bb
 return_bb:
     %4 = add <4 x i32> %3, %3
-    store <4 x i32> %4, <4 x i32>* %q
+    store <4 x i32> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i16_f128:
 declare <8 x i16> @test_v8i16_f128_helper(fp128 %p)
-define void @test_v8i16_f128(fp128* %p, <8 x i16>* %q) {
+define void @test_v8i16_f128(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: ext
-    %1 = load fp128, fp128* %p
+    %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = call <8 x i16> @test_v8i16_f128_helper(fp128 %2)
     br label %return_bb
 return_bb:
     %4 = add <8 x i16> %3, %3
-    store <8 x i16> %4, <8 x i16>* %q
+    store <8 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i16_v2f64:
 declare <8 x i16> @test_v8i16_v2f64_helper(<2 x double> %p)
-define void @test_v8i16_v2f64(<2 x double>* %p, <8 x i16>* %q) {
+define void @test_v8i16_v2f64(ptr %p, ptr %q) {
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: ext
-    %1 = load <2 x double>, <2 x double>* %p
+    %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = call <8 x i16> @test_v8i16_v2f64_helper(<2 x double> %2)
     br label %return_bb
 return_bb:
     %4 = add <8 x i16> %3, %3
-    store <8 x i16> %4, <8 x i16>* %q
+    store <8 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i16_v2i64:
 declare <8 x i16> @test_v8i16_v2i64_helper(<2 x i64> %p)
-define void @test_v8i16_v2i64(<2 x i64>* %p, <8 x i16>* %q) {
+define void @test_v8i16_v2i64(ptr %p, ptr %q) {
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: ext
-    %1 = load <2 x i64>, <2 x i64>* %p
+    %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = call <8 x i16> @test_v8i16_v2i64_helper(<2 x i64> %2)
     br label %return_bb
 return_bb:
     %4 = add <8 x i16> %3, %3
-    store <8 x i16> %4, <8 x i16>* %q
+    store <8 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i16_v4f32:
 declare <8 x i16> @test_v8i16_v4f32_helper(<4 x float> %p)
-define void @test_v8i16_v4f32(<4 x float>* %p, <8 x i16>* %q) {
+define void @test_v8i16_v4f32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: ext
-    %1 = load <4 x float>, <4 x float>* %p
+    %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = call <8 x i16> @test_v8i16_v4f32_helper(<4 x float> %2)
     br label %return_bb
 return_bb:
     %4 = add <8 x i16> %3, %3
-    store <8 x i16> %4, <8 x i16>* %q
+    store <8 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i16_v4i32:
 declare <8 x i16> @test_v8i16_v4i32_helper(<4 x i32> %p)
-define void @test_v8i16_v4i32(<4 x i32>* %p, <8 x i16>* %q) {
+define void @test_v8i16_v4i32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: ext
-    %1 = load <4 x i32>, <4 x i32>* %p
+    %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = call <8 x i16> @test_v8i16_v4i32_helper(<4 x i32> %2)
     br label %return_bb
 return_bb:
     %4 = add <8 x i16> %3, %3
-    store <8 x i16> %4, <8 x i16>* %q
+    store <8 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v8i16_v16i8:
 declare <8 x i16> @test_v8i16_v16i8_helper(<16 x i8> %p)
-define void @test_v8i16_v16i8(<16 x i8>* %p, <8 x i16>* %q) {
+define void @test_v8i16_v16i8(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: ext
-    %1 = load <16 x i8>, <16 x i8>* %p
+    %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = call <8 x i16> @test_v8i16_v16i8_helper(<16 x i8> %2)
     br label %return_bb
 return_bb:
     %4 = add <8 x i16> %3, %3
-    store <8 x i16> %4, <8 x i16>* %q
+    store <8 x i16> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v16i8_f128:
 declare <16 x i8> @test_v16i8_f128_helper(fp128 %p)
-define void @test_v16i8_f128(fp128* %p, <16 x i8>* %q) {
+define void @test_v16i8_f128(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: ext
-    %1 = load fp128, fp128* %p
+    %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = call <16 x i8> @test_v16i8_f128_helper(fp128 %2)
     br label %return_bb
 return_bb:
     %4 = add <16 x i8> %3, %3
-    store <16 x i8> %4, <16 x i8>* %q
+    store <16 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v16i8_v2f64:
 declare <16 x i8> @test_v16i8_v2f64_helper(<2 x double> %p)
-define void @test_v16i8_v2f64(<2 x double>* %p, <16 x i8>* %q) {
+define void @test_v16i8_v2f64(ptr %p, ptr %q) {
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: ext
-    %1 = load <2 x double>, <2 x double>* %p
+    %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = call <16 x i8> @test_v16i8_v2f64_helper(<2 x double> %2)
     br label %return_bb
 return_bb:
     %4 = add <16 x i8> %3, %3
-    store <16 x i8> %4, <16 x i8>* %q
+    store <16 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v16i8_v2i64:
 declare <16 x i8> @test_v16i8_v2i64_helper(<2 x i64> %p)
-define void @test_v16i8_v2i64(<2 x i64>* %p, <16 x i8>* %q) {
+define void @test_v16i8_v2i64(ptr %p, ptr %q) {
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: ext
-    %1 = load <2 x i64>, <2 x i64>* %p
+    %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = call <16 x i8> @test_v16i8_v2i64_helper(<2 x i64> %2)
     br label %return_bb
 return_bb:
     %4 = add <16 x i8> %3, %3
-    store <16 x i8> %4, <16 x i8>* %q
+    store <16 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v16i8_v4f32:
 declare <16 x i8> @test_v16i8_v4f32_helper(<4 x float> %p)
-define void @test_v16i8_v4f32(<4 x float>* %p, <16 x i8>* %q) {
+define void @test_v16i8_v4f32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: ext
-    %1 = load <4 x float>, <4 x float>* %p
+    %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = call <16 x i8> @test_v16i8_v4f32_helper(<4 x float> %2)
     br label %return_bb
 return_bb:
     %4 = add <16 x i8> %3, %3
-    store <16 x i8> %4, <16 x i8>* %q
+    store <16 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v16i8_v4i32:
 declare <16 x i8> @test_v16i8_v4i32_helper(<4 x i32> %p)
-define void @test_v16i8_v4i32(<4 x i32>* %p, <16 x i8>* %q) {
+define void @test_v16i8_v4i32(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.4s
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: ext
-    %1 = load <4 x i32>, <4 x i32>* %p
+    %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = call <16 x i8> @test_v16i8_v4i32_helper(<4 x i32> %2)
     br label %return_bb
 return_bb:
     %4 = add <16 x i8> %3, %3
-    store <16 x i8> %4, <16 x i8>* %q
+    store <16 x i8> %4, ptr %q
     ret void
 }
 
 ; CHECK-LABEL: test_v16i8_v8i16:
 declare <16 x i8> @test_v16i8_v8i16_helper(<8 x i16> %p)
-define void @test_v16i8_v8i16(<8 x i16>* %p, <16 x i8>* %q) {
+define void @test_v16i8_v8i16(ptr %p, ptr %q) {
 ; CHECK: rev64 v{{[0-9]+}}.8h
 ; CHECK: ext
 ; CHECK: rev64 v{{[0-9]+}}.16b
 ; CHECK: ext
-    %1 = load <8 x i16>, <8 x i16>* %p
+    %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = call <16 x i8> @test_v16i8_v8i16_helper(<8 x i16> %2)
     br label %return_bb
 return_bb:
     %4 = add <16 x i8> %3, %3
-    store <16 x i8> %4, <16 x i8>* %q
+    store <16 x i8> %4, ptr %q
     ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-big-imm-offsets.ll b/llvm/test/CodeGen/AArch64/arm64-big-imm-offsets.ll
index f2b6829316007..7102e5488a8dd 100644
--- a/llvm/test/CodeGen/AArch64/arm64-big-imm-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-big-imm-offsets.ll
@@ -3,12 +3,12 @@
 
 ; Make sure large offsets aren't mistaken for valid immediate offsets.
 ; <rdar://problem/13190511>
-define void @f(i32* nocapture %p) {
+define void @f(ptr nocapture %p) {
 entry:
-  %a = ptrtoint i32* %p to i64
+  %a = ptrtoint ptr %p to i64
   %ao = add i64 %a, 25769803792
-  %b = inttoptr i64 %ao to i32*
-  store volatile i32 0, i32* %b, align 4
-  store volatile i32 0, i32* %b, align 4
+  %b = inttoptr i64 %ao to ptr
+  store volatile i32 0, ptr %b, align 4
+  store volatile i32 0, ptr %b, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-big-stack.ll b/llvm/test/CodeGen/AArch64/arm64-big-stack.ll
index c9acbc5f054cf..a51a902796530 100644
--- a/llvm/test/CodeGen/AArch64/arm64-big-stack.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-big-stack.ll
@@ -13,9 +13,8 @@ target triple = "arm64-apple-macosx10"
 define void @foo() nounwind ssp {
 entry:
   %buffer = alloca [33554432 x i8], align 1
-  %arraydecay = getelementptr inbounds [33554432 x i8], [33554432 x i8]* %buffer, i64 0, i64 0
-  call void @doit(i8* %arraydecay) nounwind
+  call void @doit(ptr %buffer) nounwind
   ret void
 }
 
-declare void @doit(i8*)
+declare void @doit(ptr)

diff  --git a/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll b/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
index cf72e4b1fce9b..caa5a7f9ead14 100644
--- a/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
@@ -7,7 +7,7 @@
 %struct.Z = type { i8, i8, [2 x i8], i16 }
 %struct.A = type { i64, i8 }
 
-define void @foo(%struct.X* nocapture %x, %struct.Y* nocapture %y) nounwind optsize ssp {
+define void @foo(ptr nocapture %x, ptr nocapture %y) nounwind optsize ssp {
 ; LLC-LABEL: foo:
 ; LLC:       // %bb.0:
 ; LLC-NEXT:    ldr w8, [x0]
@@ -15,21 +15,19 @@ define void @foo(%struct.X* nocapture %x, %struct.Y* nocapture %y) nounwind opts
 ; LLC-NEXT:    strb w8, [x1, #4]
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @foo(
-; OPT-NEXT:    [[TMP:%.*]] = bitcast %struct.X* [[X:%.*]] to i32*
-; OPT-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP]], align 4
-; OPT-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_Y:%.*]], %struct.Y* [[Y:%.*]], i64 0, i32 1
+; OPT-NEXT:    [[TMP1:%.*]] = load i32, ptr [[X:%.*]], align 4
+; OPT-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_Y:%.*]], ptr [[Y:%.*]], i64 0, i32 1
 ; OPT-NEXT:    [[BF_CLEAR:%.*]] = lshr i32 [[TMP1]], 3
 ; OPT-NEXT:    [[BF_CLEAR_LOBIT:%.*]] = and i32 [[BF_CLEAR]], 1
 ; OPT-NEXT:    [[FROMBOOL:%.*]] = trunc i32 [[BF_CLEAR_LOBIT]] to i8
-; OPT-NEXT:    store i8 [[FROMBOOL]], i8* [[B]], align 1
+; OPT-NEXT:    store i8 [[FROMBOOL]], ptr [[B]], align 1
 ; OPT-NEXT:    ret void
-  %tmp = bitcast %struct.X* %x to i32*
-  %tmp1 = load i32, i32* %tmp, align 4
-  %b = getelementptr inbounds %struct.Y, %struct.Y* %y, i64 0, i32 1
+  %tmp1 = load i32, ptr %x, align 4
+  %b = getelementptr inbounds %struct.Y, ptr %y, i64 0, i32 1
   %bf.clear = lshr i32 %tmp1, 3
   %bf.clear.lobit = and i32 %bf.clear, 1
   %frombool = trunc i32 %bf.clear.lobit to i8
-  store i8 %frombool, i8* %b, align 1
+  store i8 %frombool, ptr %b, align 1
   ret void
 }
 
@@ -65,7 +63,7 @@ define i32 @bar(i64 %cav1.coerce) nounwind {
   ret i32 %tmp1
 }
 
-define void @fct1(%struct.Z* nocapture %x, %struct.A* nocapture %y) nounwind optsize ssp {
+define void @fct1(ptr nocapture %x, ptr nocapture %y) nounwind optsize ssp {
 ; LLC-LABEL: fct1:
 ; LLC:       // %bb.0:
 ; LLC-NEXT:    ldr x8, [x0]
@@ -73,19 +71,15 @@ define void @fct1(%struct.Z* nocapture %x, %struct.A* nocapture %y) nounwind opt
 ; LLC-NEXT:    str x8, [x1]
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct1(
-; OPT-NEXT:    [[TMP:%.*]] = bitcast %struct.Z* [[X:%.*]] to i64*
-; OPT-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP]], align 4
-; OPT-NEXT:    [[B1:%.*]] = bitcast %struct.A* [[Y:%.*]] to i64*
+; OPT-NEXT:    [[TMP1:%.*]] = load i64, ptr [[X:%.*]], align 4
 ; OPT-NEXT:    [[BF_CLEAR:%.*]] = lshr i64 [[TMP1]], 3
 ; OPT-NEXT:    [[BF_CLEAR_LOBIT:%.*]] = and i64 [[BF_CLEAR]], 1
-; OPT-NEXT:    store i64 [[BF_CLEAR_LOBIT]], i64* [[B1]], align 8
+; OPT-NEXT:    store i64 [[BF_CLEAR_LOBIT]], ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    ret void
-  %tmp = bitcast %struct.Z* %x to i64*
-  %tmp1 = load i64, i64* %tmp, align 4
-  %b = getelementptr inbounds %struct.A, %struct.A* %y, i64 0, i32 0
+  %tmp1 = load i64, ptr %x, align 4
   %bf.clear = lshr i64 %tmp1, 3
   %bf.clear.lobit = and i64 %bf.clear, 1
-  store i64 %bf.clear.lobit, i64* %b, align 8
+  store i64 %bf.clear.lobit, ptr %y, align 8
   ret void
 }
 
@@ -117,7 +111,7 @@ define i64 @fct3(i64 %cav1.coerce) nounwind {
   ret i64 %tmp1
 }
 
-define void @fct4(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
+define void @fct4(ptr nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct4:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr x8, [x0]
@@ -126,24 +120,24 @@ define void @fct4(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct4(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i64 [[TMP0]], -16777216
 ; OPT-NEXT:    [[SHR:%.*]] = lshr i64 [[X:%.*]], 16
 ; OPT-NEXT:    [[AND1:%.*]] = and i64 [[SHR]], 16777215
 ; OPT-NEXT:    [[OR:%.*]] = or i64 [[AND]], [[AND1]]
-; OPT-NEXT:    store i64 [[OR]], i64* [[Y]], align 8
+; OPT-NEXT:    store i64 [[OR]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
-  %0 = load i64, i64* %y, align 8
+  %0 = load i64, ptr %y, align 8
   %and = and i64 %0, -16777216
   %shr = lshr i64 %x, 16
   %and1 = and i64 %shr, 16777215
   %or = or i64 %and, %and1
-  store i64 %or, i64* %y, align 8
+  store i64 %or, ptr %y, align 8
   ret void
 }
 
-define void @fct5(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
+define void @fct5(ptr nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct5:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr w8, [x0]
@@ -152,25 +146,25 @@ define void @fct5(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct5(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i32 [[TMP0]], -8
 ; OPT-NEXT:    [[SHR:%.*]] = lshr i32 [[X:%.*]], 16
 ; OPT-NEXT:    [[AND1:%.*]] = and i32 [[SHR]], 7
 ; OPT-NEXT:    [[OR:%.*]] = or i32 [[AND]], [[AND1]]
-; OPT-NEXT:    store i32 [[OR]], i32* [[Y]], align 8
+; OPT-NEXT:    store i32 [[OR]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
-  %0 = load i32, i32* %y, align 8
+  %0 = load i32, ptr %y, align 8
   %and = and i32 %0, -8
   %shr = lshr i32 %x, 16
   %and1 = and i32 %shr, 7
   %or = or i32 %and, %and1
-  store i32 %or, i32* %y, align 8
+  store i32 %or, ptr %y, align 8
   ret void
 }
 
 ; Check if we can still catch bfm instruction when we drop some low bits
-define void @fct6(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
+define void @fct6(ptr nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct6:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr w8, [x0]
@@ -180,29 +174,29 @@ define void @fct6(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct6(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i32 [[TMP0]], -8
 ; OPT-NEXT:    [[SHR:%.*]] = lshr i32 [[X:%.*]], 16
 ; OPT-NEXT:    [[AND1:%.*]] = and i32 [[SHR]], 7
 ; OPT-NEXT:    [[OR:%.*]] = or i32 [[AND]], [[AND1]]
 ; OPT-NEXT:    [[SHR1:%.*]] = lshr i32 [[OR]], 2
-; OPT-NEXT:    store i32 [[SHR1]], i32* [[Y]], align 8
+; OPT-NEXT:    store i32 [[SHR1]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
 ; lsr is an alias of ubfm
-  %0 = load i32, i32* %y, align 8
+  %0 = load i32, ptr %y, align 8
   %and = and i32 %0, -8
   %shr = lshr i32 %x, 16
   %and1 = and i32 %shr, 7
   %or = or i32 %and, %and1
   %shr1 = lshr i32 %or, 2
-  store i32 %shr1, i32* %y, align 8
+  store i32 %shr1, ptr %y, align 8
   ret void
 }
 
 
 ; Check if we can still catch bfm instruction when we drop some high bits
-define void @fct7(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
+define void @fct7(ptr nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct7:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr w8, [x0]
@@ -212,30 +206,30 @@ define void @fct7(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct7(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i32 [[TMP0]], -8
 ; OPT-NEXT:    [[SHR:%.*]] = lshr i32 [[X:%.*]], 16
 ; OPT-NEXT:    [[AND1:%.*]] = and i32 [[SHR]], 7
 ; OPT-NEXT:    [[OR:%.*]] = or i32 [[AND]], [[AND1]]
 ; OPT-NEXT:    [[SHL:%.*]] = shl i32 [[OR]], 2
-; OPT-NEXT:    store i32 [[SHL]], i32* [[Y]], align 8
+; OPT-NEXT:    store i32 [[SHL]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
 ; lsl is an alias of ubfm
-  %0 = load i32, i32* %y, align 8
+  %0 = load i32, ptr %y, align 8
   %and = and i32 %0, -8
   %shr = lshr i32 %x, 16
   %and1 = and i32 %shr, 7
   %or = or i32 %and, %and1
   %shl = shl i32 %or, 2
-  store i32 %shl, i32* %y, align 8
+  store i32 %shl, ptr %y, align 8
   ret void
 }
 
 
 ; Check if we can still catch bfm instruction when we drop some low bits
 ; (i64 version)
-define void @fct8(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
+define void @fct8(ptr nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct8:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr x8, [x0]
@@ -245,30 +239,30 @@ define void @fct8(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct8(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i64 [[TMP0]], -8
 ; OPT-NEXT:    [[SHR:%.*]] = lshr i64 [[X:%.*]], 16
 ; OPT-NEXT:    [[AND1:%.*]] = and i64 [[SHR]], 7
 ; OPT-NEXT:    [[OR:%.*]] = or i64 [[AND]], [[AND1]]
 ; OPT-NEXT:    [[SHR1:%.*]] = lshr i64 [[OR]], 2
-; OPT-NEXT:    store i64 [[SHR1]], i64* [[Y]], align 8
+; OPT-NEXT:    store i64 [[SHR1]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
 ; lsr is an alias of ubfm
-  %0 = load i64, i64* %y, align 8
+  %0 = load i64, ptr %y, align 8
   %and = and i64 %0, -8
   %shr = lshr i64 %x, 16
   %and1 = and i64 %shr, 7
   %or = or i64 %and, %and1
   %shr1 = lshr i64 %or, 2
-  store i64 %shr1, i64* %y, align 8
+  store i64 %shr1, ptr %y, align 8
   ret void
 }
 
 
 ; Check if we can still catch bfm instruction when we drop some high bits
 ; (i64 version)
-define void @fct9(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
+define void @fct9(ptr nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct9:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr x8, [x0]
@@ -278,29 +272,29 @@ define void @fct9(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct9(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i64 [[TMP0]], -8
 ; OPT-NEXT:    [[SHR:%.*]] = lshr i64 [[X:%.*]], 16
 ; OPT-NEXT:    [[AND1:%.*]] = and i64 [[SHR]], 7
 ; OPT-NEXT:    [[OR:%.*]] = or i64 [[AND]], [[AND1]]
 ; OPT-NEXT:    [[SHL:%.*]] = shl i64 [[OR]], 2
-; OPT-NEXT:    store i64 [[SHL]], i64* [[Y]], align 8
+; OPT-NEXT:    store i64 [[SHL]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
 ; lsr is an alias of ubfm
-  %0 = load i64, i64* %y, align 8
+  %0 = load i64, ptr %y, align 8
   %and = and i64 %0, -8
   %shr = lshr i64 %x, 16
   %and1 = and i64 %shr, 7
   %or = or i64 %and, %and1
   %shl = shl i64 %or, 2
-  store i64 %shl, i64* %y, align 8
+  store i64 %shl, ptr %y, align 8
   ret void
 }
 
 ; Check if we can catch bfm instruction when lsb is 0 (i.e., no lshr)
 ; (i32 version)
-define void @fct10(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
+define void @fct10(ptr nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct10:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr w8, [x0]
@@ -310,27 +304,27 @@ define void @fct10(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct10(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i32 [[TMP0]], -8
 ; OPT-NEXT:    [[AND1:%.*]] = and i32 [[X:%.*]], 7
 ; OPT-NEXT:    [[OR:%.*]] = or i32 [[AND]], [[AND1]]
 ; OPT-NEXT:    [[SHL:%.*]] = shl i32 [[OR]], 2
-; OPT-NEXT:    store i32 [[SHL]], i32* [[Y]], align 8
+; OPT-NEXT:    store i32 [[SHL]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
 ; lsl is an alias of ubfm
-  %0 = load i32, i32* %y, align 8
+  %0 = load i32, ptr %y, align 8
   %and = and i32 %0, -8
   %and1 = and i32 %x, 7
   %or = or i32 %and, %and1
   %shl = shl i32 %or, 2
-  store i32 %shl, i32* %y, align 8
+  store i32 %shl, ptr %y, align 8
   ret void
 }
 
 ; Check if we can catch bfm instruction when lsb is 0 (i.e., no lshr)
 ; (i64 version)
-define void @fct11(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
+define void @fct11(ptr nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct11:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr x8, [x0]
@@ -340,21 +334,21 @@ define void @fct11(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct11(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i64 [[TMP0]], -8
 ; OPT-NEXT:    [[AND1:%.*]] = and i64 [[X:%.*]], 7
 ; OPT-NEXT:    [[OR:%.*]] = or i64 [[AND]], [[AND1]]
 ; OPT-NEXT:    [[SHL:%.*]] = shl i64 [[OR]], 2
-; OPT-NEXT:    store i64 [[SHL]], i64* [[Y]], align 8
+; OPT-NEXT:    store i64 [[SHL]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
 ; lsl is an alias of ubfm
-  %0 = load i64, i64* %y, align 8
+  %0 = load i64, ptr %y, align 8
   %and = and i64 %0, -8
   %and1 = and i64 %x, 7
   %or = or i64 %and, %and1
   %shl = shl i64 %or, 2
-  store i64 %shl, i64* %y, align 8
+  store i64 %shl, ptr %y, align 8
   ret void
 }
 
@@ -374,7 +368,7 @@ define zeroext i1 @fct12bis(i32 %tmp2) unnamed_addr nounwind ssp align 2 {
 
 ; Check if we can still catch bfm instruction when we drop some high bits
 ; and some low bits
-define void @fct12(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
+define void @fct12(ptr nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct12:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr w8, [x0]
@@ -384,28 +378,28 @@ define void @fct12(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct12(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i32 [[TMP0]], -8
 ; OPT-NEXT:    [[SHR:%.*]] = lshr i32 [[X:%.*]], 16
 ; OPT-NEXT:    [[AND1:%.*]] = and i32 [[SHR]], 7
 ; OPT-NEXT:    [[OR:%.*]] = or i32 [[AND]], [[AND1]]
 ; OPT-NEXT:    [[SHL:%.*]] = shl i32 [[OR]], 2
 ; OPT-NEXT:    [[SHR2:%.*]] = lshr i32 [[SHL]], 4
-; OPT-NEXT:    store i32 [[SHR2]], i32* [[Y]], align 8
+; OPT-NEXT:    store i32 [[SHR2]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
 ; lsr is an alias of ubfm
-  %0 = load i32, i32* %y, align 8
+  %0 = load i32, ptr %y, align 8
   %and = and i32 %0, -8
   %shr = lshr i32 %x, 16
   %and1 = and i32 %shr, 7
   %or = or i32 %and, %and1
   %shl = shl i32 %or, 2
   %shr2 = lshr i32 %shl, 4
-  store i32 %shr2, i32* %y, align 8
+  store i32 %shr2, ptr %y, align 8
   ret void
 }
-define void @fct12_mask(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
+define void @fct12_mask(ptr nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct12_mask:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr w8, [x0]
@@ -416,32 +410,32 @@ define void @fct12_mask(i32* nocapture %y, i32 %x) nounwind optsize inlinehint s
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct12_mask(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i32 [[TMP0]], -8
 ; OPT-NEXT:    [[SHR:%.*]] = lshr i32 [[X:%.*]], 16
 ; OPT-NEXT:    [[AND1:%.*]] = and i32 [[SHR]], 7
 ; OPT-NEXT:    [[OR:%.*]] = or i32 [[AND]], [[AND1]]
 ; OPT-NEXT:    [[LSHR:%.*]] = lshr i32 [[OR]], 2
 ; OPT-NEXT:    [[MASK:%.*]] = and i32 [[LSHR]], 268435455
-; OPT-NEXT:    store i32 [[MASK]], i32* [[Y]], align 8
+; OPT-NEXT:    store i32 [[MASK]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
 ; lsr is an alias of ubfm
-  %0 = load i32, i32* %y, align 8
+  %0 = load i32, ptr %y, align 8
   %and = and i32 %0, -8
   %shr = lshr i32 %x, 16
   %and1 = and i32 %shr, 7
   %or = or i32 %and, %and1
   %lshr = lshr i32 %or, 2
   %mask = and i32 %lshr, 268435455
-  store i32 %mask, i32* %y, align 8
+  store i32 %mask, ptr %y, align 8
   ret void
 }
 
 ; Check if we can still catch bfm instruction when we drop some high bits
 ; and some low bits
 ; (i64 version)
-define void @fct13(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
+define void @fct13(ptr nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct13:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr x8, [x0]
@@ -451,28 +445,28 @@ define void @fct13(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct13(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i64 [[TMP0]], -8
 ; OPT-NEXT:    [[SHR:%.*]] = lshr i64 [[X:%.*]], 16
 ; OPT-NEXT:    [[AND1:%.*]] = and i64 [[SHR]], 7
 ; OPT-NEXT:    [[OR:%.*]] = or i64 [[AND]], [[AND1]]
 ; OPT-NEXT:    [[SHL:%.*]] = shl i64 [[OR]], 2
 ; OPT-NEXT:    [[SHR2:%.*]] = lshr i64 [[SHL]], 4
-; OPT-NEXT:    store i64 [[SHR2]], i64* [[Y]], align 8
+; OPT-NEXT:    store i64 [[SHR2]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
 ; lsr is an alias of ubfm
-  %0 = load i64, i64* %y, align 8
+  %0 = load i64, ptr %y, align 8
   %and = and i64 %0, -8
   %shr = lshr i64 %x, 16
   %and1 = and i64 %shr, 7
   %or = or i64 %and, %and1
   %shl = shl i64 %or, 2
   %shr2 = lshr i64 %shl, 4
-  store i64 %shr2, i64* %y, align 8
+  store i64 %shr2, ptr %y, align 8
   ret void
 }
-define void @fct13_mask(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
+define void @fct13_mask(ptr nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct13_mask:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr x8, [x0]
@@ -483,32 +477,32 @@ define void @fct13_mask(i64* nocapture %y, i64 %x) nounwind optsize inlinehint s
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct13_mask(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i64 [[TMP0]], -8
 ; OPT-NEXT:    [[SHR:%.*]] = lshr i64 [[X:%.*]], 16
 ; OPT-NEXT:    [[AND1:%.*]] = and i64 [[SHR]], 7
 ; OPT-NEXT:    [[OR:%.*]] = or i64 [[AND]], [[AND1]]
 ; OPT-NEXT:    [[LSHR:%.*]] = lshr i64 [[OR]], 2
 ; OPT-NEXT:    [[MASK:%.*]] = and i64 [[LSHR]], 1152921504606846975
-; OPT-NEXT:    store i64 [[MASK]], i64* [[Y]], align 8
+; OPT-NEXT:    store i64 [[MASK]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
 ; lsr is an alias of ubfm
-  %0 = load i64, i64* %y, align 8
+  %0 = load i64, ptr %y, align 8
   %and = and i64 %0, -8
   %shr = lshr i64 %x, 16
   %and1 = and i64 %shr, 7
   %or = or i64 %and, %and1
   %lshr = lshr i64 %or, 2
   %mask = and i64 %lshr, 1152921504606846975
-  store i64 %mask, i64* %y, align 8
+  store i64 %mask, ptr %y, align 8
   ret void
 }
 
 
 ; Check if we can still catch bfm instruction when we drop some high bits
 ; and some low bits
-define void @fct14(i32* nocapture %y, i32 %x, i32 %x1) nounwind optsize inlinehint ssp {
+define void @fct14(ptr nocapture %y, i32 %x, i32 %x1) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct14:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr w8, [x0]
@@ -520,7 +514,7 @@ define void @fct14(i32* nocapture %y, i32 %x, i32 %x1) nounwind optsize inlinehi
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct14(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i32 [[TMP0]], -256
 ; OPT-NEXT:    [[SHR:%.*]] = lshr i32 [[X:%.*]], 16
 ; OPT-NEXT:    [[AND1:%.*]] = and i32 [[SHR]], 255
@@ -531,12 +525,12 @@ define void @fct14(i32* nocapture %y, i32 %x, i32 %x1) nounwind optsize inlinehi
 ; OPT-NEXT:    [[AND3:%.*]] = and i32 [[SHR1]], 7
 ; OPT-NEXT:    [[OR1:%.*]] = or i32 [[AND2]], [[AND3]]
 ; OPT-NEXT:    [[SHL1:%.*]] = shl i32 [[OR1]], 2
-; OPT-NEXT:    store i32 [[SHL1]], i32* [[Y]], align 8
+; OPT-NEXT:    store i32 [[SHL1]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
 ; lsr is an alias of ubfm
 ; lsl is an alias of ubfm
-  %0 = load i32, i32* %y, align 8
+  %0 = load i32, ptr %y, align 8
   %and = and i32 %0, -256
   %shr = lshr i32 %x, 16
   %and1 = and i32 %shr, 255
@@ -547,14 +541,14 @@ entry:
   %and3 = and i32 %shr1, 7
   %or1 = or i32 %and2, %and3
   %shl1 = shl i32 %or1, 2
-  store i32 %shl1, i32* %y, align 8
+  store i32 %shl1, ptr %y, align 8
   ret void
 }
 
 ; Check if we can still catch bfm instruction when we drop some high bits
 ; and some low bits
 ; (i64 version)
-define void @fct15(i64* nocapture %y, i64 %x, i64 %x1) nounwind optsize inlinehint ssp {
+define void @fct15(ptr nocapture %y, i64 %x, i64 %x1) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct15:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr x8, [x0]
@@ -566,7 +560,7 @@ define void @fct15(i64* nocapture %y, i64 %x, i64 %x1) nounwind optsize inlinehi
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct15(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i64 [[TMP0]], -256
 ; OPT-NEXT:    [[SHR:%.*]] = lshr i64 [[X:%.*]], 16
 ; OPT-NEXT:    [[AND1:%.*]] = and i64 [[SHR]], 255
@@ -577,12 +571,12 @@ define void @fct15(i64* nocapture %y, i64 %x, i64 %x1) nounwind optsize inlinehi
 ; OPT-NEXT:    [[AND3:%.*]] = and i64 [[SHR1]], 7
 ; OPT-NEXT:    [[OR1:%.*]] = or i64 [[AND2]], [[AND3]]
 ; OPT-NEXT:    [[SHL1:%.*]] = shl i64 [[OR1]], 2
-; OPT-NEXT:    store i64 [[SHL1]], i64* [[Y]], align 8
+; OPT-NEXT:    store i64 [[SHL1]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
 ; lsr is an alias of ubfm
 ; lsl is an alias of ubfm
-  %0 = load i64, i64* %y, align 8
+  %0 = load i64, ptr %y, align 8
   %and = and i64 %0, -256
   %shr = lshr i64 %x, 16
   %and1 = and i64 %shr, 255
@@ -593,13 +587,13 @@ entry:
   %and3 = and i64 %shr1, 7
   %or1 = or i64 %and2, %and3
   %shl1 = shl i64 %or1, 2
-  store i64 %shl1, i64* %y, align 8
+  store i64 %shl1, ptr %y, align 8
   ret void
 }
 
 ; Check if we can still catch bfm instruction when we drop some high bits
 ; and some low bits and a masking operation has to be kept
-define void @fct16(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
+define void @fct16(ptr nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct16:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr w8, [x0]
@@ -612,30 +606,30 @@ define void @fct16(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct16(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i32 [[TMP0]], 1737056
 ; OPT-NEXT:    [[SHR:%.*]] = lshr i32 [[X:%.*]], 16
 ; OPT-NEXT:    [[AND1:%.*]] = and i32 [[SHR]], 7
 ; OPT-NEXT:    [[OR:%.*]] = or i32 [[AND]], [[AND1]]
 ; OPT-NEXT:    [[SHL:%.*]] = shl i32 [[OR]], 2
 ; OPT-NEXT:    [[SHR2:%.*]] = lshr i32 [[SHL]], 4
-; OPT-NEXT:    store i32 [[SHR2]], i32* [[Y]], align 8
+; OPT-NEXT:    store i32 [[SHR2]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
 ; Create the constant
 ; Do the masking
 ; lsr is an alias of ubfm
-  %0 = load i32, i32* %y, align 8
+  %0 = load i32, ptr %y, align 8
   %and = and i32 %0, 1737056
   %shr = lshr i32 %x, 16
   %and1 = and i32 %shr, 7
   %or = or i32 %and, %and1
   %shl = shl i32 %or, 2
   %shr2 = lshr i32 %shl, 4
-  store i32 %shr2, i32* %y, align 8
+  store i32 %shr2, ptr %y, align 8
   ret void
 }
-define void @fct16_mask(i32* nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
+define void @fct16_mask(ptr nocapture %y, i32 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct16_mask:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr w8, [x0]
@@ -648,27 +642,27 @@ define void @fct16_mask(i32* nocapture %y, i32 %x) nounwind optsize inlinehint s
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct16_mask(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i32, i32* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i32, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i32 [[TMP0]], 1737056
 ; OPT-NEXT:    [[SHR:%.*]] = lshr i32 [[X:%.*]], 16
 ; OPT-NEXT:    [[AND1:%.*]] = and i32 [[SHR]], 7
 ; OPT-NEXT:    [[OR:%.*]] = or i32 [[AND]], [[AND1]]
 ; OPT-NEXT:    [[LSHR:%.*]] = lshr i32 [[OR]], 2
 ; OPT-NEXT:    [[MASK:%.*]] = and i32 [[LSHR]], 268435455
-; OPT-NEXT:    store i32 [[MASK]], i32* [[Y]], align 8
+; OPT-NEXT:    store i32 [[MASK]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
 ; Create the constant
 ; Do the masking
 ; lsr is an alias of ubfm
-  %0 = load i32, i32* %y, align 8
+  %0 = load i32, ptr %y, align 8
   %and = and i32 %0, 1737056
   %shr = lshr i32 %x, 16
   %and1 = and i32 %shr, 7
   %or = or i32 %and, %and1
   %lshr = lshr i32 %or, 2
   %mask = and i32 %lshr, 268435455
-  store i32 %mask, i32* %y, align 8
+  store i32 %mask, ptr %y, align 8
   ret void
 }
 
@@ -676,7 +670,7 @@ entry:
 ; Check if we can still catch bfm instruction when we drop some high bits
 ; and some low bits and a masking operation has to be kept
 ; (i64 version)
-define void @fct17(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
+define void @fct17(ptr nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct17:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr x8, [x0]
@@ -689,30 +683,30 @@ define void @fct17(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct17(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i64 [[TMP0]], 1737056
 ; OPT-NEXT:    [[SHR:%.*]] = lshr i64 [[X:%.*]], 16
 ; OPT-NEXT:    [[AND1:%.*]] = and i64 [[SHR]], 7
 ; OPT-NEXT:    [[OR:%.*]] = or i64 [[AND]], [[AND1]]
 ; OPT-NEXT:    [[SHL:%.*]] = shl i64 [[OR]], 2
 ; OPT-NEXT:    [[SHR2:%.*]] = lshr i64 [[SHL]], 4
-; OPT-NEXT:    store i64 [[SHR2]], i64* [[Y]], align 8
+; OPT-NEXT:    store i64 [[SHR2]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
 ; Create the constant
 ; Do the masking
 ; lsr is an alias of ubfm
-  %0 = load i64, i64* %y, align 8
+  %0 = load i64, ptr %y, align 8
   %and = and i64 %0, 1737056
   %shr = lshr i64 %x, 16
   %and1 = and i64 %shr, 7
   %or = or i64 %and, %and1
   %shl = shl i64 %or, 2
   %shr2 = lshr i64 %shl, 4
-  store i64 %shr2, i64* %y, align 8
+  store i64 %shr2, ptr %y, align 8
   ret void
 }
-define void @fct17_mask(i64* nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
+define void @fct17_mask(ptr nocapture %y, i64 %x) nounwind optsize inlinehint ssp {
 ; LLC-LABEL: fct17_mask:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    ldr x8, [x0]
@@ -725,27 +719,27 @@ define void @fct17_mask(i64* nocapture %y, i64 %x) nounwind optsize inlinehint s
 ; LLC-NEXT:    ret
 ; OPT-LABEL: @fct17_mask(
 ; OPT-NEXT:  entry:
-; OPT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[Y:%.*]], align 8
+; OPT-NEXT:    [[TMP0:%.*]] = load i64, ptr [[Y:%.*]], align 8
 ; OPT-NEXT:    [[AND:%.*]] = and i64 [[TMP0]], 1737056
 ; OPT-NEXT:    [[SHR:%.*]] = lshr i64 [[X:%.*]], 16
 ; OPT-NEXT:    [[AND1:%.*]] = and i64 [[SHR]], 7
 ; OPT-NEXT:    [[OR:%.*]] = or i64 [[AND]], [[AND1]]
 ; OPT-NEXT:    [[LSHR:%.*]] = lshr i64 [[OR]], 2
 ; OPT-NEXT:    [[MASK:%.*]] = and i64 [[LSHR]], 1152921504606846975
-; OPT-NEXT:    store i64 [[MASK]], i64* [[Y]], align 8
+; OPT-NEXT:    store i64 [[MASK]], ptr [[Y]], align 8
 ; OPT-NEXT:    ret void
 entry:
 ; Create the constant
 ; Do the masking
 ; lsr is an alias of ubfm
-  %0 = load i64, i64* %y, align 8
+  %0 = load i64, ptr %y, align 8
   %and = and i64 %0, 1737056
   %shr = lshr i64 %x, 16
   %and1 = and i64 %shr, 7
   %or = or i64 %and, %and1
   %lshr = lshr i64 %or, 2
   %mask = and i64 %lshr, 1152921504606846975
-  store i64 %mask, i64* %y, align 8
+  store i64 %mask, ptr %y, align 8
   ret void
 }
 
@@ -809,8 +803,8 @@ define i32 @fct19(i64 %arg1) nounwind readonly ssp  {
 ; OPT-NEXT:    [[TOBOOL:%.*]] = icmp eq i64 [[X_SROA_5_0_EXTRACT_SHIFT]], 0
 ; OPT-NEXT:    br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
 ; OPT:       if.then:
-; OPT-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 [[X_SROA_5_0_EXTRACT_SHIFT]]
-; OPT-NEXT:    [[TMP0:%.*]] = load i8, i8* [[ARRAYIDX3]], align 1
+; OPT-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds [65536 x i8], ptr @first_ones, i64 0, i64 [[X_SROA_5_0_EXTRACT_SHIFT]]
+; OPT-NEXT:    [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX3]], align 1
 ; OPT-NEXT:    [[CONV:%.*]] = zext i8 [[TMP0]] to i32
 ; OPT-NEXT:    br label [[RETURN:%.*]]
 ; OPT:       if.end:
@@ -821,8 +815,8 @@ define i32 @fct19(i64 %arg1) nounwind readonly ssp  {
 ; OPT:       if.then7:
 ; OPT-NEXT:    [[TMP2:%.*]] = lshr i64 [[ARG1]], 32
 ; OPT-NEXT:    [[IDXPROM10:%.*]] = and i64 [[TMP2]], 65535
-; OPT-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 [[IDXPROM10]]
-; OPT-NEXT:    [[TMP3:%.*]] = load i8, i8* [[ARRAYIDX11]], align 1
+; OPT-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds [65536 x i8], ptr @first_ones, i64 0, i64 [[IDXPROM10]]
+; OPT-NEXT:    [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX11]], align 1
 ; OPT-NEXT:    [[CONV12:%.*]] = zext i8 [[TMP3]] to i32
 ; OPT-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV12]], 16
 ; OPT-NEXT:    br label [[RETURN]]
@@ -834,8 +828,8 @@ define i32 @fct19(i64 %arg1) nounwind readonly ssp  {
 ; OPT:       if.then17:
 ; OPT-NEXT:    [[TMP6:%.*]] = lshr i64 [[ARG1]], 16
 ; OPT-NEXT:    [[IDXPROM20:%.*]] = and i64 [[TMP6]], 65535
-; OPT-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 [[IDXPROM20]]
-; OPT-NEXT:    [[TMP7:%.*]] = load i8, i8* [[ARRAYIDX21]], align 1
+; OPT-NEXT:    [[ARRAYIDX21:%.*]] = getelementptr inbounds [65536 x i8], ptr @first_ones, i64 0, i64 [[IDXPROM20]]
+; OPT-NEXT:    [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX21]], align 1
 ; OPT-NEXT:    [[CONV22:%.*]] = zext i8 [[TMP7]] to i32
 ; OPT-NEXT:    [[ADD23:%.*]] = add nsw i32 [[CONV22]], 32
 ; OPT-NEXT:    br label [[RETURN]]
@@ -851,8 +845,8 @@ entry:
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  %arrayidx3 = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 %x.sroa.5.0.extract.shift
-  %0 = load i8, i8* %arrayidx3, align 1
+  %arrayidx3 = getelementptr inbounds [65536 x i8], ptr @first_ones, i64 0, i64 %x.sroa.5.0.extract.shift
+  %0 = load i8, ptr %arrayidx3, align 1
   %conv = zext i8 %0 to i32
   br label %return
 
@@ -865,8 +859,8 @@ if.then7:                                         ; preds = %if.end
 ; "and" should be combined to "ubfm" while "ubfm" should be removed by cse.
 ; So neither of them should be in the assemble code.
   %idxprom10 = and i64 %x.sroa.3.0.extract.shift, 65535
-  %arrayidx11 = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 %idxprom10
-  %1 = load i8, i8* %arrayidx11, align 1
+  %arrayidx11 = getelementptr inbounds [65536 x i8], ptr @first_ones, i64 0, i64 %idxprom10
+  %1 = load i8, ptr %arrayidx11, align 1
   %conv12 = zext i8 %1 to i32
   %add = add nsw i32 %conv12, 16
   br label %return
@@ -879,8 +873,8 @@ if.then17:                                        ; preds = %if.end13
 ; "and" should be combined to "ubfm" while "ubfm" should be removed by cse.
 ; So neither of them should be in the assemble code.
   %idxprom20 = and i64 %x.sroa.1.0.extract.shift, 65535
-  %arrayidx21 = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 %idxprom20
-  %2 = load i8, i8* %arrayidx21, align 1
+  %arrayidx21 = getelementptr inbounds [65536 x i8], ptr @first_ones, i64 0, i64 %idxprom20
+  %2 = load i8, ptr %arrayidx21, align 1
   %conv22 = zext i8 %2 to i32
   %add23 = add nsw i32 %conv22, 32
   br label %return
@@ -950,14 +944,14 @@ define i64 @fct21(i64 %x) {
 ; OPT-NEXT:  entry:
 ; OPT-NEXT:    [[SHR:%.*]] = lshr i64 [[X:%.*]], 4
 ; OPT-NEXT:    [[AND:%.*]] = and i64 [[SHR]], 15
-; OPT-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x [64 x i64]], [8 x [64 x i64]]* @arr, i64 0, i64 0, i64 [[AND]]
-; OPT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[ARRAYIDX]], align 8
+; OPT-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x [64 x i64]], ptr @arr, i64 0, i64 0, i64 [[AND]]
+; OPT-NEXT:    [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
 ; OPT-NEXT:    ret i64 [[TMP0]]
 entry:
   %shr = lshr i64 %x, 4
   %and = and i64 %shr, 15
-  %arrayidx = getelementptr inbounds [8 x [64 x i64]], [8 x [64 x i64]]* @arr, i64 0, i64 0, i64 %and
-  %0 = load i64, i64* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds [8 x [64 x i64]], ptr @arr, i64 0, i64 0, i64 %and
+  %0 = load i64, ptr %arrayidx, align 8
   ret i64 %0
 }
 
@@ -992,7 +986,7 @@ define i16 @test_ignored_rightbits(i32 %dst, i32 %in) {
 ; The following test excercises the case where we have a BFI
 ; instruction with the same input in both operands. We need to
 ; track the useful bits through both operands.
-define void @sameOperandBFI(i64 %src, i64 %src2, i16 *%ptr) {
+define void @sameOperandBFI(i64 %src, i64 %src2, ptr %ptr) {
 ; LLC-LABEL: sameOperandBFI:
 ; LLC:       // %bb.0: // %entry
 ; LLC-NEXT:    cbnz wzr, .LBB30_2
@@ -1018,7 +1012,7 @@ define void @sameOperandBFI(i64 %src, i64 %src2, i16 *%ptr) {
 ; OPT-NEXT:    [[BFIRHS:%.*]] = shl nuw nsw i32 [[BFISOURCE]], 4
 ; OPT-NEXT:    [[BFI:%.*]] = or i32 [[BFIRHS]], [[BFISOURCE]]
 ; OPT-NEXT:    [[BFITRUNC:%.*]] = trunc i32 [[BFI]] to i16
-; OPT-NEXT:    store i16 [[BFITRUNC]], i16* [[PTR:%.*]], align 4
+; OPT-NEXT:    store i16 [[BFITRUNC]], ptr [[PTR:%.*]], align 4
 ; OPT-NEXT:    br label [[END]]
 ; OPT:       end:
 ; OPT-NEXT:    ret void
@@ -1036,7 +1030,7 @@ if.else:
   %BFIRHS = shl nuw nsw i32 %BFISource, 4   ; ...0ABCD0000
   %BFI = or i32 %BFIRHS, %BFISource         ; ...0ABCDABCD
   %BFItrunc = trunc i32 %BFI to i16
-  store i16 %BFItrunc, i16* %ptr, align 4
+  store i16 %BFItrunc, ptr %ptr, align 4
   br label %end
 
 end:

diff  --git a/llvm/test/CodeGen/AArch64/arm64-blockaddress.ll b/llvm/test/CodeGen/AArch64/arm64-blockaddress.ll
index 68b8fcbefc5ab..69b1872be87db 100644
--- a/llvm/test/CodeGen/AArch64/arm64-blockaddress.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-blockaddress.ll
@@ -24,10 +24,10 @@ entry:
 ; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g3:[[DEST_LBL]]
 
   %recover = alloca i64, align 8
-  store volatile i64 ptrtoint (i8* blockaddress(@t, %mylabel) to i64), i64* %recover, align 8
+  store volatile i64 ptrtoint (ptr blockaddress(@t, %mylabel) to i64), ptr %recover, align 8
   br label %mylabel
 
 mylabel:
-  %tmp = load volatile i64, i64* %recover, align 8
+  %tmp = load volatile i64, ptr %recover, align 8
   ret i64 %tmp
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-build-vector.ll b/llvm/test/CodeGen/AArch64/arm64-build-vector.ll
index 0dc369c90761f..9b4660c94790c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-build-vector.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-build-vector.ll
@@ -53,15 +53,14 @@ define <8 x i16> @concat_2_build_vector(<4 x i16> %in0) {
 ; an equivalent integer vector and BITCAST-ing that. This case checks that
 ; normalizing the vector generates a valid result. The choice of the
 ; constant prevents earlier passes from replacing the BUILD_VECTOR.
-define void @widen_f16_build_vector(half* %addr) {
+define void @widen_f16_build_vector(ptr %addr) {
 ; CHECK-LABEL: widen_f16_build_vector:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #13294
 ; CHECK-NEXT:    movk w8, #13294, lsl #16
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
-  %1 = bitcast half* %addr to <2 x half>*
-  store <2 x half> <half 0xH33EE, half 0xH33EE>, <2 x half>* %1, align 2
+  store <2 x half> <half 0xH33EE, half 0xH33EE>, ptr %addr, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-builtins-linux.ll b/llvm/test/CodeGen/AArch64/arm64-builtins-linux.ll
index 7d5684778d060..63f4cb0c1fdaf 100644
--- a/llvm/test/CodeGen/AArch64/arm64-builtins-linux.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-builtins-linux.ll
@@ -5,9 +5,9 @@
 ; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+tpidr-el3 | FileCheck --check-prefix=USEEL3 %s
 
 ; Function Attrs: nounwind readnone
-declare i8* @llvm.thread.pointer() #1
+declare ptr @llvm.thread.pointer() #1
 
-define i8* @thread_pointer() {
+define ptr @thread_pointer() {
 ; CHECK: thread_pointer:
 ; CHECK: mrs {{x[0-9]+}}, TPIDR_EL0
 ; USEEL1: thread_pointer:
@@ -16,6 +16,6 @@ define i8* @thread_pointer() {
 ; USEEL2: mrs {{x[0-9]+}}, TPIDR_EL2
 ; USEEL3: thread_pointer:
 ; USEEL3: mrs {{x[0-9]+}}, TPIDR_EL3
-  %1 = tail call i8* @llvm.thread.pointer()
-  ret i8* %1
+  %1 = tail call ptr @llvm.thread.pointer()
+  ret ptr %1
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll b/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll
index 9b6d1f3a1867e..7745f8dab1c2b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
 ; RUN: llc -global-isel < %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
 
-@t = weak global i32 ()* null
+@t = weak global ptr null
 @x = external global i32, align 4
 
 define void @t2() {
@@ -10,7 +10,7 @@ define void @t2() {
 ; CHECK: ldr	x[[ADDR:[0-9]+]], [x[[GOTADDR]], _t@GOTPAGEOFF]
 ; CHECK: ldr	x[[DEST:[0-9]+]], [x[[ADDR]]]
 ; CHECK: br	x[[DEST]]
-  %tmp = load i32 ()*, i32 ()** @t
+  %tmp = load ptr, ptr @t
   %tmp.upgrd.2 = tail call i32 %tmp()
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-cast-opt.ll b/llvm/test/CodeGen/AArch64/arm64-cast-opt.ll
index 2f5d16b257952..06c496dbcafe1 100644
--- a/llvm/test/CodeGen/AArch64/arm64-cast-opt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-cast-opt.ll
@@ -3,7 +3,7 @@
 ; Zero truncation is not necessary when the values are extended properly
 ; already.
 
-@block = common global i8* null, align 8
+@block = common global ptr null, align 8
 
 define zeroext i8 @foo(i32 %i1, i32 %i2) {
 ; CHECK-LABEL: foo:
@@ -11,12 +11,12 @@ define zeroext i8 @foo(i32 %i1, i32 %i2) {
 ; CHECK-NOT: and
 entry:
   %idxprom = sext i32 %i1 to i64
-  %0 = load i8*, i8** @block, align 8
-  %arrayidx = getelementptr inbounds i8, i8* %0, i64 %idxprom
-  %1 = load i8, i8* %arrayidx, align 1
+  %0 = load ptr, ptr @block, align 8
+  %arrayidx = getelementptr inbounds i8, ptr %0, i64 %idxprom
+  %1 = load i8, ptr %arrayidx, align 1
   %idxprom1 = sext i32 %i2 to i64
-  %arrayidx2 = getelementptr inbounds i8, i8* %0, i64 %idxprom1
-  %2 = load i8, i8* %arrayidx2, align 1
+  %arrayidx2 = getelementptr inbounds i8, ptr %0, i64 %idxprom1
+  %2 = load i8, ptr %arrayidx2, align 1
   %cmp = icmp eq i8 %1, %2
   br i1 %cmp, label %return, label %if.then
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll b/llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll
index fa2343152f72b..358a2c3404815 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll
@@ -3,10 +3,10 @@ target triple = "arm64-apple-ios7.0.0"
 
 @channelColumns = external global i64
 @channelTracks = external global i64
-@mazeRoute = external hidden unnamed_addr global i8*, align 8
-@TOP = external global i64*
-@BOT = external global i64*
-@netsAssign = external global i64*
+@mazeRoute = external hidden unnamed_addr global ptr, align 8
+@TOP = external global ptr
+@BOT = external global ptr
+@netsAssign = external global ptr
 
 ; Function from yacr2/maze.c
 ; The branch at the end of %if.then is driven by %cmp5 and %cmp6.
@@ -21,7 +21,7 @@ target triple = "arm64-apple-ios7.0.0"
 ; CHECK-NEXT: b.lo
 define i32 @Maze1() nounwind ssp {
 entry:
-  %0 = load i64, i64* @channelColumns, align 8, !tbaa !0
+  %0 = load i64, ptr @channelColumns, align 8, !tbaa !0
   %cmp90 = icmp eq i64 %0, 0
   br i1 %cmp90, label %for.end, label %for.body
 
@@ -29,51 +29,51 @@ for.body:                                         ; preds = %for.inc, %entry
   %1 = phi i64 [ %0, %entry ], [ %37, %for.inc ]
   %i.092 = phi i64 [ 1, %entry ], [ %inc53, %for.inc ]
   %numLeft.091 = phi i32 [ 0, %entry ], [ %numLeft.1, %for.inc ]
-  %2 = load i8*, i8** @mazeRoute, align 8, !tbaa !3
-  %arrayidx = getelementptr inbounds i8, i8* %2, i64 %i.092
-  %3 = load i8, i8* %arrayidx, align 1, !tbaa !1
+  %2 = load ptr, ptr @mazeRoute, align 8, !tbaa !3
+  %arrayidx = getelementptr inbounds i8, ptr %2, i64 %i.092
+  %3 = load i8, ptr %arrayidx, align 1, !tbaa !1
   %tobool = icmp eq i8 %3, 0
   br i1 %tobool, label %for.inc, label %if.then
 
 if.then:                                          ; preds = %for.body
-  %4 = load i64*, i64** @TOP, align 8, !tbaa !3
-  %arrayidx1 = getelementptr inbounds i64, i64* %4, i64 %i.092
-  %5 = load i64, i64* %arrayidx1, align 8, !tbaa !0
-  %6 = load i64*, i64** @netsAssign, align 8, !tbaa !3
-  %arrayidx2 = getelementptr inbounds i64, i64* %6, i64 %5
-  %7 = load i64, i64* %arrayidx2, align 8, !tbaa !0
-  %8 = load i64*, i64** @BOT, align 8, !tbaa !3
-  %arrayidx3 = getelementptr inbounds i64, i64* %8, i64 %i.092
-  %9 = load i64, i64* %arrayidx3, align 8, !tbaa !0
-  %arrayidx4 = getelementptr inbounds i64, i64* %6, i64 %9
-  %10 = load i64, i64* %arrayidx4, align 8, !tbaa !0
+  %4 = load ptr, ptr @TOP, align 8, !tbaa !3
+  %arrayidx1 = getelementptr inbounds i64, ptr %4, i64 %i.092
+  %5 = load i64, ptr %arrayidx1, align 8, !tbaa !0
+  %6 = load ptr, ptr @netsAssign, align 8, !tbaa !3
+  %arrayidx2 = getelementptr inbounds i64, ptr %6, i64 %5
+  %7 = load i64, ptr %arrayidx2, align 8, !tbaa !0
+  %8 = load ptr, ptr @BOT, align 8, !tbaa !3
+  %arrayidx3 = getelementptr inbounds i64, ptr %8, i64 %i.092
+  %9 = load i64, ptr %arrayidx3, align 8, !tbaa !0
+  %arrayidx4 = getelementptr inbounds i64, ptr %6, i64 %9
+  %10 = load i64, ptr %arrayidx4, align 8, !tbaa !0
   %cmp5 = icmp ugt i64 %i.092, 1
   %cmp6 = icmp ugt i64 %10, 1
   %or.cond = and i1 %cmp5, %cmp6
   br i1 %or.cond, label %land.lhs.true7, label %if.else
 
 land.lhs.true7:                                   ; preds = %if.then
-  %11 = load i64, i64* @channelTracks, align 8, !tbaa !0
+  %11 = load i64, ptr @channelTracks, align 8, !tbaa !0
   %add = add i64 %11, 1
   %call = tail call fastcc i32 @Maze1Mech(i64 %i.092, i64 %add, i64 %10, i64 0, i64 %7, i32 -1, i32 -1)
   %tobool8 = icmp eq i32 %call, 0
   br i1 %tobool8, label %land.lhs.true7.if.else_crit_edge, label %if.then9
 
 land.lhs.true7.if.else_crit_edge:                 ; preds = %land.lhs.true7
-  %.pre = load i64, i64* @channelColumns, align 8, !tbaa !0
+  %.pre = load i64, ptr @channelColumns, align 8, !tbaa !0
   br label %if.else
 
 if.then9:                                         ; preds = %land.lhs.true7
-  %12 = load i8*, i8** @mazeRoute, align 8, !tbaa !3
-  %arrayidx10 = getelementptr inbounds i8, i8* %12, i64 %i.092
-  store i8 0, i8* %arrayidx10, align 1, !tbaa !1
-  %13 = load i64*, i64** @TOP, align 8, !tbaa !3
-  %arrayidx11 = getelementptr inbounds i64, i64* %13, i64 %i.092
-  %14 = load i64, i64* %arrayidx11, align 8, !tbaa !0
+  %12 = load ptr, ptr @mazeRoute, align 8, !tbaa !3
+  %arrayidx10 = getelementptr inbounds i8, ptr %12, i64 %i.092
+  store i8 0, ptr %arrayidx10, align 1, !tbaa !1
+  %13 = load ptr, ptr @TOP, align 8, !tbaa !3
+  %arrayidx11 = getelementptr inbounds i64, ptr %13, i64 %i.092
+  %14 = load i64, ptr %arrayidx11, align 8, !tbaa !0
   tail call fastcc void @CleanNet(i64 %14)
-  %15 = load i64*, i64** @BOT, align 8, !tbaa !3
-  %arrayidx12 = getelementptr inbounds i64, i64* %15, i64 %i.092
-  %16 = load i64, i64* %arrayidx12, align 8, !tbaa !0
+  %15 = load ptr, ptr @BOT, align 8, !tbaa !3
+  %arrayidx12 = getelementptr inbounds i64, ptr %15, i64 %i.092
+  %16 = load i64, ptr %arrayidx12, align 8, !tbaa !0
   tail call fastcc void @CleanNet(i64 %16)
   br label %for.inc
 
@@ -84,23 +84,23 @@ if.else:                                          ; preds = %land.lhs.true7.if.e
   br i1 %or.cond89, label %land.lhs.true16, label %if.else24
 
 land.lhs.true16:                                  ; preds = %if.else
-  %18 = load i64, i64* @channelTracks, align 8, !tbaa !0
+  %18 = load i64, ptr @channelTracks, align 8, !tbaa !0
   %add17 = add i64 %18, 1
   %call18 = tail call fastcc i32 @Maze1Mech(i64 %i.092, i64 %add17, i64 %10, i64 0, i64 %7, i32 1, i32 -1)
   %tobool19 = icmp eq i32 %call18, 0
   br i1 %tobool19, label %if.else24, label %if.then20
 
 if.then20:                                        ; preds = %land.lhs.true16
-  %19 = load i8*, i8** @mazeRoute, align 8, !tbaa !3
-  %arrayidx21 = getelementptr inbounds i8, i8* %19, i64 %i.092
-  store i8 0, i8* %arrayidx21, align 1, !tbaa !1
-  %20 = load i64*, i64** @TOP, align 8, !tbaa !3
-  %arrayidx22 = getelementptr inbounds i64, i64* %20, i64 %i.092
-  %21 = load i64, i64* %arrayidx22, align 8, !tbaa !0
+  %19 = load ptr, ptr @mazeRoute, align 8, !tbaa !3
+  %arrayidx21 = getelementptr inbounds i8, ptr %19, i64 %i.092
+  store i8 0, ptr %arrayidx21, align 1, !tbaa !1
+  %20 = load ptr, ptr @TOP, align 8, !tbaa !3
+  %arrayidx22 = getelementptr inbounds i64, ptr %20, i64 %i.092
+  %21 = load i64, ptr %arrayidx22, align 8, !tbaa !0
   tail call fastcc void @CleanNet(i64 %21)
-  %22 = load i64*, i64** @BOT, align 8, !tbaa !3
-  %arrayidx23 = getelementptr inbounds i64, i64* %22, i64 %i.092
-  %23 = load i64, i64* %arrayidx23, align 8, !tbaa !0
+  %22 = load ptr, ptr @BOT, align 8, !tbaa !3
+  %arrayidx23 = getelementptr inbounds i64, ptr %22, i64 %i.092
+  %23 = load i64, ptr %arrayidx23, align 8, !tbaa !0
   tail call fastcc void @CleanNet(i64 %23)
   br label %for.inc
 
@@ -108,7 +108,7 @@ if.else24:                                        ; preds = %land.lhs.true16, %i
   br i1 %cmp5, label %land.lhs.true26, label %if.else36
 
 land.lhs.true26:                                  ; preds = %if.else24
-  %24 = load i64, i64* @channelTracks, align 8, !tbaa !0
+  %24 = load i64, ptr @channelTracks, align 8, !tbaa !0
   %cmp27 = icmp ult i64 %7, %24
   br i1 %cmp27, label %land.lhs.true28, label %if.else36
 
@@ -119,26 +119,26 @@ land.lhs.true28:                                  ; preds = %land.lhs.true26
   br i1 %tobool31, label %if.else36, label %if.then32
 
 if.then32:                                        ; preds = %land.lhs.true28
-  %25 = load i8*, i8** @mazeRoute, align 8, !tbaa !3
-  %arrayidx33 = getelementptr inbounds i8, i8* %25, i64 %i.092
-  store i8 0, i8* %arrayidx33, align 1, !tbaa !1
-  %26 = load i64*, i64** @TOP, align 8, !tbaa !3
-  %arrayidx34 = getelementptr inbounds i64, i64* %26, i64 %i.092
-  %27 = load i64, i64* %arrayidx34, align 8, !tbaa !0
+  %25 = load ptr, ptr @mazeRoute, align 8, !tbaa !3
+  %arrayidx33 = getelementptr inbounds i8, ptr %25, i64 %i.092
+  store i8 0, ptr %arrayidx33, align 1, !tbaa !1
+  %26 = load ptr, ptr @TOP, align 8, !tbaa !3
+  %arrayidx34 = getelementptr inbounds i64, ptr %26, i64 %i.092
+  %27 = load i64, ptr %arrayidx34, align 8, !tbaa !0
   tail call fastcc void @CleanNet(i64 %27)
-  %28 = load i64*, i64** @BOT, align 8, !tbaa !3
-  %arrayidx35 = getelementptr inbounds i64, i64* %28, i64 %i.092
-  %29 = load i64, i64* %arrayidx35, align 8, !tbaa !0
+  %28 = load ptr, ptr @BOT, align 8, !tbaa !3
+  %arrayidx35 = getelementptr inbounds i64, ptr %28, i64 %i.092
+  %29 = load i64, ptr %arrayidx35, align 8, !tbaa !0
   tail call fastcc void @CleanNet(i64 %29)
   br label %for.inc
 
 if.else36:                                        ; preds = %land.lhs.true28, %land.lhs.true26, %if.else24
-  %30 = load i64, i64* @channelColumns, align 8, !tbaa !0
+  %30 = load i64, ptr @channelColumns, align 8, !tbaa !0
   %cmp37 = icmp ult i64 %i.092, %30
   br i1 %cmp37, label %land.lhs.true38, label %if.else48
 
 land.lhs.true38:                                  ; preds = %if.else36
-  %31 = load i64, i64* @channelTracks, align 8, !tbaa !0
+  %31 = load i64, ptr @channelTracks, align 8, !tbaa !0
   %cmp39 = icmp ult i64 %7, %31
   br i1 %cmp39, label %land.lhs.true40, label %if.else48
 
@@ -149,16 +149,16 @@ land.lhs.true40:                                  ; preds = %land.lhs.true38
   br i1 %tobool43, label %if.else48, label %if.then44
 
 if.then44:                                        ; preds = %land.lhs.true40
-  %32 = load i8*, i8** @mazeRoute, align 8, !tbaa !3
-  %arrayidx45 = getelementptr inbounds i8, i8* %32, i64 %i.092
-  store i8 0, i8* %arrayidx45, align 1, !tbaa !1
-  %33 = load i64*, i64** @TOP, align 8, !tbaa !3
-  %arrayidx46 = getelementptr inbounds i64, i64* %33, i64 %i.092
-  %34 = load i64, i64* %arrayidx46, align 8, !tbaa !0
+  %32 = load ptr, ptr @mazeRoute, align 8, !tbaa !3
+  %arrayidx45 = getelementptr inbounds i8, ptr %32, i64 %i.092
+  store i8 0, ptr %arrayidx45, align 1, !tbaa !1
+  %33 = load ptr, ptr @TOP, align 8, !tbaa !3
+  %arrayidx46 = getelementptr inbounds i64, ptr %33, i64 %i.092
+  %34 = load i64, ptr %arrayidx46, align 8, !tbaa !0
   tail call fastcc void @CleanNet(i64 %34)
-  %35 = load i64*, i64** @BOT, align 8, !tbaa !3
-  %arrayidx47 = getelementptr inbounds i64, i64* %35, i64 %i.092
-  %36 = load i64, i64* %arrayidx47, align 8, !tbaa !0
+  %35 = load ptr, ptr @BOT, align 8, !tbaa !3
+  %arrayidx47 = getelementptr inbounds i64, ptr %35, i64 %i.092
+  %36 = load i64, ptr %arrayidx47, align 8, !tbaa !0
   tail call fastcc void @CleanNet(i64 %36)
   br label %for.inc
 
@@ -169,7 +169,7 @@ if.else48:                                        ; preds = %land.lhs.true40, %l
 for.inc:                                          ; preds = %if.else48, %if.then44, %if.then32, %if.then20, %if.then9, %for.body
   %numLeft.1 = phi i32 [ %numLeft.091, %if.then9 ], [ %numLeft.091, %if.then20 ], [ %numLeft.091, %if.then32 ], [ %numLeft.091, %if.then44 ], [ %inc, %if.else48 ], [ %numLeft.091, %for.body ]
   %inc53 = add i64 %i.092, 1
-  %37 = load i64, i64* @channelColumns, align 8, !tbaa !0
+  %37 = load i64, ptr @channelColumns, align 8, !tbaa !0
   %cmp = icmp ugt i64 %inc53, %37
   br i1 %cmp, label %for.end, label %for.body
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
index e36aa946323e7..789dd66b7103d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
@@ -426,7 +426,7 @@ if.end:
 declare i32 @foo()
 
 %str1 = type { %str2 }
-%str2 = type { [24 x i8], i8*, i32, %str1*, i32, [4 x i8], %str1*, %str1*, %str1*, %str1*, %str1*, %str1*, %str1*, %str1*, %str1*, i8*, i8, i8*, %str1*, i8* }
+%str2 = type { [24 x i8], ptr, i32, ptr, i32, [4 x i8], ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i8, ptr, ptr, ptr }
 
 ; Test case distilled from 126.gcc.
 ; The phi in sw.bb.i.i gets multiple operands for the %entry predecessor.
@@ -449,11 +449,10 @@ if.end85:
   ret void
 
 sw.bb.i.i:
-  %ref.tr.i.i = phi %str1* [ %0, %sw.bb.i.i ], [ undef, %entry ]
-  %operands.i.i = getelementptr inbounds %str1, %str1* %ref.tr.i.i, i64 0, i32 0, i32 2
-  %arrayidx.i.i = bitcast i32* %operands.i.i to %str1**
-  %0 = load %str1*, %str1** %arrayidx.i.i, align 8
-  %code1.i.i.phi.trans.insert = getelementptr inbounds %str1, %str1* %0, i64 0, i32 0, i32 0, i64 16
+  %ref.tr.i.i = phi ptr [ %0, %sw.bb.i.i ], [ undef, %entry ]
+  %operands.i.i = getelementptr inbounds %str1, ptr %ref.tr.i.i, i64 0, i32 0, i32 2
+  %0 = load ptr, ptr %operands.i.i, align 8
+  %code1.i.i.phi.trans.insert = getelementptr inbounds %str1, ptr %0, i64 0, i32 0, i32 0, i64 16
   br label %sw.bb.i.i
 }
 
@@ -690,7 +689,7 @@ define i64 @select_noccmp2(i64 %v1, i64 %v2, i64 %v3, i64 %r) {
   %or = or i1 %c0, %c1
   %sel = select i1 %or, i64 0, i64 %r
   %ext = sext i1 %or to i32
-  store volatile i32 %ext, i32* @g
+  store volatile i32 %ext, ptr @g
   ret i64 %sel
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-coalesce-ext.ll b/llvm/test/CodeGen/AArch64/arm64-coalesce-ext.ll
index d5064f6d16e63..4b9591ffeb2ba 100644
--- a/llvm/test/CodeGen/AArch64/arm64-coalesce-ext.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-coalesce-ext.ll
@@ -1,16 +1,16 @@
 ; RUN: llc -mtriple=arm64-apple-darwin < %s | FileCheck %s
 ; Check that the peephole optimizer knows about sext and zext instructions.
 ; CHECK: test1sext
-define i32 @test1sext(i64 %A, i64 %B, i32* %P, i64 *%P2) nounwind {
+define i32 @test1sext(i64 %A, i64 %B, ptr %P, ptr %P2) nounwind {
   %C = add i64 %A, %B
   ; CHECK: add x[[SUM:[0-9]+]], x0, x1
   %D = trunc i64 %C to i32
   %E = shl i64 %C, 32
   %F = ashr i64 %E, 32
   ; CHECK: sxtw x[[EXT:[0-9]+]], w[[SUM]]
-  store volatile i64 %F, i64 *%P2
+  store volatile i64 %F, ptr %P2
   ; CHECK: str x[[EXT]]
-  store volatile i32 %D, i32* %P
+  store volatile i32 %D, ptr %P
   ; Reuse low bits of extended register, don't extend live range of SUM.
   ; CHECK: str w[[SUM]]
   ret i32 %D

diff  --git a/llvm/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll b/llvm/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll
index 3d23dcd3cd294..889a76b37ebe1 100644
--- a/llvm/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll
@@ -6,18 +6,18 @@
 ; so that SelectionDAG can select it with the load.
 ;
 ; OPTALL-LABEL: @foo
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p
 ; OPTALL-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
-; OPTALL: store i32 [[ZEXT]], i32* %q
+; OPTALL: store i32 [[ZEXT]], ptr %q
 ; OPTALL: ret
-define void @foo(i8* %p, i32* %q) {
+define void @foo(ptr %p, ptr %q) {
 entry:
-  %t = load i8, i8* %p
+  %t = load i8, ptr %p
   %a = icmp slt i8 %t, 20
   br i1 %a, label %true, label %false
 true:
   %s = zext i8 %t to i32
-  store i32 %s, i32* %q
+  store i32 %s, ptr %q
   ret void
 false:
   ret void
@@ -26,23 +26,23 @@ false:
 ; Check that we manage to form a zextload is an operation with only one
 ; argument to explicitly extend is in the way.
 ; OPTALL-LABEL: @promoteOneArg
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p
 ; OPT-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
 ; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nuw i32 [[ZEXT]], 2
 ; Make sure the operation is not promoted when the promotion pass is disabled.
 ; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw i8 [[LD]], 2
 ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = zext i8 [[ADD]] to i32
-; OPTALL: store i32 [[RES]], i32* %q
+; OPTALL: store i32 [[RES]], ptr %q
 ; OPTALL: ret
-define void @promoteOneArg(i8* %p, i32* %q) {
+define void @promoteOneArg(ptr %p, ptr %q) {
 entry:
-  %t = load i8, i8* %p
+  %t = load i8, ptr %p
   %add = add nuw i8 %t, 2
   %a = icmp slt i8 %t, 20
   br i1 %a, label %true, label %false
 true:
   %s = zext i8 %add to i32
-  store i32 %s, i32* %q
+  store i32 %s, ptr %q
   ret void
 false:
   ret void
@@ -52,22 +52,22 @@ false:
 ; argument to explicitly extend is in the way.
 ; Version with sext.
 ; OPTALL-LABEL: @promoteOneArgSExt
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p
 ; OPT-NEXT: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i8 [[LD]] to i32
 ; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nsw i32 [[SEXT]], 2
 ; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i8 [[LD]], 2
 ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = sext i8 [[ADD]] to i32
-; OPTALL: store i32 [[RES]], i32* %q
+; OPTALL: store i32 [[RES]], ptr %q
 ; OPTALL: ret
-define void @promoteOneArgSExt(i8* %p, i32* %q) {
+define void @promoteOneArgSExt(ptr %p, ptr %q) {
 entry:
-  %t = load i8, i8* %p
+  %t = load i8, ptr %p
   %add = add nsw i8 %t, 2
   %a = icmp slt i8 %t, 20
   br i1 %a, label %true, label %false
 true:
   %s = sext i8 %add to i32
-  store i32 %s, i32* %q
+  store i32 %s, ptr %q
   ret void
 false:
   ret void
@@ -84,7 +84,7 @@ false:
 ; transformation, the regular heuristic does not apply the optimization.
 ;
 ; OPTALL-LABEL: @promoteTwoArgZext
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p
 ;
 ; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
 ; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = zext i8 %b to i32
@@ -96,17 +96,17 @@ false:
 ; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw i8 [[LD]], %b
 ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = zext i8 [[ADD]] to i32
 ;
-; OPTALL: store i32 [[RES]], i32* %q
+; OPTALL: store i32 [[RES]], ptr %q
 ; OPTALL: ret
-define void @promoteTwoArgZext(i8* %p, i32* %q, i8 %b) {
+define void @promoteTwoArgZext(ptr %p, ptr %q, i8 %b) {
 entry:
-  %t = load i8, i8* %p
+  %t = load i8, ptr %p
   %add = add nuw i8 %t, %b
   %a = icmp slt i8 %t, 20
   br i1 %a, label %true, label %false
 true:
   %s = zext i8 %add to i32
-  store i32 %s, i32* %q
+  store i32 %s, ptr %q
   ret void
 false:
   ret void
@@ -116,7 +116,7 @@ false:
 ; arguments to explicitly extend is in the way.
 ; Version with sext.
 ; OPTALL-LABEL: @promoteTwoArgSExt
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p
 ;
 ; STRESS-NEXT: [[SEXTLD:%[a-zA-Z_0-9-]+]] = sext i8 [[LD]] to i32
 ; STRESS-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i8 %b to i32
@@ -127,17 +127,17 @@ false:
 ;
 ; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i8 [[LD]], %b
 ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = sext i8 [[ADD]] to i32
-; OPTALL: store i32 [[RES]], i32* %q
+; OPTALL: store i32 [[RES]], ptr %q
 ; OPTALL: ret
-define void @promoteTwoArgSExt(i8* %p, i32* %q, i8 %b) {
+define void @promoteTwoArgSExt(ptr %p, ptr %q, i8 %b) {
 entry:
-  %t = load i8, i8* %p
+  %t = load i8, ptr %p
   %add = add nsw i8 %t, %b
   %a = icmp slt i8 %t, 20
   br i1 %a, label %true, label %false
 true:
   %s = sext i8 %add to i32
-  store i32 %s, i32* %q
+  store i32 %s, ptr %q
   ret void
 false:
   ret void
@@ -146,7 +146,7 @@ false:
 ; Check that we do not a zextload if we need to introduce more than
 ; one additional extension.
 ; OPTALL-LABEL: @promoteThreeArgZext
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p
 ;
 ; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
 ; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = zext i8 %b to i32
@@ -162,18 +162,18 @@ false:
 ; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw i8
 ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = zext i8 [[ADD]] to i32
 ;
-; OPTALL: store i32 [[RES]], i32* %q
+; OPTALL: store i32 [[RES]], ptr %q
 ; OPTALL: ret
-define void @promoteThreeArgZext(i8* %p, i32* %q, i8 %b, i8 %c) {
+define void @promoteThreeArgZext(ptr %p, ptr %q, i8 %b, i8 %c) {
 entry:
-  %t = load i8, i8* %p
+  %t = load i8, ptr %p
   %tmp = add nuw i8 %t, %b
   %add = add nuw i8 %tmp, %c
   %a = icmp slt i8 %t, 20
   br i1 %a, label %true, label %false
 true:
   %s = zext i8 %add to i32
-  store i32 %s, i32* %q
+  store i32 %s, ptr %q
   ret void
 false:
   ret void
@@ -182,7 +182,7 @@ false:
 ; Check that we manage to form a zextload after promoting and merging
 ; two extensions.
 ; OPTALL-LABEL: @promoteMergeExtArgZExt
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p
 ;
 ; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
 ; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = zext i16 %b to i32
@@ -196,18 +196,18 @@ false:
 ; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw i16 [[ZEXTLD]], %b
 ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = zext i16 [[ADD]] to i32
 ;
-; OPTALL: store i32 [[RES]], i32* %q
+; OPTALL: store i32 [[RES]], ptr %q
 ; OPTALL: ret
-define void @promoteMergeExtArgZExt(i8* %p, i32* %q, i16 %b) {
+define void @promoteMergeExtArgZExt(ptr %p, ptr %q, i16 %b) {
 entry:
-  %t = load i8, i8* %p
+  %t = load i8, ptr %p
   %ext = zext i8 %t to i16
   %add = add nuw i16 %ext, %b
   %a = icmp slt i8 %t, 20
   br i1 %a, label %true, label %false
 true:
   %s = zext i16 %add to i32
-  store i32 %s, i32* %q
+  store i32 %s, ptr %q
   ret void
 false:
   ret void
@@ -217,7 +217,7 @@ false:
 ; two extensions.
 ; Version with sext.
 ; OPTALL-LABEL: @promoteMergeExtArgSExt
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p
 ;
 ; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
 ; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = sext i16 %b to i32
@@ -230,18 +230,18 @@ false:
 ; DISABLE: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i16
 ; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i16 [[ZEXTLD]], %b
 ; DISABLE: [[RES:%[a-zA-Z_0-9-]+]] = sext i16 [[ADD]] to i32
-; OPTALL: store i32 [[RES]], i32* %q
+; OPTALL: store i32 [[RES]], ptr %q
 ; OPTALL: ret
-define void @promoteMergeExtArgSExt(i8* %p, i32* %q, i16 %b) {
+define void @promoteMergeExtArgSExt(ptr %p, ptr %q, i16 %b) {
 entry:
-  %t = load i8, i8* %p
+  %t = load i8, ptr %p
   %ext = zext i8 %t to i16
   %add = add nsw i16 %ext, %b
   %a = icmp slt i8 %t, 20
   br i1 %a, label %true, label %false
 true:
   %s = sext i16 %add to i32
-  store i32 %s, i32* %q
+  store i32 %s, ptr %q
   ret void
 false:
   ret void
@@ -277,10 +277,10 @@ false:
 ; 3 identical zext of %ld. The extensions will be CSE'ed by SDag.
 ;
 ; OPTALL-LABEL: @severalPromotions
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %addr1
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %addr1
 ; OPT-NEXT: [[ZEXTLD1_1:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
 ; OPT-NEXT: [[ZEXTLD1_2:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
-; OPT-NEXT: [[LD2:%[a-zA-Z_0-9-]+]] = load i32, i32* %addr2
+; OPT-NEXT: [[LD2:%[a-zA-Z_0-9-]+]] = load i32, ptr %addr2
 ; OPT-NEXT: [[SEXTLD2:%[a-zA-Z_0-9-]+]] = sext i32 [[LD2]] to i64
 ; OPT-NEXT: [[ZEXTLD1_3:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
 ; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXTLD2]], [[ZEXTLD1_3]]
@@ -298,10 +298,10 @@ false:
 ;
 ; OPTALL: call void @dummy(i64 [[RES]], i64 [[RESZA]], i64 [[RESB]])
 ; OPTALL: ret
-define void @severalPromotions(i8* %addr1, i32* %addr2, i8 %a, i32 %b) {
-  %ld = load i8, i8* %addr1
+define void @severalPromotions(ptr %addr1, ptr %addr2, i8 %a, i32 %b) {
+  %ld = load i8, ptr %addr1
   %zextld = zext i8 %ld to i32
-  %ld2 = load i32, i32* %addr2
+  %ld2 = load i32, ptr %addr2
   %add = add nsw i32 %ld2, %zextld
   %sextadd = sext i32 %add to i64
   %zexta = zext i8 %a to i32
@@ -335,51 +335,51 @@ entry:
 ; to an instruction.
 ; This used to cause a crash.
 ; OPTALL-LABEL: @promotionOfArgEndsUpInValue
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i16, i16* %addr
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i16, ptr %addr
 ;
 ; OPT-NEXT: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i16 [[LD]] to i32
-; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nuw nsw i32 [[SEXT]], zext (i1 icmp ne (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @c, i64 0, i64 1), i32* @a) to i32)
+; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nuw nsw i32 [[SEXT]], zext (i1 icmp ne (ptr getelementptr inbounds ([2 x i32], ptr @c, i64 0, i64 1), ptr @a) to i32)
 ;
-; DISABLE-NEXT: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw nsw i16 [[LD]], zext (i1 icmp ne (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @c, i64 0, i64 1), i32* @a) to i16)
+; DISABLE-NEXT: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw nsw i16 [[LD]], zext (i1 icmp ne (ptr getelementptr inbounds ([2 x i32], ptr @c, i64 0, i64 1), ptr @a) to i16)
 ; DISABLE-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = sext i16 [[ADD]] to i32
 ;
 ; OPTALL-NEXT: ret i32 [[RES]]
-define i32 @promotionOfArgEndsUpInValue(i16* %addr) {
+define i32 @promotionOfArgEndsUpInValue(ptr %addr) {
 entry:
-  %val = load i16, i16* %addr
-  %add = add nuw nsw i16 %val, zext (i1 icmp ne (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @c, i64 0, i64 1), i32* @a) to i16)
+  %val = load i16, ptr %addr
+  %add = add nuw nsw i16 %val, zext (i1 icmp ne (ptr getelementptr inbounds ([2 x i32], ptr @c, i64 0, i64 1), ptr @a) to i16)
   %conv3 = sext i16 %add to i32
   ret i32 %conv3
 }
 
 ; Check that we see that one zext can be derived from the other for free.
 ; OPTALL-LABEL: @promoteTwoArgZextWithSourceExtendedTwice
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p
 ;
 ; OPT-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
 ; OPT-NEXT: [[ZEXT32:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
 ; OPT-NEXT: [[RES32:%[a-zA-Z_0-9-]+]] = add nuw i32 [[ZEXT32]], %b
 ; OPT-NEXT: [[RES64:%[a-zA-Z_0-9-]+]] = add nuw i64 [[ZEXT64]], 12
-; OPT-NEXT: store i32 [[RES32]], i32* %addr
-; OPT-NEXT: store i64 [[RES64]], i64* %q
+; OPT-NEXT: store i32 [[RES32]], ptr %addr
+; OPT-NEXT: store i64 [[RES64]], ptr %q
 ;
 ; DISABLE-NEXT: [[ZEXT32:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
 ; DISABLE-NEXT: [[RES32:%[a-zA-Z_0-9-]+]] = add nuw i32 [[ZEXT32]], %b
 ; DISABLE-NEXT: [[RES2_32:%[a-zA-Z_0-9-]+]] = add nuw i32 [[ZEXT32]], 12
-; DISABLE-NEXT: store i32 [[RES32]], i32* %addr
+; DISABLE-NEXT: store i32 [[RES32]], ptr %addr
 ; DISABLE-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i32 [[RES2_32]] to i64
-; DISABLE-NEXT: store i64 [[ZEXT64]], i64* %q
+; DISABLE-NEXT: store i64 [[ZEXT64]], ptr %q
 ;
 ; OPTALL-NEXT: ret void
-define void @promoteTwoArgZextWithSourceExtendedTwice(i8* %p, i64* %q, i32 %b, i32* %addr) {
+define void @promoteTwoArgZextWithSourceExtendedTwice(ptr %p, ptr %q, i32 %b, ptr %addr) {
 entry:
-  %t = load i8, i8* %p
+  %t = load i8, ptr %p
   %zextt = zext i8 %t to i32
   %add = add nuw i32 %zextt, %b
   %add2 = add nuw i32 %zextt, 12
-  store i32 %add, i32 *%addr
+  store i32 %add, ptr %addr
   %s = zext i32 %add2 to i64
-  store i64 %s, i64* %q
+  store i64 %s, ptr %q
   ret void
 }
 
@@ -388,7 +388,7 @@ entry:
 ; all the way through the load we would end up with a free zext and a
 ; non-free sext (of %b).
 ; OPTALL-LABEL: @doNotPromoteFreeSExtFromAddrMode
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p
 ;
 ; STRESS-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
 ; STRESS-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i32 %b to i64
@@ -403,17 +403,17 @@ entry:
 ; DISABLE-NEXT: [[RES32:%[a-zA-Z_0-9-]+]] = add nsw i32 [[ZEXT32]], %b
 ; DISABLE-NEXT: [[IDX64:%[a-zA-Z_0-9-]+]] = sext i32 [[RES32]] to i64
 ;
-; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i32, i32* %addr, i64 [[IDX64]]
-; OPTALL-NEXT: store i32 [[RES32]], i32* [[GEP]]
+; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i32, ptr %addr, i64 [[IDX64]]
+; OPTALL-NEXT: store i32 [[RES32]], ptr [[GEP]]
 ; OPTALL-NEXT: ret void
-define void @doNotPromoteFreeSExtFromAddrMode(i8* %p, i32 %b, i32* %addr) {
+define void @doNotPromoteFreeSExtFromAddrMode(ptr %p, i32 %b, ptr %addr) {
 entry:
-  %t = load i8, i8* %p
+  %t = load i8, ptr %p
   %zextt = zext i8 %t to i32
   %add = add nsw i32 %zextt, %b
   %idx64 = sext i32 %add to i64
-  %staddr = getelementptr inbounds i32, i32* %addr, i64 %idx64
-  store i32 %add, i32 *%staddr
+  %staddr = getelementptr inbounds i32, ptr %addr, i64 %idx64
+  store i32 %add, ptr %staddr
   ret void
 }
 
@@ -422,7 +422,7 @@ entry:
 ; all the way through the load we would end up with a free zext and a
 ; non-free sext (of %b).
 ; OPTALL-LABEL: @doNotPromoteFreeSExtFromAddrMode64
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p
 ;
 ; STRESS-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
 ; STRESS-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i32 %b to i64
@@ -436,17 +436,17 @@ entry:
 ; DISABLE-NEXT: [[RES32:%[a-zA-Z_0-9-]+]] = add nsw i32 [[ZEXT32]], %b
 ; DISABLE-NEXT: [[IDX64:%[a-zA-Z_0-9-]+]] = sext i32 [[RES32]] to i64
 ;
-; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i64, i64* %addr, i64 [[IDX64]]
-; OPTALL-NEXT: store i64 %stuff, i64* [[GEP]]
+; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i64, ptr %addr, i64 [[IDX64]]
+; OPTALL-NEXT: store i64 %stuff, ptr [[GEP]]
 ; OPTALL-NEXT: ret void
-define void @doNotPromoteFreeSExtFromAddrMode64(i8* %p, i32 %b, i64* %addr, i64 %stuff) {
+define void @doNotPromoteFreeSExtFromAddrMode64(ptr %p, i32 %b, ptr %addr, i64 %stuff) {
 entry:
-  %t = load i8, i8* %p
+  %t = load i8, ptr %p
   %zextt = zext i8 %t to i32
   %add = add nsw i32 %zextt, %b
   %idx64 = sext i32 %add to i64
-  %staddr = getelementptr inbounds i64, i64* %addr, i64 %idx64
-  store i64 %stuff, i64 *%staddr
+  %staddr = getelementptr inbounds i64, ptr %addr, i64 %idx64
+  store i64 %stuff, ptr %staddr
   ret void
 }
 
@@ -455,7 +455,7 @@ entry:
 ; all the way through the load we would end up with a free zext and a
 ; non-free sext (of %b).
 ; OPTALL-LABEL: @doNotPromoteFreeSExtFromAddrMode128
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p
 ;
 ; STRESS-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
 ; STRESS-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i32 %b to i64
@@ -469,17 +469,17 @@ entry:
 ; DISABLE-NEXT: [[RES32:%[a-zA-Z_0-9-]+]] = add nsw i32 [[ZEXT32]], %b
 ; DISABLE-NEXT: [[IDX64:%[a-zA-Z_0-9-]+]] = sext i32 [[RES32]] to i64
 ;
-; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i128, i128* %addr, i64 [[IDX64]]
-; OPTALL-NEXT: store i128 %stuff, i128* [[GEP]]
+; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i128, ptr %addr, i64 [[IDX64]]
+; OPTALL-NEXT: store i128 %stuff, ptr [[GEP]]
 ; OPTALL-NEXT: ret void
-define void @doNotPromoteFreeSExtFromAddrMode128(i8* %p, i32 %b, i128* %addr, i128 %stuff) {
+define void @doNotPromoteFreeSExtFromAddrMode128(ptr %p, i32 %b, ptr %addr, i128 %stuff) {
 entry:
-  %t = load i8, i8* %p
+  %t = load i8, ptr %p
   %zextt = zext i8 %t to i32
   %add = add nsw i32 %zextt, %b
   %idx64 = sext i32 %add to i64
-  %staddr = getelementptr inbounds i128, i128* %addr, i64 %idx64
-  store i128 %stuff, i128 *%staddr
+  %staddr = getelementptr inbounds i128, ptr %addr, i64 %idx64
+  store i128 %stuff, ptr %staddr
   ret void
 }
 
@@ -489,7 +489,7 @@ entry:
 ; all the way through the load we would end up with a free zext and a
 ; non-free sext (of %b).
 ; OPTALL-LABEL: @promoteSExtFromAddrMode256
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p
 ;
 ; OPT-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
 ; OPT-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i32 %b to i64
@@ -499,17 +499,17 @@ entry:
 ; DISABLE-NEXT: [[RES32:%[a-zA-Z_0-9-]+]] = add nsw i32 [[ZEXT32]], %b
 ; DISABLE-NEXT: [[IDX64:%[a-zA-Z_0-9-]+]] = sext i32 [[RES32]] to i64
 ;
-; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i256, i256* %addr, i64 [[IDX64]]
-; OPTALL-NEXT: store i256 %stuff, i256* [[GEP]]
+; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i256, ptr %addr, i64 [[IDX64]]
+; OPTALL-NEXT: store i256 %stuff, ptr [[GEP]]
 ; OPTALL-NEXT: ret void
-define void @promoteSExtFromAddrMode256(i8* %p, i32 %b, i256* %addr, i256 %stuff) {
+define void @promoteSExtFromAddrMode256(ptr %p, i32 %b, ptr %addr, i256 %stuff) {
 entry:
-  %t = load i8, i8* %p
+  %t = load i8, ptr %p
   %zextt = zext i8 %t to i32
   %add = add nsw i32 %zextt, %b
   %idx64 = sext i32 %add to i64
-  %staddr = getelementptr inbounds i256, i256* %addr, i64 %idx64
-  store i256 %stuff, i256 *%staddr
+  %staddr = getelementptr inbounds i256, ptr %addr, i64 %idx64
+  store i256 %stuff, ptr %staddr
   ret void
 }
 
@@ -522,7 +522,7 @@ entry:
 ; expose more opportunities.
 ; This would need to be fixed at some point.
 ; OPTALL-LABEL: @doNotPromoteFreeZExtFromAddrMode
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p
 ;
 ; This transformation should really happen only for stress mode.
 ; OPT-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
@@ -534,22 +534,22 @@ entry:
 ; DISABLE-NEXT: [[RES32:%[a-zA-Z_0-9-]+]] = add nuw i32 [[ZEXT32]], %b
 ; DISABLE-NEXT: [[IDX64:%[a-zA-Z_0-9-]+]] = zext i32 [[RES32]] to i64
 ;
-; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i32, i32* %addr, i64 [[IDX64]]
-; OPTALL-NEXT: store i32 [[RES32]], i32* [[GEP]]
+; OPTALL-NEXT: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i32, ptr %addr, i64 [[IDX64]]
+; OPTALL-NEXT: store i32 [[RES32]], ptr [[GEP]]
 ; OPTALL-NEXT: ret void
-define void @doNotPromoteFreeZExtFromAddrMode(i8* %p, i32 %b, i32* %addr) {
+define void @doNotPromoteFreeZExtFromAddrMode(ptr %p, i32 %b, ptr %addr) {
 entry:
-  %t = load i8, i8* %p
+  %t = load i8, ptr %p
   %zextt = zext i8 %t to i32
   %add = add nuw i32 %zextt, %b
   %idx64 = zext i32 %add to i64
-  %staddr = getelementptr inbounds i32, i32* %addr, i64 %idx64
-  store i32 %add, i32 *%staddr
+  %staddr = getelementptr inbounds i32, ptr %addr, i64 %idx64
+  store i32 %add, ptr %staddr
   ret void
 }
 
 ; OPTALL-LABEL: @doNotPromoteFreeSExtFromShift
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p
 ;
 ; STRESS-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
 ; STRESS-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i32 %b to i64
@@ -565,9 +565,9 @@ entry:
 ;
 ; OPTALL-NEXT: [[RES64:%[a-zA-Z_0-9-]+]] = shl i64 [[IDX64]], 12
 ; OPTALL-NEXT: ret i64 %staddr
-define i64 @doNotPromoteFreeSExtFromShift(i8* %p, i32 %b) {
+define i64 @doNotPromoteFreeSExtFromShift(ptr %p, i32 %b) {
 entry:
-  %t = load i8, i8* %p
+  %t = load i8, ptr %p
   %zextt = zext i8 %t to i32
   %add = add nsw i32 %zextt, %b
   %idx64 = sext i32 %add to i64
@@ -577,7 +577,7 @@ entry:
 
 ; Same comment as doNotPromoteFreeZExtFromAddrMode.
 ; OPTALL-LABEL: @doNotPromoteFreeZExtFromShift
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, ptr %p
 ;
 ; This transformation should really happen only for stress mode.
 ; OPT-NEXT: [[ZEXT64:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
@@ -590,9 +590,9 @@ entry:
 ;
 ; OPTALL-NEXT: [[RES64:%[a-zA-Z_0-9-]+]] = shl i64 [[IDX64]], 12
 ; OPTALL-NEXT: ret i64 %staddr
-define i64 @doNotPromoteFreeZExtFromShift(i8* %p, i32 %b) {
+define i64 @doNotPromoteFreeZExtFromShift(ptr %p, i32 %b) {
 entry:
-  %t = load i8, i8* %p
+  %t = load i8, ptr %p
   %zextt = zext i8 %t to i32
   %add = add nuw i32 %zextt, %b
   %idx64 = zext i32 %add to i64
@@ -608,9 +608,9 @@ entry:
 ; sext.
 ; This would need to be fixed at some point.
 ; OPTALL-LABEL: @doNotPromoteBecauseOfPairedLoad
-; OPTALL: [[LD0:%[a-zA-Z_0-9-]+]] = load i32, i32* %p
-; OPTALL: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i32, i32* %p, i64 1
-; OPTALL: [[LD1:%[a-zA-Z_0-9-]+]] = load i32, i32* [[GEP]]
+; OPTALL: [[LD0:%[a-zA-Z_0-9-]+]] = load i32, ptr %p
+; OPTALL: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i32, ptr %p, i64 1
+; OPTALL: [[LD1:%[a-zA-Z_0-9-]+]] = load i32, ptr [[GEP]]
 ;
 ; This transformation should really happen only for stress mode.
 ; OPT-NEXT: [[SEXTLD1:%[a-zA-Z_0-9-]+]] = sext i32 [[LD1]] to i64
@@ -623,10 +623,10 @@ entry:
 ; OPTALL-NEXT: [[ZEXTLD0:%[a-zA-Z_0-9-]+]] = zext i32 [[LD0]] to i64
 ; OPTALL-NEXT: [[FINAL:%[a-zA-Z_0-9-]+]] = add i64 [[SEXTRES]], [[ZEXTLD0]]
 ; OPTALL-NEXT: ret i64 [[FINAL]]
-define i64 @doNotPromoteBecauseOfPairedLoad(i32* %p, i32 %cst) {
-  %ld0 = load i32, i32* %p
-  %idxLd1 = getelementptr inbounds i32, i32* %p, i64 1
-  %ld1 = load i32, i32* %idxLd1
+define i64 @doNotPromoteBecauseOfPairedLoad(ptr %p, i32 %cst) {
+  %ld0 = load i32, ptr %p
+  %idxLd1 = getelementptr inbounds i32, ptr %p, i64 1
+  %ld1 = load i32, ptr %idxLd1
   %res = add nsw i32 %ld1, %cst
   %sextres = sext i32 %res to i64
   %zextLd0 = zext i32 %ld0 to i64
@@ -634,17 +634,17 @@ define i64 @doNotPromoteBecauseOfPairedLoad(i32* %p, i32 %cst) {
   ret i64 %final
 }
 
-define i64 @promoteZextShl(i1 %c, i16* %P) {
+define i64 @promoteZextShl(i1 %c, ptr %P) {
 entry:
 ; OPTALL-LABEL: promoteZextShl
 ; OPTALL: entry:
-; OPT: %[[LD:.*]] = load i16, i16* %P
+; OPT: %[[LD:.*]] = load i16, ptr %P
 ; OPT: %[[EXT:.*]] = zext i16 %[[LD]] to i64
 ; OPT: if.then:
 ; OPT: shl nsw i64 %[[EXT]], 1
 ; DISABLE: if.then:
 ; DISABLE: %r = sext i32 %shl2 to i64
-  %ld = load i16, i16* %P
+  %ld = load i16, ptr %P
   br i1 %c, label %end, label %if.then
 if.then:
   %z = zext i16 %ld to i32
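
The hunks above, and every file below, apply the same mechanical rewrite. As an illustration only (the function @sketch is hypothetical and not part of the commit): pointee types drop out of load/store/getelementptr operands, and pointer-to-pointer bitcasts disappear entirely because every pointer is now just "ptr".

; Typed-pointer form (old):
;   define void @sketch(i8* %p, i32* %out) {
;     %q = bitcast i8* %p to i32*
;     %v = load i32, i32* %q
;     store i32 %v, i32* %out
;     ret void
;   }
; Opaque-pointer form (new):
define void @sketch(ptr %p, ptr %out) {
  %v = load i32, ptr %p        ; the bitcast above is no longer needed
  store i32 %v, ptr %out
  ret void
}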

diff  --git a/llvm/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll b/llvm/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll
index 05f467e1934fd..330f47da6310a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll
@@ -8,13 +8,13 @@
 ; to remove arbitrary values, so we have to live with garbage values.
 ; <rdar://problem/16041712>
 
-%"class.H4ISP::H4ISPDevice" = type { i32 (%"class.H4ISP::H4ISPDevice"*, i32, i8*, i8*)*, i8*, i32*, %"class.H4ISP::H4ISPCameraManager"* }
+%"class.H4ISP::H4ISPDevice" = type { ptr, ptr, ptr, ptr }
 
 %"class.H4ISP::H4ISPCameraManager" = type opaque
 
-declare i32 @_ZN5H4ISP11H4ISPDevice32ISP_SelectBestMIPIFrequencyIndexEjPj(%"class.H4ISP::H4ISPDevice"*)
+declare i32 @_ZN5H4ISP11H4ISPDevice32ISP_SelectBestMIPIFrequencyIndexEjPj(ptr)
 
-@pH4ISPDevice = hidden global %"class.H4ISP::H4ISPDevice"* null, align 8
+@pH4ISPDevice = hidden global ptr null, align 8
 
 ; CHECK-LABEL: _foo:
 ; CHECK: ret
@@ -23,14 +23,14 @@ define void @foo() {
 entry:
   br label %if.then83
 if.then83:                                        ; preds = %if.end81
-  %tmp = load %"class.H4ISP::H4ISPDevice"*, %"class.H4ISP::H4ISPDevice"** @pH4ISPDevice, align 8
-  %call84 = call i32 @_ZN5H4ISP11H4ISPDevice32ISP_SelectBestMIPIFrequencyIndexEjPj(%"class.H4ISP::H4ISPDevice"* %tmp) #19
+  %tmp = load ptr, ptr @pH4ISPDevice, align 8
+  %call84 = call i32 @_ZN5H4ISP11H4ISPDevice32ISP_SelectBestMIPIFrequencyIndexEjPj(ptr %tmp) #19
   tail call void asm sideeffect "", "~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27}"()
-  %tmp2 = load %"class.H4ISP::H4ISPDevice"*, %"class.H4ISP::H4ISPDevice"** @pH4ISPDevice, align 8
+  %tmp2 = load ptr, ptr @pH4ISPDevice, align 8
   tail call void asm sideeffect "", "~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x28}"()
-  %pCameraManager.i268 = getelementptr inbounds %"class.H4ISP::H4ISPDevice", %"class.H4ISP::H4ISPDevice"* %tmp2, i64 0, i32 3
-  %tmp3 = load %"class.H4ISP::H4ISPCameraManager"*, %"class.H4ISP::H4ISPCameraManager"** %pCameraManager.i268, align 8
-  %tobool.i269 = icmp eq %"class.H4ISP::H4ISPCameraManager"* %tmp3, null
+  %pCameraManager.i268 = getelementptr inbounds %"class.H4ISP::H4ISPDevice", ptr %tmp2, i64 0, i32 3
+  %tmp3 = load ptr, ptr %pCameraManager.i268, align 8
+  %tobool.i269 = icmp eq ptr %tmp3, null
   br i1 %tobool.i269, label %if.then83, label %end
 end:
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/arm64-collect-loh-str.ll b/llvm/test/CodeGen/AArch64/arm64-collect-loh-str.ll
index 962e36ddb61a7..acc0df12a94e8 100644
--- a/llvm/test/CodeGen/AArch64/arm64-collect-loh-str.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-collect-loh-str.ll
@@ -7,7 +7,7 @@
 ; at least provide a wrong one (with the offset folded
 ; into the definition).
 
-%struct.anon = type { i32*, i32** }
+%struct.anon = type { ptr, ptr }
 
 @pptp_wan_head = internal global %struct.anon zeroinitializer, align 8
 
@@ -16,8 +16,8 @@
 ; CHECK-NOT: AdrpAddStr
 define i32 @pptp_wan_init() {
 entry:
-  store i32* null, i32** getelementptr inbounds (%struct.anon, %struct.anon* @pptp_wan_head, i64 0, i32 0), align 8
-  store i32** getelementptr inbounds (%struct.anon, %struct.anon* @pptp_wan_head, i64 0, i32 0), i32*** getelementptr inbounds (%struct.anon, %struct.anon* @pptp_wan_head, i64 0, i32 1), align 8
+  store ptr null, ptr @pptp_wan_head, align 8
+  store ptr @pptp_wan_head, ptr getelementptr inbounds (%struct.anon, ptr @pptp_wan_head, i64 0, i32 1), align 8
   ret i32 0
 }
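
One detail worth noting in the arm64-collect-loh-str.ll hunk above: a constant getelementptr whose indices are all zero points at the base object itself, so once the pointee type no longer matters it folds away and the first store writes straight through @pptp_wan_head. A minimal sketch of the same folding (the %pair type, @head global and @sketch_fold function are made up for illustration):

%pair = type { ptr, ptr }
@head = global %pair zeroinitializer, align 8

define void @sketch_fold() {
  ; field 0 sits at offset 0, so its address is simply @head
  store ptr null, ptr @head, align 8
  ; field 1 still needs a constant GEP to reach its offset
  store ptr @head, ptr getelementptr inbounds (%pair, ptr @head, i64 0, i32 1), align 8
  ret void
}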
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-const-addr.ll b/llvm/test/CodeGen/AArch64/arm64-const-addr.ll
index bbb1ce4aced72..79807730746bc 100644
--- a/llvm/test/CodeGen/AArch64/arm64-const-addr.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-const-addr.ll
@@ -9,14 +9,14 @@ define i32 @test1() nounwind {
 ; CHECK-NEXT:   movk  w8, #1039, lsl #16
 ; CHECK-NEXT:   ldp w9, w10, [x8, #4]
 ; CHECK:        ldr w8, [x8, #12]
-  %at = inttoptr i64 68141056 to %T*
-  %o1 = getelementptr %T, %T* %at, i32 0, i32 1
-  %t1 = load i32, i32* %o1
-  %o2 = getelementptr %T, %T* %at, i32 0, i32 2
-  %t2 = load i32, i32* %o2
+  %at = inttoptr i64 68141056 to ptr
+  %o1 = getelementptr %T, ptr %at, i32 0, i32 1
+  %t1 = load i32, ptr %o1
+  %o2 = getelementptr %T, ptr %at, i32 0, i32 2
+  %t2 = load i32, ptr %o2
   %a1 = add i32 %t1, %t2
-  %o3 = getelementptr %T, %T* %at, i32 0, i32 3
-  %t3 = load i32, i32* %o3
+  %o3 = getelementptr %T, ptr %at, i32 0, i32 3
+  %t3 = load i32, ptr %o3
   %a2 = add i32 %a1, %t3
   ret i32 %a2
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll b/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll
index 9910a4e0f7391..e6b05f7182f8c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 
 
-define <4 x i16> @fptosi_v4f64_to_v4i16(<4 x double>* %ptr) {
+define <4 x i16> @fptosi_v4f64_to_v4i16(ptr %ptr) {
 ; CHECK-LABEL: fptosi_v4f64_to_v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -12,12 +12,12 @@ define <4 x i16> @fptosi_v4f64_to_v4i16(<4 x double>* %ptr) {
 ; CHECK-NEXT:    xtn v1.2s, v1.2d
 ; CHECK-NEXT:    uzp1 v0.4h, v0.4h, v1.4h
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x double>, <4 x double>* %ptr
+  %tmp1 = load <4 x double>, ptr %ptr
   %tmp2 = fptosi <4 x double> %tmp1 to <4 x i16>
   ret <4 x i16> %tmp2
 }
 
-define <8 x i8> @fptosi_v4f64_to_v4i8(<8 x double>* %ptr) {
+define <8 x i8> @fptosi_v4f64_to_v4i8(ptr %ptr) {
 ; CHECK-LABEL: fptosi_v4f64_to_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #32]
@@ -34,12 +34,12 @@ define <8 x i8> @fptosi_v4f64_to_v4i8(<8 x double>* %ptr) {
 ; CHECK-NEXT:    uzp1 v1.4h, v2.4h, v3.4h
 ; CHECK-NEXT:    uzp1 v0.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x double>, <8 x double>* %ptr
+  %tmp1 = load <8 x double>, ptr %ptr
   %tmp2 = fptosi <8 x double> %tmp1 to <8 x i8>
   ret <8 x i8> %tmp2
 }
 
-define <4 x half> @uitofp_v4i64_to_v4f16(<4 x i64>* %ptr) {
+define <4 x half> @uitofp_v4i64_to_v4f16(ptr %ptr) {
 ; CHECK-LABEL: uitofp_v4i64_to_v4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -49,24 +49,24 @@ define <4 x half> @uitofp_v4i64_to_v4f16(<4 x i64>* %ptr) {
 ; CHECK-NEXT:    fcvtn2 v0.4s, v1.2d
 ; CHECK-NEXT:    fcvtn v0.4h, v0.4s
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i64>, <4 x i64>* %ptr
+  %tmp1 = load <4 x i64>, ptr %ptr
   %tmp2 = uitofp <4 x i64> %tmp1 to <4 x half>
   ret <4 x half> %tmp2
 }
 
-define <4 x i16> @trunc_v4i64_to_v4i16(<4 x i64>* %ptr) {
+define <4 x i16> @trunc_v4i64_to_v4i16(ptr %ptr) {
 ; CHECK-LABEL: trunc_v4i64_to_v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
 ; CHECK-NEXT:    uzp1 v0.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    xtn v0.4h, v0.4s
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i64>, <4 x i64>* %ptr
+  %tmp1 = load <4 x i64>, ptr %ptr
   %tmp2 = trunc <4 x i64> %tmp1 to <4 x i16>
   ret <4 x i16> %tmp2
 }
 
-define <4 x i16> @fptoui_v4f64_to_v4i16(<4 x double>* %ptr) {
+define <4 x i16> @fptoui_v4f64_to_v4i16(ptr %ptr) {
 ; CHECK-LABEL: fptoui_v4f64_to_v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -76,7 +76,7 @@ define <4 x i16> @fptoui_v4f64_to_v4i16(<4 x double>* %ptr) {
 ; CHECK-NEXT:    xtn v1.2s, v1.2d
 ; CHECK-NEXT:    uzp1 v0.4h, v0.4h, v1.4h
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x double>, <4 x double>* %ptr
+  %tmp1 = load <4 x double>, ptr %ptr
   %tmp2 = fptoui <4 x double> %tmp1 to <4 x i16>
   ret <4 x i16> %tmp2
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-copy-tuple.ll b/llvm/test/CodeGen/AArch64/arm64-copy-tuple.ll
index 1803787d729f9..9d71338c9c97a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-copy-tuple.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-copy-tuple.ll
@@ -7,140 +7,133 @@
 ; We use dummy inline asm to force LLVM to generate a COPY between the registers
 ; we want by clobbering all the others.
 
-define void @test_D1D2_from_D0D1(i8* %addr) #0 {
+define void @test_D1D2_from_D0D1(ptr %addr) #0 {
 ; CHECK-LABEL: test_D1D2_from_D0D1:
 ; CHECK: mov.8b v2, v1
 ; CHECK: mov.8b v1, v0
 entry:
-  %addr_v8i8 = bitcast i8* %addr to <8 x i8>*
-  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
+  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr %addr)
   %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
   %vec1 = extractvalue { <8 x i8>, <8 x i8> } %vec, 1
   tail call void asm sideeffect "", "~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
-  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
+  tail call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, ptr %addr)
 
   tail call void asm sideeffect "", "~{v0},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
-  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
+  tail call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, ptr %addr)
   ret void
 }
 
-define void @test_D0D1_from_D1D2(i8* %addr) #0 {
+define void @test_D0D1_from_D1D2(ptr %addr) #0 {
 ; CHECK-LABEL: test_D0D1_from_D1D2:
 ; CHECK: mov.8b v0, v1
 ; CHECK: mov.8b v1, v2
 entry:
-  %addr_v8i8 = bitcast i8* %addr to <8 x i8>*
-  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
+  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr %addr)
   %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
   %vec1 = extractvalue { <8 x i8>, <8 x i8> } %vec, 1
   tail call void asm sideeffect "", "~{v0},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
-  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
+  tail call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, ptr %addr)
 
   tail call void asm sideeffect "", "~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
-  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
+  tail call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, ptr %addr)
   ret void
 }
 
-define void @test_D0D1_from_D31D0(i8* %addr) #0 {
+define void @test_D0D1_from_D31D0(ptr %addr) #0 {
 ; CHECK-LABEL: test_D0D1_from_D31D0:
 ; CHECK: mov.8b v1, v0
 ; CHECK: mov.8b v0, v31
 entry:
-  %addr_v8i8 = bitcast i8* %addr to <8 x i8>*
-  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
+  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr %addr)
   %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
   %vec1 = extractvalue { <8 x i8>, <8 x i8> } %vec, 1
   tail call void asm sideeffect "", "~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30}"()
-  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
+  tail call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, ptr %addr)
 
   tail call void asm sideeffect "", "~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
-  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
+  tail call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, ptr %addr)
   ret void
 }
 
-define void @test_D31D0_from_D0D1(i8* %addr) #0 {
+define void @test_D31D0_from_D0D1(ptr %addr) #0 {
 ; CHECK-LABEL: test_D31D0_from_D0D1:
 ; CHECK: mov.8b v31, v0
 ; CHECK: mov.8b v0, v1
 entry:
-  %addr_v8i8 = bitcast i8* %addr to <8 x i8>*
-  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
+  %vec = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr %addr)
   %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
   %vec1 = extractvalue { <8 x i8>, <8 x i8> } %vec, 1
   tail call void asm sideeffect "", "~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
-  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
+  tail call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, ptr %addr)
 
   tail call void asm sideeffect "", "~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30}"()
-  tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
+  tail call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, ptr %addr)
   ret void
 }
 
-define void @test_D2D3D4_from_D0D1D2(i8* %addr) #0 {
+define void @test_D2D3D4_from_D0D1D2(ptr %addr) #0 {
 ; CHECK-LABEL: test_D2D3D4_from_D0D1D2:
 ; CHECK: mov.8b v4, v2
 ; CHECK: mov.8b v3, v1
 ; CHECK: mov.8b v2, v0
 entry:
-  %addr_v8i8 = bitcast i8* %addr to <8 x i8>*
-  %vec = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0v8i8(<8 x i8>* %addr_v8i8)
+  %vec = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0(ptr %addr)
   %vec0 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vec, 0
   %vec1 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vec, 1
   %vec2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vec, 2
 
   tail call void asm sideeffect "", "~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
-  tail call void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, <8 x i8> %vec2, i8* %addr)
+  tail call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, <8 x i8> %vec2, ptr %addr)
 
   tail call void asm sideeffect "", "~{v0},~{v1},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
-  tail call void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, <8 x i8> %vec2, i8* %addr)
+  tail call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> %vec0, <8 x i8> %vec1, <8 x i8> %vec2, ptr %addr)
   ret void
 }
 
-define void @test_Q0Q1Q2_from_Q1Q2Q3(i8* %addr) #0 {
+define void @test_Q0Q1Q2_from_Q1Q2Q3(ptr %addr) #0 {
 ; CHECK-LABEL: test_Q0Q1Q2_from_Q1Q2Q3:
 ; CHECK: mov.16b v0, v1
 ; CHECK: mov.16b v1, v2
 ; CHECK: mov.16b v2, v3
 entry:
-  %addr_v16i8 = bitcast i8* %addr to <16 x i8>*
-  %vec = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0v16i8(<16 x i8>* %addr_v16i8)
+  %vec = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0(ptr %addr)
   %vec0 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vec, 0
   %vec1 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vec, 1
   %vec2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vec, 2
   tail call void asm sideeffect "", "~{v0},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
-  tail call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, i8* %addr)
+  tail call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, ptr %addr)
 
   tail call void asm sideeffect "", "~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
-  tail call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, i8* %addr)
+  tail call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, ptr %addr)
   ret void
 }
 
-define void @test_Q1Q2Q3Q4_from_Q30Q31Q0Q1(i8* %addr) #0 {
+define void @test_Q1Q2Q3Q4_from_Q30Q31Q0Q1(ptr %addr) #0 {
 ; CHECK-LABEL: test_Q1Q2Q3Q4_from_Q30Q31Q0Q1:
 ; CHECK: mov.16b v4, v1
 ; CHECK: mov.16b v3, v0
 ; CHECK: mov.16b v2, v31
 ; CHECK: mov.16b v1, v30
-  %addr_v16i8 = bitcast i8* %addr to <16 x i8>*
-  %vec = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0v16i8(<16 x i8>* %addr_v16i8)
+  %vec = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr %addr)
   %vec0 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vec, 0
   %vec1 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vec, 1
   %vec2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vec, 2
   %vec3 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vec, 3
 
   tail call void asm sideeffect "", "~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29}"()
-  tail call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, <16 x i8> %vec3, i8* %addr)
+  tail call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, <16 x i8> %vec3, ptr %addr)
 
   tail call void asm sideeffect "", "~{v0},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
-  tail call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, <16 x i8> %vec3, i8* %addr)
+  tail call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> %vec0, <16 x i8> %vec1, <16 x i8> %vec2, <16 x i8> %vec3, ptr %addr)
   ret void
 }
 
-declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>*)
-declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0v8i8(<8 x i8>*)
-declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0v16i8(<16 x i8>*)
-declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0v16i8(<16 x i8>*)
+declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr)
+declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0(ptr)
+declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0(ptr)
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr)
 
-declare void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*)
-declare void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, i8*)
-declare void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i8*)
-declare void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i8*)
+declare void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8>, <8 x i8>, ptr)
+declare void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8>, <8 x i8>, <8 x i8>, ptr)
+declare void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, ptr)
+declare void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, ptr)
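
The arm64-copy-tuple.ll hunks above also show the knock-on effect on overloaded intrinsics: the pointer parameter now mangles as plain ".p0" rather than ".p0v8i8" or ".p0i8", and the address no longer needs a bitcast before the call. A reduced sketch (illustrative only; @sketch_ld2st2 is hypothetical, while the intrinsic names are the ones declared above):

declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr)
declare void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8>, <8 x i8>, ptr)

define void @sketch_ld2st2(ptr %addr) {
  ; load two interleaved <8 x i8> values, then store them back unchanged
  %vec = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr %addr)
  %lo = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
  %hi = extractvalue { <8 x i8>, <8 x i8> } %vec, 1
  call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %lo, <8 x i8> %hi, ptr %addr)
  ret void
}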

diff  --git a/llvm/test/CodeGen/AArch64/arm64-cse.ll b/llvm/test/CodeGen/AArch64/arm64-cse.ll
index 00f519a942721..9ea51161dad0e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-cse.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-cse.ll
@@ -5,7 +5,7 @@ target triple = "arm64-apple-ios"
 ; rdar://12462006
 ; CSE between "icmp reg reg" and "sub reg reg".
 ; Both can be in the same basic block or in different basic blocks.
-define i8* @t1(i8* %base, i32* nocapture %offset, i32 %size) nounwind {
+define ptr @t1(ptr %base, ptr nocapture %offset, i32 %size) nounwind {
 ; CHECK-LABEL: t1:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    ldr w9, [x1]
@@ -20,7 +20,7 @@ define i8* @t1(i8* %base, i32* nocapture %offset, i32 %size) nounwind {
 ; CHECK-NEXT:    str w9, [x1]
 ; CHECK-NEXT:    ret
 entry:
- %0 = load i32, i32* %offset, align 4
+ %0 = load i32, ptr %offset, align 4
  %cmp = icmp slt i32 %0, %size
  %s = sub nsw i32 %0, %size
  br i1 %cmp, label %return, label %if.end
@@ -29,17 +29,17 @@ if.end:
  %sub = sub nsw i32 %0, %size
  %s2 = sub nsw i32 %s, %size
  %s3 = sub nsw i32 %sub, %s2
- store i32 %s3, i32* %offset, align 4
- %add.ptr = getelementptr inbounds i8, i8* %base, i32 %sub
+ store i32 %s3, ptr %offset, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %base, i32 %sub
  br label %return
 
 return:
- %retval.0 = phi i8* [ %add.ptr, %if.end ], [ null, %entry ]
- ret i8* %retval.0
+ %retval.0 = phi ptr [ %add.ptr, %if.end ], [ null, %entry ]
+ ret ptr %retval.0
 }
 
 ; CSE between "icmp reg imm" and "sub reg imm".
-define i8* @t2(i8* %base, i32* nocapture %offset) nounwind {
+define ptr @t2(ptr %base, ptr nocapture %offset) nounwind {
 ; CHECK-LABEL: t2:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    ldr w8, [x1]
@@ -53,17 +53,17 @@ define i8* @t2(i8* %base, i32* nocapture %offset) nounwind {
 ; CHECK-NEXT:    mov x0, xzr
 ; CHECK-NEXT:    ret
 entry:
- %0 = load i32, i32* %offset, align 4
+ %0 = load i32, ptr %offset, align 4
  %cmp = icmp slt i32 %0, 1
  br i1 %cmp, label %return, label %if.end
 
 if.end:
  %sub = sub nsw i32 %0, 1
- store i32 %sub, i32* %offset, align 4
- %add.ptr = getelementptr inbounds i8, i8* %base, i32 %sub
+ store i32 %sub, ptr %offset, align 4
+ %add.ptr = getelementptr inbounds i8, ptr %base, i32 %sub
  br label %return
 
 return:
- %retval.0 = phi i8* [ %add.ptr, %if.end ], [ null, %entry ]
- ret i8* %retval.0
+ %retval.0 = phi ptr [ %add.ptr, %if.end ], [ null, %entry ]
+ ret ptr %retval.0
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-csel.ll b/llvm/test/CodeGen/AArch64/arm64-csel.ll
index e04b42d637207..5dd826d9bf549 100644
--- a/llvm/test/CodeGen/AArch64/arm64-csel.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-csel.ll
@@ -379,7 +379,7 @@ define i64 @foo23(i64 %x) {
   ret i64 %res
 }
 
-define i16 @foo24(i8* nocapture readonly %A, i8* nocapture readonly %B) {
+define i16 @foo24(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: foo24:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -390,17 +390,17 @@ define i16 @foo24(i8* nocapture readonly %A, i8* nocapture readonly %B) {
 ; CHECK-NEXT:    cinc w0, w8, hi
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i8, i8* %A, align 1
+  %0 = load i8, ptr %A, align 1
   %cmp = icmp ugt i8 %0, 3
   %conv1 = zext i1 %cmp to i16
-  %1 = load i8, i8* %B, align 1
+  %1 = load i8, ptr %B, align 1
   %cmp4 = icmp ugt i8 %1, 33
   %conv5 = zext i1 %cmp4 to i16
   %add = add nuw nsw i16 %conv5, %conv1
   ret i16 %add
 }
 
-define i64 @foo25(i64* nocapture readonly %A, i64* nocapture readonly %B) {
+define i64 @foo25(ptr nocapture readonly %A, ptr nocapture readonly %B) {
 ; CHECK-LABEL: foo25:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x8, [x1]
@@ -411,10 +411,10 @@ define i64 @foo25(i64* nocapture readonly %A, i64* nocapture readonly %B) {
 ; CHECK-NEXT:    cinc x0, x8, hi
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i64, i64* %A, align 1
+  %0 = load i64, ptr %A, align 1
   %cmp = icmp ugt i64 %0, 3
   %conv1 = zext i1 %cmp to i64
-  %1 = load i64, i64* %B, align 1
+  %1 = load i64, ptr %B, align 1
   %cmp4 = icmp ugt i64 %1, 33
   %conv5 = zext i1 %cmp4 to i64
   %add = add nuw nsw i64 %conv5, %conv1

diff  --git a/llvm/test/CodeGen/AArch64/arm64-csldst-mmo.ll b/llvm/test/CodeGen/AArch64/arm64-csldst-mmo.ll
index e02a3a8ba7fd6..9a485f175b2c4 100644
--- a/llvm/test/CodeGen/AArch64/arm64-csldst-mmo.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-csldst-mmo.ll
@@ -20,6 +20,6 @@
 define void @test1() {
 entry:
   tail call void asm sideeffect "nop", "~{x20},~{x21},~{x22},~{x23}"() nounwind
-  store i32 0, i32* getelementptr inbounds ([0 x i32], [0 x i32]* @G, i64 0, i64 0), align 4
+  store i32 0, ptr @G, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll b/llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll
index 3cee66c2d29ff..31a2c74d3bd15 100644
--- a/llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-custom-call-saved-reg.ll
@@ -78,8 +78,8 @@ define dso_local void @callee() {
 ; CHECK-SAVED-ALL-NEXT: stp x11, x10, [sp
 ; CHECK-SAVED-ALL-NEXT: stp x9, x8, [sp
 
-  %val = load volatile [30 x i64], [30 x i64]* @var
-  store volatile [30 x i64] %val, [30 x i64]* @var
+  %val = load volatile [30 x i64], ptr @var
+  store volatile [30 x i64] %val, ptr @var
 
 ; CHECK-SAVED-ALL: ldp x9, x8, [sp
 ; CHECK-SAVED-ALL-NEXT: ldp x11, x10, [sp
@@ -104,7 +104,7 @@ define dso_local void @callee() {
 define dso_local void @caller() {
 ; CHECK-LABEL: caller
 
-  %val = load volatile [30 x i64], [30 x i64]* @var
+  %val = load volatile [30 x i64], ptr @var
 ; CHECK-SAVED-X8: adrp x8, var
 ; CHECK-SAVED-X9: adrp x9, var
 ; CHECK-SAVED-X10: adrp x10, var
@@ -139,7 +139,7 @@ define dso_local void @caller() {
   call void @callee()
 ; CHECK: bl callee
 
-  store volatile [30 x i64] %val, [30 x i64]* @var
+  store volatile [30 x i64] %val, ptr @var
 ; CHECK-SAVED-ALL-DAG: str x9
 ; CHECK-SAVED-ALL-DAG: str x10
 ; CHECK-SAVED-ALL-DAG: str x11

diff  --git a/llvm/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll b/llvm/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll
index 37f3504be935f..efe1e203e2c99 100644
--- a/llvm/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll
@@ -2,7 +2,7 @@
 target datalayout = "e-i64:64-n32:64-S128"
 target triple = "arm64-apple-ios"
 
-%"struct.SU" = type { i32, %"struct.SU"*, i32*, i32, i32, %"struct.BO", i32, [5 x i8] }
+%"struct.SU" = type { i32, ptr, ptr, i32, i32, %"struct.BO", i32, [5 x i8] }
 %"struct.BO" = type { %"struct.RE" }
 
 %"struct.RE" = type { i32, i32, i32, i32 }
@@ -15,14 +15,13 @@ target triple = "arm64-apple-ios"
 ; CHECK-NOT: ldr
 ; CHECK: str wzr
 ; CHECK-NOT: str
-define void @test(%"struct.SU"* nocapture %su) {
+define void @test(ptr nocapture %su) {
 entry:
-  %r1 = getelementptr inbounds %"struct.SU", %"struct.SU"* %su, i64 1, i32 5
-  %r2 = bitcast %"struct.BO"* %r1 to i48*
-  %r3 = load i48, i48* %r2, align 8
+  %r1 = getelementptr inbounds %"struct.SU", ptr %su, i64 1, i32 5
+  %r3 = load i48, ptr %r1, align 8
   %r4 = and i48 %r3, -4294967296
   %r5 = or i48 0, %r4
-  store i48 %r5, i48* %r2, align 8
+  store i48 %r5, ptr %r1, align 8
 
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll b/llvm/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll
index 72d94ae13b0fc..7e72e8de01f4f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll
@@ -12,28 +12,25 @@
 ; CHECK: fadd {{s[0-9]+}}, [[CPLX2_I]], [[CPLX1_I]]
 ; CHECK: fadd {{s[0-9]+}}, [[CPLX2_R]], [[CPLX1_R]]
 ; CHECK: ret
-define void @test(%class.Complex* nocapture %out, i64 %out_start) {
+define void @test(ptr nocapture %out, i64 %out_start) {
 entry:
-  %arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %out_start
-  %0 = bitcast %class.Complex* %arrayidx to i64*
-  %1 = load i64, i64* %0, align 4
-  %t0.sroa.0.0.extract.trunc = trunc i64 %1 to i32
-  %2 = bitcast i32 %t0.sroa.0.0.extract.trunc to float
-  %t0.sroa.2.0.extract.shift = lshr i64 %1, 32
+  %arrayidx = getelementptr inbounds %class.Complex, ptr %out, i64 %out_start
+  %0 = load i64, ptr %arrayidx, align 4
+  %t0.sroa.0.0.extract.trunc = trunc i64 %0 to i32
+  %1 = bitcast i32 %t0.sroa.0.0.extract.trunc to float
+  %t0.sroa.2.0.extract.shift = lshr i64 %0, 32
   %t0.sroa.2.0.extract.trunc = trunc i64 %t0.sroa.2.0.extract.shift to i32
-  %3 = bitcast i32 %t0.sroa.2.0.extract.trunc to float
+  %2 = bitcast i32 %t0.sroa.2.0.extract.trunc to float
   %add = add i64 %out_start, 8
-  %arrayidx2 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add
-  %i.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx2, i64 0, i32 0
-  %4 = load float, float* %i.i, align 4
-  %add.i = fadd float %4, %2
+  %arrayidx2 = getelementptr inbounds %class.Complex, ptr %out, i64 %add
+  %3 = load float, ptr %arrayidx2, align 4
+  %add.i = fadd float %3, %1
   %retval.sroa.0.0.vec.insert.i = insertelement <2 x float> undef, float %add.i, i32 0
-  %r.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx2, i64 0, i32 1
-  %5 = load float, float* %r.i, align 4
-  %add5.i = fadd float %5, %3
+  %r.i = getelementptr inbounds %class.Complex, ptr %arrayidx2, i64 0, i32 1
+  %4 = load float, ptr %r.i, align 4
+  %add5.i = fadd float %4, %2
   %retval.sroa.0.4.vec.insert.i = insertelement <2 x float> %retval.sroa.0.0.vec.insert.i, float %add5.i, i32 1
-  %ref.tmp.sroa.0.0.cast = bitcast %class.Complex* %arrayidx to <2 x float>*
-  store <2 x float> %retval.sroa.0.4.vec.insert.i, <2 x float>* %ref.tmp.sroa.0.0.cast, align 4
+  store <2 x float> %retval.sroa.0.4.vec.insert.i, ptr %arrayidx, align 4
   ret void
 }
 
@@ -44,28 +41,25 @@ entry:
 ; CHECK: add {{w[0-9]+}}, [[CPLX2_I]], [[CPLX1_I]]
 ; CHECK: add {{w[0-9]+}}, [[CPLX2_R]], [[CPLX1_R]]
 ; CHECK: ret
-define void @test_int(%class.Complex_int* nocapture %out, i64 %out_start) {
+define void @test_int(ptr nocapture %out, i64 %out_start) {
 entry:
-  %arrayidx = getelementptr inbounds %class.Complex_int, %class.Complex_int* %out, i64 %out_start
-  %0 = bitcast %class.Complex_int* %arrayidx to i64*
-  %1 = load i64, i64* %0, align 4
-  %t0.sroa.0.0.extract.trunc = trunc i64 %1 to i32
-  %2 = bitcast i32 %t0.sroa.0.0.extract.trunc to i32
-  %t0.sroa.2.0.extract.shift = lshr i64 %1, 32
+  %arrayidx = getelementptr inbounds %class.Complex_int, ptr %out, i64 %out_start
+  %0 = load i64, ptr %arrayidx, align 4
+  %t0.sroa.0.0.extract.trunc = trunc i64 %0 to i32
+  %1 = bitcast i32 %t0.sroa.0.0.extract.trunc to i32
+  %t0.sroa.2.0.extract.shift = lshr i64 %0, 32
   %t0.sroa.2.0.extract.trunc = trunc i64 %t0.sroa.2.0.extract.shift to i32
-  %3 = bitcast i32 %t0.sroa.2.0.extract.trunc to i32
+  %2 = bitcast i32 %t0.sroa.2.0.extract.trunc to i32
   %add = add i64 %out_start, 8
-  %arrayidx2 = getelementptr inbounds %class.Complex_int, %class.Complex_int* %out, i64 %add
-  %i.i = getelementptr inbounds %class.Complex_int, %class.Complex_int* %arrayidx2, i64 0, i32 0
-  %4 = load i32, i32* %i.i, align 4
-  %add.i = add i32 %4, %2
+  %arrayidx2 = getelementptr inbounds %class.Complex_int, ptr %out, i64 %add
+  %3 = load i32, ptr %arrayidx2, align 4
+  %add.i = add i32 %3, %1
   %retval.sroa.0.0.vec.insert.i = insertelement <2 x i32> undef, i32 %add.i, i32 0
-  %r.i = getelementptr inbounds %class.Complex_int, %class.Complex_int* %arrayidx2, i64 0, i32 1
-  %5 = load i32, i32* %r.i, align 4
-  %add5.i = add i32 %5, %3
+  %r.i = getelementptr inbounds %class.Complex_int, ptr %arrayidx2, i64 0, i32 1
+  %4 = load i32, ptr %r.i, align 4
+  %add5.i = add i32 %4, %2
   %retval.sroa.0.4.vec.insert.i = insertelement <2 x i32> %retval.sroa.0.0.vec.insert.i, i32 %add5.i, i32 1
-  %ref.tmp.sroa.0.0.cast = bitcast %class.Complex_int* %arrayidx to <2 x i32>*
-  store <2 x i32> %retval.sroa.0.4.vec.insert.i, <2 x i32>* %ref.tmp.sroa.0.0.cast, align 4
+  store <2 x i32> %retval.sroa.0.4.vec.insert.i, ptr %arrayidx, align 4
   ret void
 }
 
@@ -76,27 +70,24 @@ entry:
 ; CHECK: add {{x[0-9]+}}, [[CPLX2_I]], [[CPLX1_I]]
 ; CHECK: add {{x[0-9]+}}, [[CPLX2_R]], [[CPLX1_R]]
 ; CHECK: ret
-define void @test_long(%class.Complex_long* nocapture %out, i64 %out_start) {
+define void @test_long(ptr nocapture %out, i64 %out_start) {
 entry:
-  %arrayidx = getelementptr inbounds %class.Complex_long, %class.Complex_long* %out, i64 %out_start
-  %0 = bitcast %class.Complex_long* %arrayidx to i128*
-  %1 = load i128, i128* %0, align 4
-  %t0.sroa.0.0.extract.trunc = trunc i128 %1 to i64
-  %2 = bitcast i64 %t0.sroa.0.0.extract.trunc to i64
-  %t0.sroa.2.0.extract.shift = lshr i128 %1, 64
+  %arrayidx = getelementptr inbounds %class.Complex_long, ptr %out, i64 %out_start
+  %0 = load i128, ptr %arrayidx, align 4
+  %t0.sroa.0.0.extract.trunc = trunc i128 %0 to i64
+  %1 = bitcast i64 %t0.sroa.0.0.extract.trunc to i64
+  %t0.sroa.2.0.extract.shift = lshr i128 %0, 64
   %t0.sroa.2.0.extract.trunc = trunc i128 %t0.sroa.2.0.extract.shift to i64
-  %3 = bitcast i64 %t0.sroa.2.0.extract.trunc to i64
+  %2 = bitcast i64 %t0.sroa.2.0.extract.trunc to i64
   %add = add i64 %out_start, 8
-  %arrayidx2 = getelementptr inbounds %class.Complex_long, %class.Complex_long* %out, i64 %add
-  %i.i = getelementptr inbounds %class.Complex_long, %class.Complex_long* %arrayidx2, i32 0, i32 0
-  %4 = load i64, i64* %i.i, align 4
-  %add.i = add i64 %4, %2
+  %arrayidx2 = getelementptr inbounds %class.Complex_long, ptr %out, i64 %add
+  %3 = load i64, ptr %arrayidx2, align 4
+  %add.i = add i64 %3, %1
   %retval.sroa.0.0.vec.insert.i = insertelement <2 x i64> undef, i64 %add.i, i32 0
-  %r.i = getelementptr inbounds %class.Complex_long, %class.Complex_long* %arrayidx2, i32 0, i32 1
-  %5 = load i64, i64* %r.i, align 4
-  %add5.i = add i64 %5, %3
+  %r.i = getelementptr inbounds %class.Complex_long, ptr %arrayidx2, i32 0, i32 1
+  %4 = load i64, ptr %r.i, align 4
+  %add5.i = add i64 %4, %2
   %retval.sroa.0.4.vec.insert.i = insertelement <2 x i64> %retval.sroa.0.0.vec.insert.i, i64 %add5.i, i32 1
-  %ref.tmp.sroa.0.0.cast = bitcast %class.Complex_long* %arrayidx to <2 x i64>*
-  store <2 x i64> %retval.sroa.0.4.vec.insert.i, <2 x i64>* %ref.tmp.sroa.0.0.cast, align 4
+  store <2 x i64> %retval.sroa.0.4.vec.insert.i, ptr %arrayidx, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-dead-def-frame-index.ll b/llvm/test/CodeGen/AArch64/arm64-dead-def-frame-index.ll
index 0be3fb12f5adf..448148fe23f2b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-dead-def-frame-index.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-dead-def-frame-index.ll
@@ -6,7 +6,7 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 define i32 @test1() #0 {
   %tmp1 = alloca i8
   %tmp2 = alloca i32, i32 4096
-  %tmp3 = icmp eq i8* %tmp1, null
+  %tmp3 = icmp eq ptr %tmp1, null
   %tmp4 = zext i1 %tmp3 to i32
 
   ret i32 %tmp4

diff  --git a/llvm/test/CodeGen/AArch64/arm64-dup.ll b/llvm/test/CodeGen/AArch64/arm64-dup.ll
index 0947730ebab0a..6613f911f8258 100644
--- a/llvm/test/CodeGen/AArch64/arm64-dup.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-dup.ll
@@ -199,90 +199,90 @@ define <4 x float> @v_shuffledupQfloat(float %A) nounwind {
 	ret <4 x float> %tmp2
 }
 
-define <8 x i8> @vduplane8(<8 x i8>* %A) nounwind {
+define <8 x i8> @vduplane8(ptr %A) nounwind {
 ; CHECK-LABEL: vduplane8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    dup.8b v0, v0[1]
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp1 = load <8 x i8>, ptr %A
 	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
 	ret <8 x i8> %tmp2
 }
 
-define <4 x i16> @vduplane16(<4 x i16>* %A) nounwind {
+define <4 x i16> @vduplane16(ptr %A) nounwind {
 ; CHECK-LABEL: vduplane16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    dup.4h v0, v0[1]
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp1 = load <4 x i16>, ptr %A
 	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
 	ret <4 x i16> %tmp2
 }
 
-define <2 x i32> @vduplane32(<2 x i32>* %A) nounwind {
+define <2 x i32> @vduplane32(ptr %A) nounwind {
 ; CHECK-LABEL: vduplane32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    dup.2s v0, v0[1]
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp1 = load <2 x i32>, ptr %A
 	%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> < i32 1, i32 1 >
 	ret <2 x i32> %tmp2
 }
 
-define <2 x float> @vduplanefloat(<2 x float>* %A) nounwind {
+define <2 x float> @vduplanefloat(ptr %A) nounwind {
 ; CHECK-LABEL: vduplanefloat:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    dup.2s v0, v0[1]
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp1 = load <2 x float>, ptr %A
 	%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> < i32 1, i32 1 >
 	ret <2 x float> %tmp2
 }
 
-define <16 x i8> @vduplaneQ8(<8 x i8>* %A) nounwind {
+define <16 x i8> @vduplaneQ8(ptr %A) nounwind {
 ; CHECK-LABEL: vduplaneQ8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    dup.16b v0, v0[1]
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp1 = load <8 x i8>, ptr %A
 	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <16 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
 	ret <16 x i8> %tmp2
 }
 
-define <8 x i16> @vduplaneQ16(<4 x i16>* %A) nounwind {
+define <8 x i16> @vduplaneQ16(ptr %A) nounwind {
 ; CHECK-LABEL: vduplaneQ16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    dup.8h v0, v0[1]
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp1 = load <4 x i16>, ptr %A
 	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
 	ret <8 x i16> %tmp2
 }
 
-define <4 x i32> @vduplaneQ32(<2 x i32>* %A) nounwind {
+define <4 x i32> @vduplaneQ32(ptr %A) nounwind {
 ; CHECK-LABEL: vduplaneQ32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    dup.4s v0, v0[1]
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp1 = load <2 x i32>, ptr %A
 	%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
 	ret <4 x i32> %tmp2
 }
 
-define <4 x float> @vduplaneQfloat(<2 x float>* %A) nounwind {
+define <4 x float> @vduplaneQfloat(ptr %A) nounwind {
 ; CHECK-LABEL: vduplaneQfloat:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    dup.4s v0, v0[1]
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp1 = load <2 x float>, ptr %A
 	%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
 	ret <4 x float> %tmp2
 }
@@ -445,7 +445,7 @@ define <4 x float> @test_perfectshuffle_dupext_v4f32(<4 x float> %a, <4 x float>
   ret <4 x float> %r
 }
 
-define void @disguised_dup(<4 x float> %x, <4 x float>* %p1, <4 x float>* %p2) {
+define void @disguised_dup(<4 x float> %x, ptr %p1, ptr %p2) {
 ; CHECK-LABEL: disguised_dup:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext.16b v1, v0, v0, #4
@@ -456,8 +456,8 @@ define void @disguised_dup(<4 x float> %x, <4 x float>* %p1, <4 x float>* %p2) {
 ; CHECK-NEXT:    ret
   %shuf = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 0>
   %dup = shufflevector <4 x float> %shuf, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 2, i32 3>
-  store <4 x float> %shuf, <4 x float>* %p1, align 8
-  store <4 x float> %dup, <4 x float>* %p2, align 8
+  store <4 x float> %shuf, ptr %p1, align 8
+  store <4 x float> %dup, ptr %p2, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll b/llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll
index 5f5672ec0867e..034822b700a97 100644
--- a/llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll
@@ -2,7 +2,7 @@
 target triple = "arm64-apple-macosx"
 
 ; CHECK: mm2
-define i32 @mm2(i32* nocapture %p, i32 %n) nounwind uwtable readonly ssp {
+define i32 @mm2(ptr nocapture %p, i32 %n) nounwind uwtable readonly ssp {
 entry:
   br label %do.body
 
@@ -13,9 +13,9 @@ do.body:
   %max.0 = phi i32 [ 0, %entry ], [ %max.1, %do.cond ]
   %min.0 = phi i32 [ 0, %entry ], [ %min.1, %do.cond ]
   %n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %do.cond ]
-  %p.addr.0 = phi i32* [ %p, %entry ], [ %incdec.ptr, %do.cond ]
-  %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.0, i64 1
-  %0 = load i32, i32* %p.addr.0, align 4
+  %p.addr.0 = phi ptr [ %p, %entry ], [ %incdec.ptr, %do.cond ]
+  %incdec.ptr = getelementptr inbounds i32, ptr %p.addr.0, i64 1
+  %0 = load i32, ptr %p.addr.0, align 4
   %cmp = icmp sgt i32 %0, %max.0
   br i1 %cmp, label %do.cond, label %if.else
 
@@ -400,7 +400,7 @@ entry:
   br label %for.body
 
 for.body:
-  %x0 = load i32, i32* undef, align 4
+  %x0 = load i32, ptr undef, align 4
   br i1 undef, label %if.then.i146, label %is_sbox.exit155
 
 if.then.i146:
@@ -412,12 +412,12 @@ if.then.i146:
 is_sbox.exit155:                                  ; preds = %if.then.i146, %for.body
   %seg_offset.0.i151 = phi i32 [ %add9.i145, %if.then.i146 ], [ undef, %for.body ]
   %idxprom15.i152 = sext i32 %seg_offset.0.i151 to i64
-  %arrayidx18.i154 = getelementptr inbounds i32, i32* null, i64 %idxprom15.i152
-  %x1 = load i32, i32* %arrayidx18.i154, align 4
+  %arrayidx18.i154 = getelementptr inbounds i32, ptr null, i64 %idxprom15.i152
+  %x1 = load i32, ptr %arrayidx18.i154, align 4
   br i1 undef, label %for.body51, label %for.body
 
 for.body51:                                       ; preds = %is_sbox.exit155
-  call fastcc void @get_switch_type(i32 %x1, i32 undef, i16 signext undef, i16 signext undef, i16* undef)
+  call fastcc void @get_switch_type(i32 %x1, i32 undef, i16 signext undef, i16 signext undef, ptr undef)
   unreachable
 }
-declare fastcc void @get_switch_type(i32, i32, i16 signext, i16 signext, i16* nocapture) nounwind ssp
+declare fastcc void @get_switch_type(i32, i32, i16 signext, i16 signext, ptr nocapture) nounwind ssp

diff  --git a/llvm/test/CodeGen/AArch64/arm64-ext.ll b/llvm/test/CodeGen/AArch64/arm64-ext.ll
index d59d5821ebf36..c38ab076e4ea5 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ext.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ext.ll
@@ -1,92 +1,92 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @test_vextd(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextd:
 ;CHECK: {{ext.8b.*#3}}
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
 	ret <8 x i8> %tmp3
 }
 
-define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @test_vextRd(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextRd:
 ;CHECK: {{ext.8b.*#5}}
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4>
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @test_vextq(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextq:
 ;CHECK: {{ext.16b.*3}}
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
 	ret <16 x i8> %tmp3
 }
 
-define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @test_vextRq(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextRq:
 ;CHECK: {{ext.16b.*7}}
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
 	ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @test_vextd16(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextd16:
 ;CHECK: {{ext.8b.*#6}}
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
 	ret <4 x i16> %tmp3
 }
 
-define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @test_vextq32(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextq32:
 ;CHECK: {{ext.16b.*12}}
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
 	ret <4 x i32> %tmp3
 }
 
 ; Undef shuffle indices should not prevent matching to VEXT:
 
-define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @test_vextd_undef(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextd_undef:
 ;CHECK: {{ext.8b.*}}
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 undef, i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10>
 	ret <8 x i8> %tmp3
 }
 
-define <8 x i8> @test_vextd_undef2(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @test_vextd_undef2(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextd_undef2:
 ;CHECK: {{ext.8b.*#6}}
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 2, i32 3, i32 4, i32 5>
   ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @test_vextRq_undef(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_vextRq_undef:
 ;CHECK: {{ext.16b.*#7}}
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 undef, i32 undef, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 undef, i32 6>
 	ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @test_vextRq_undef2(<8 x i16>* %A) nounwind {
+define <8 x i16> @test_vextRq_undef2(ptr %A) nounwind {
 ;CHECK-LABEL: test_vextRq_undef2:
 ;CHECK: {{ext.16b.*#10}}
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
+  %tmp1 = load <8 x i16>, ptr %A
   %vext = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 1, i32 2, i32 3, i32 4>
   ret <8 x i16> %vext;
 }
@@ -95,11 +95,11 @@ define <8 x i16> @test_vextRq_undef2(<8 x i16>* %A) nounwind {
 ; chosen to reach lowering phase as a BUILD_VECTOR.
 
 ; An undef in the shuffle list should still be optimizable
-define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <4 x i16> @test_undef(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: test_undef:
 ;CHECK: zip1.4h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i16>, ptr %B
         %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32> <i32 undef, i32 8, i32 5, i32 9>
         ret <4 x i16> %tmp3
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-extend.ll b/llvm/test/CodeGen/AArch64/arm64-extend.ll
index 0ef68f8a53019..54661ff14697c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-extend.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-extend.ll
@@ -8,8 +8,8 @@ define i64 @foo(i32 %i) {
 ; CHECK:  ldrsw x0, [x[[REG1]], w0, sxtw #2]
 ; CHECK:  ret
   %idxprom = sext i32 %i to i64
-  %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @array, i64 0, i64 %idxprom
-  %tmp1 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [0 x i32], ptr @array, i64 0, i64 %idxprom
+  %tmp1 = load i32, ptr %arrayidx, align 4
   %conv = sext i32 %tmp1 to i64
   ret i64 %conv
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-extload-knownzero.ll b/llvm/test/CodeGen/AArch64/arm64-extload-knownzero.ll
index 5dd8cb282321e..5963d98ec3240 100644
--- a/llvm/test/CodeGen/AArch64/arm64-extload-knownzero.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-extload-knownzero.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 ; rdar://12771555
 
-define void @foo(i16* %ptr, i32 %a) nounwind {
+define void @foo(ptr %ptr, i32 %a) nounwind {
 entry:
 ; CHECK-LABEL: foo:
   %tmp1 = icmp ult i32 %a, 100
@@ -9,7 +9,7 @@ entry:
 bb1:
 ; CHECK: %bb1
 ; CHECK: ldrh [[REG:w[0-9]+]]
-  %tmp2 = load i16, i16* %ptr, align 2
+  %tmp2 = load i16, ptr %ptr, align 2
   br label %bb2
 bb2:
 ; CHECK-NOT: and {{w[0-9]+}}, [[REG]], #0xffff

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll
index 1985a72b6bd8a..4140e7633e38f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll
@@ -13,7 +13,7 @@ entry:
 ; CHECK: add x[[REG3:[0-9]+]], x[[REG1]], x[[REG2]]
 ; CHECK: ldr w0, [x[[REG3]]]
 ; CHECK: ret
-  %0 = load i32, i32* getelementptr inbounds ([5001 x i32], [5001 x i32]* @sortlist, i32 0, i64 5000), align 4
+  %0 = load i32, ptr getelementptr inbounds ([5001 x i32], ptr @sortlist, i32 0, i64 5000), align 4
   ret i32 %0
 }
 
@@ -26,13 +26,13 @@ entry:
 ; CHECK: add x[[REG3:[0-9]+]], x[[REG1]], x[[REG2]]
 ; CHECK: ldr x0, [x[[REG3]]]
 ; CHECK: ret
-  %0 = load i64, i64* getelementptr inbounds ([5001 x i64], [5001 x i64]* @sortlist2, i32 0, i64 5000), align 4
+  %0 = load i64, ptr getelementptr inbounds ([5001 x i64], ptr @sortlist2, i32 0, i64 5000), align 4
   ret i64 %0
 }
 
 ; Load an address with a ridiculously large offset.
 ; rdar://12505553
-@pd2 = common global i8* null, align 8
+@pd2 = common global ptr null, align 8
 
 define signext i8 @foo3() nounwind ssp {
 entry:
@@ -40,8 +40,8 @@ entry:
 ; CHECK: mov x[[REG:[0-9]+]], #12274
 ; CHECK: movk x[[REG]], #29646, lsl #16
 ; CHECK: movk x[[REG]], #2874, lsl #32
-  %0 = load i8*, i8** @pd2, align 8
-  %arrayidx = getelementptr inbounds i8, i8* %0, i64 12345678901234
-  %1 = load i8, i8* %arrayidx, align 1
+  %0 = load ptr, ptr @pd2, align 8
+  %arrayidx = getelementptr inbounds i8, ptr %0, i64 12345678901234
+  %1 = load i8, ptr %arrayidx, align 1
   ret i8 %1
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll
index 1b65d9cbce14a..c15a5dbbf672b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-alloca.ll
@@ -4,10 +4,10 @@
 %struct.S1Ty = type { i64 }
 %struct.S2Ty = type { %struct.S1Ty, %struct.S1Ty }
 
-define void @takeS1(%struct.S1Ty* %V) nounwind {
+define void @takeS1(ptr %V) nounwind {
 entry:
-  %V.addr = alloca %struct.S1Ty*, align 8
-  store %struct.S1Ty* %V, %struct.S1Ty** %V.addr, align 8
+  %V.addr = alloca ptr, align 8
+  store ptr %V, ptr %V.addr, align 8
   ret void
 }
 
@@ -18,7 +18,7 @@ entry:
 ; CHECK: mov [[REG:x[0-9]+]], sp
 ; CHECK-NEXT: add x0, [[REG]], #8
   %E = alloca %struct.S2Ty, align 4
-  %B = getelementptr inbounds %struct.S2Ty, %struct.S2Ty* %E, i32 0, i32 1
-  call void @takeS1(%struct.S1Ty* %B)
+  %B = getelementptr inbounds %struct.S2Ty, ptr %E, i32 0, i32 1
+  call void @takeS1(ptr %B)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll
index d563ccb851ce1..04617d1c89f1b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll
@@ -2,8 +2,8 @@
 
 define void @branch1() nounwind uwtable ssp {
   %x = alloca i32, align 4
-  store i32 0, i32* %x, align 4
-  %1 = load i32, i32* %x, align 4
+  store i32 0, ptr %x, align 4
+  %1 = load i32, ptr %x, align 4
   %2 = icmp ne i32 %1, 0
   br i1 %2, label %3, label %4
 
@@ -19,41 +19,41 @@ define void @branch2() nounwind uwtable ssp {
   %x = alloca i32, align 4
   %y = alloca i32, align 4
   %z = alloca i32, align 4
-  store i32 0, i32* %1
-  store i32 1, i32* %y, align 4
-  store i32 1, i32* %x, align 4
-  store i32 0, i32* %z, align 4
-  %2 = load i32, i32* %x, align 4
+  store i32 0, ptr %1
+  store i32 1, ptr %y, align 4
+  store i32 1, ptr %x, align 4
+  store i32 0, ptr %z, align 4
+  %2 = load i32, ptr %x, align 4
   %3 = icmp ne i32 %2, 0
   br i1 %3, label %4, label %5
 
 ; <label>:4                                       ; preds = %0
-  store i32 0, i32* %1
+  store i32 0, ptr %1
   br label %14
 
 ; <label>:5                                       ; preds = %0
-  %6 = load i32, i32* %y, align 4
+  %6 = load i32, ptr %y, align 4
   %7 = icmp ne i32 %6, 0
   br i1 %7, label %8, label %13
 
 ; <label>:8                                       ; preds = %5
-  %9 = load i32, i32* %z, align 4
+  %9 = load i32, ptr %z, align 4
   %10 = icmp ne i32 %9, 0
   br i1 %10, label %11, label %12
 
 ; <label>:11                                      ; preds = %8
-  store i32 1, i32* %1
+  store i32 1, ptr %1
   br label %14
 
 ; <label>:12                                      ; preds = %8
-  store i32 0, i32* %1
+  store i32 0, ptr %1
   br label %14
 
 ; <label>:13                                      ; preds = %5
   br label %14
 
 ; <label>:14                                      ; preds = %4, %11, %12, %13
-  %15 = load i32, i32* %1
+  %15 = load i32, ptr %1
   ret void
 }
 
@@ -89,11 +89,11 @@ entry:
   %b.addr = alloca i16, align 2
   %c.addr = alloca i32, align 4
   %d.addr = alloca i64, align 8
-  store i8 %a, i8* %a.addr, align 1
-  store i16 %b, i16* %b.addr, align 2
-  store i32 %c, i32* %c.addr, align 4
-  store i64 %d, i64* %d.addr, align 8
-  %0 = load i16, i16* %b.addr, align 2
+  store i8 %a, ptr %a.addr, align 1
+  store i16 %b, ptr %b.addr, align 2
+  store i32 %c, ptr %c.addr, align 4
+  store i64 %d, ptr %d.addr, align 8
+  %0 = load i16, ptr %b.addr, align 2
 ; CHECK: tbz {{w[0-9]+}}, #0, LBB4_2
   %conv = trunc i16 %0 to i1
   br i1 %conv, label %if.then, label %if.end
@@ -103,7 +103,7 @@ if.then:                                          ; preds = %entry
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
-  %1 = load i32, i32* %c.addr, align 4
+  %1 = load i32, ptr %c.addr, align 4
 ; CHECK: tbz w{{[0-9]+}}, #0, LBB4_4
   %conv1 = trunc i32 %1 to i1
   br i1 %conv1, label %if.then3, label %if.end4
@@ -113,7 +113,7 @@ if.then3:                                         ; preds = %if.end
   br label %if.end4
 
 if.end4:                                          ; preds = %if.then3, %if.end
-  %2 = load i64, i64* %d.addr, align 8
+  %2 = load i64, ptr %d.addr, align 8
 ; CHECK: tbz w{{[0-9]+}}, #0, LBB4_6
   %conv5 = trunc i64 %2 to i1
   br i1 %conv5, label %if.then7, label %if.end8
@@ -123,7 +123,7 @@ if.then7:                                         ; preds = %if.end4
   br label %if.end8
 
 if.end8:                                          ; preds = %if.then7, %if.end4
-  %3 = load i8, i8* %a.addr, align 1
+  %3 = load i8, ptr %a.addr, align 1
   ret i8 %3
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-call.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-call.ll
index fc4e52157845c..56cd21e98a39a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-call.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-call.ll
@@ -22,8 +22,8 @@ entry:
 define i32 @call1(i32 %a) nounwind {
 entry:
   %a.addr = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  %tmp = load i32, i32* %a.addr, align 4
+  store i32 %a, ptr %a.addr, align 4
+  %tmp = load i32, ptr %a.addr, align 4
   ret i32 %tmp
 }
 
@@ -34,8 +34,8 @@ entry:
 ; CHECK-NEXT:  ldur w0, [x29, #-4]
 ; CHECK-NEXT:  bl _call1
   %a.addr = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  %tmp = load i32, i32* %a.addr, align 4
+  store i32 %a, ptr %a.addr, align 4
+  %tmp = load i32, ptr %a.addr, align 4
   %call = call i32 @call1(i32 %tmp)
   ret i32 %call
 }
@@ -63,7 +63,7 @@ entry:
 
 declare void @foo_zext_(i8 %a, i16 %b)
 
-define i32 @t1(i32 %argc, i8** nocapture %argv) {
+define i32 @t1(i32 %argc, ptr nocapture %argv) {
 entry:
 ; CHECK-LABEL: @t1
 ; The last parameter will be passed on stack via i8.
@@ -259,10 +259,10 @@ define void @call_blr(i64 %Fn, i1 %c) {
 ; CHECK:       blr
   br i1 %c, label %bb1, label %bb2
 bb1:
-  %1 = inttoptr i64 %Fn to void (i64)*
+  %1 = inttoptr i64 %Fn to ptr
   br label %bb2
 bb2:
-  %2 = phi void (i64)* [ %1, %bb1 ], [ undef, %0 ]
+  %2 = phi ptr [ %1, %bb1 ], [ undef, %0 ]
   call void %2(i64 1)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll
index 26ce3a3b94aa8..ba443ebd1bbfa 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll
@@ -23,20 +23,20 @@ entry:
   %b.addr = alloca i16, align 2
   %c.addr = alloca i32, align 4
   %d.addr = alloca i64, align 8
-  store i8 %a, i8* %a.addr, align 1
-  store i16 %b, i16* %b.addr, align 2
-  store i32 %c, i32* %c.addr, align 4
-  store i64 %d, i64* %d.addr, align 8
-  %tmp = load i64, i64* %d.addr, align 8
+  store i8 %a, ptr %a.addr, align 1
+  store i16 %b, ptr %b.addr, align 2
+  store i32 %c, ptr %c.addr, align 4
+  store i64 %d, ptr %d.addr, align 8
+  %tmp = load i64, ptr %d.addr, align 8
   %conv = trunc i64 %tmp to i32
-  store i32 %conv, i32* %c.addr, align 4
-  %tmp1 = load i32, i32* %c.addr, align 4
+  store i32 %conv, ptr %c.addr, align 4
+  %tmp1 = load i32, ptr %c.addr, align 4
   %conv2 = trunc i32 %tmp1 to i16
-  store i16 %conv2, i16* %b.addr, align 2
-  %tmp3 = load i16, i16* %b.addr, align 2
+  store i16 %conv2, ptr %b.addr, align 2
+  %tmp3 = load i16, ptr %b.addr, align 2
   %conv4 = trunc i16 %tmp3 to i8
-  store i8 %conv4, i8* %a.addr, align 1
-  %tmp5 = load i8, i8* %a.addr, align 1
+  store i8 %conv4, ptr %a.addr, align 1
+  %tmp5 = load i8, ptr %a.addr, align 1
   %conv6 = zext i8 %tmp5 to i32
   ret i32 %conv6
 }
@@ -61,20 +61,20 @@ entry:
   %b.addr = alloca i16, align 2
   %c.addr = alloca i32, align 4
   %d.addr = alloca i64, align 8
-  store i8 %a, i8* %a.addr, align 1
-  store i16 %b, i16* %b.addr, align 2
-  store i32 %c, i32* %c.addr, align 4
-  store i64 %d, i64* %d.addr, align 8
-  %tmp = load i8, i8* %a.addr, align 1
+  store i8 %a, ptr %a.addr, align 1
+  store i16 %b, ptr %b.addr, align 2
+  store i32 %c, ptr %c.addr, align 4
+  store i64 %d, ptr %d.addr, align 8
+  %tmp = load i8, ptr %a.addr, align 1
   %conv = zext i8 %tmp to i16
-  store i16 %conv, i16* %b.addr, align 2
-  %tmp1 = load i16, i16* %b.addr, align 2
+  store i16 %conv, ptr %b.addr, align 2
+  %tmp1 = load i16, ptr %b.addr, align 2
   %conv2 = zext i16 %tmp1 to i32
-  store i32 %conv2, i32* %c.addr, align 4
-  %tmp3 = load i32, i32* %c.addr, align 4
+  store i32 %conv2, ptr %c.addr, align 4
+  %tmp3 = load i32, ptr %c.addr, align 4
   %conv4 = zext i32 %tmp3 to i64
-  store i64 %conv4, i64* %d.addr, align 8
-  %tmp5 = load i64, i64* %d.addr, align 8
+  store i64 %conv4, ptr %d.addr, align 8
+  %tmp5 = load i64, ptr %d.addr, align 8
   ret i64 %tmp5
 }
 
@@ -116,20 +116,20 @@ entry:
   %b.addr = alloca i16, align 2
   %c.addr = alloca i32, align 4
   %d.addr = alloca i64, align 8
-  store i8 %a, i8* %a.addr, align 1
-  store i16 %b, i16* %b.addr, align 2
-  store i32 %c, i32* %c.addr, align 4
-  store i64 %d, i64* %d.addr, align 8
-  %tmp = load i8, i8* %a.addr, align 1
+  store i8 %a, ptr %a.addr, align 1
+  store i16 %b, ptr %b.addr, align 2
+  store i32 %c, ptr %c.addr, align 4
+  store i64 %d, ptr %d.addr, align 8
+  %tmp = load i8, ptr %a.addr, align 1
   %conv = sext i8 %tmp to i16
-  store i16 %conv, i16* %b.addr, align 2
-  %tmp1 = load i16, i16* %b.addr, align 2
+  store i16 %conv, ptr %b.addr, align 2
+  %tmp1 = load i16, ptr %b.addr, align 2
   %conv2 = sext i16 %tmp1 to i32
-  store i32 %conv2, i32* %c.addr, align 4
-  %tmp3 = load i32, i32* %c.addr, align 4
+  store i32 %conv2, ptr %c.addr, align 4
+  %tmp3 = load i32, ptr %c.addr, align 4
   %conv4 = sext i32 %tmp3 to i64
-  store i64 %conv4, i64* %d.addr, align 8
-  %tmp5 = load i64, i64* %d.addr, align 8
+  store i64 %conv4, ptr %d.addr, align 8
+  %tmp5 = load i64, ptr %d.addr, align 8
   ret i64 %tmp5
 }
 
@@ -410,9 +410,9 @@ define void @stack_trunc() nounwind {
 ; CHECK: add  sp, sp, #16
   %a = alloca i8, align 1
   %b = alloca i64, align 8
-  %c = load i64, i64* %b, align 8
+  %c = load i64, ptr %b, align 8
   %d = trunc i64 %c to i8
-  store i8 %d, i8* %a, align 1
+  store i8 %d, ptr %a, align 1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-gv.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-gv.ll
index 8338475399243..2f263343ada76 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-gv.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-gv.ll
@@ -9,7 +9,7 @@ entry:
 ; CHECK: adrp [[REG:x[0-9]+]], _seed@GOTPAGE
 ; CHECK: ldr  [[REG2:x[0-9]+]], [[[REG]], _seed@GOTPAGEOFF]
 ; CHECK: str  {{x[0-9]+}}, [[[REG2]]]
-  store i64 74755, i64* @seed, align 8
+  store i64 74755, ptr @seed, align 8
   ret void
 }
 
@@ -30,12 +30,12 @@ entry:
 ; CHECK: adrp [[REG1:x[0-9]+]], _seed@GOTPAGE
 ; CHECK: ldr  [[REG1]], [[[REG1]], _seed@GOTPAGEOFF]
 ; CHECK: ldr  {{x[0-9]+}}, [[[REG1]]]
-  %0 = load i64, i64* @seed, align 8
+  %0 = load i64, ptr @seed, align 8
   %mul = mul nsw i64 %0, 1309
   %add = add nsw i64 %mul, 13849
   %and = and i64 %add, 65535
-  store i64 %and, i64* @seed, align 8
-  %1 = load i64, i64* @seed, align 8
+  store i64 %and, ptr @seed, align 8
+  %1 = load i64, ptr @seed, align 8
   %conv = trunc i64 %1 to i32
   ret i32 %conv
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll
index dc64123b33c0e..f853c0802cb1b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll
@@ -44,22 +44,22 @@ entry:
   ret i32 %conv
 }
 
-define i32 @icmp_eq_ptr(i8* %a) {
+define i32 @icmp_eq_ptr(ptr %a) {
 entry:
 ; CHECK-LABEL: icmp_eq_ptr
 ; CHECK:       cmp x0, #0
 ; CHECK-NEXT:  cset {{.+}}, eq
-  %cmp = icmp eq i8* %a, null
+  %cmp = icmp eq ptr %a, null
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
-define i32 @icmp_ne_ptr(i8* %a) {
+define i32 @icmp_ne_ptr(ptr %a) {
 entry:
 ; CHECK-LABEL: icmp_ne_ptr
 ; CHECK:       cmp x0, #0
 ; CHECK-NEXT:  cset {{.+}}, ne
-  %cmp = icmp ne i8* %a, null
+  %cmp = icmp ne ptr %a, null
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll
index 5f90bab9cf46e..5131182b89be2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll
@@ -1,36 +1,36 @@
 ; RUN: llc -O0 -fast-isel -fast-isel-abort=1 -verify-machineinstrs -mtriple=arm64-apple-darwin < %s | FileCheck %s
 
- at fn.table = internal global [2 x i8*] [i8* blockaddress(@fn, %ZERO), i8* blockaddress(@fn, %ONE)], align 8
+ at fn.table = internal global [2 x ptr] [ptr blockaddress(@fn, %ZERO), ptr blockaddress(@fn, %ONE)], align 8
 
 define i32 @fn(i32 %target) nounwind {
 entry:
 ; CHECK-LABEL: fn
   %retval = alloca i32, align 4
   %target.addr = alloca i32, align 4
-  store i32 %target, i32* %target.addr, align 4
-  %0 = load i32, i32* %target.addr, align 4
+  store i32 %target, ptr %target.addr, align 4
+  %0 = load i32, ptr %target.addr, align 4
   %idxprom = zext i32 %0 to i64
-  %arrayidx = getelementptr inbounds [2 x i8*], [2 x i8*]* @fn.table, i32 0, i64 %idxprom
-  %1 = load i8*, i8** %arrayidx, align 8
+  %arrayidx = getelementptr inbounds [2 x ptr], ptr @fn.table, i32 0, i64 %idxprom
+  %1 = load ptr, ptr %arrayidx, align 8
   br label %indirectgoto
 
 ZERO:                                             ; preds = %indirectgoto
 ; CHECK: LBB0_1
-  store i32 0, i32* %retval
+  store i32 0, ptr %retval
   br label %return
 
 ONE:                                              ; preds = %indirectgoto
 ; CHECK: LBB0_2
-  store i32 1, i32* %retval
+  store i32 1, ptr %retval
   br label %return
 
 return:                                           ; preds = %ONE, %ZERO
-  %2 = load i32, i32* %retval
+  %2 = load i32, ptr %retval
   ret i32 %2
 
 indirectgoto:                                     ; preds = %entry
 ; CHECK:      ldr [[REG:x[0-9]+]], [sp]
 ; CHECK-NEXT: br [[REG]]
-  %indirect.goto.dest = phi i8* [ %1, %entry ]
-  indirectbr i8* %indirect.goto.dest, [label %ZERO, label %ONE]
+  %indirect.goto.dest = phi ptr [ %1, %entry ]
+  indirectbr ptr %indirect.goto.dest, [label %ZERO, label %ONE]
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
index d8faf08f60d43..40966cb2a2cc3 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
@@ -11,11 +11,11 @@ define void @t1() {
 ; ARM64: mov x2, #80
 ; ARM64: uxtb w1, [[REG]]
 ; ARM64: bl _memset
-  call void @llvm.memset.p0i8.i64(i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i8 0, i64 80, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 16 @message, i8 0, i64 80, i1 false)
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
 
 define void @t2() {
 ; ARM64-LABEL: t2
@@ -25,11 +25,11 @@ define void @t2() {
 ; ARM64: add x1, x8, _message@PAGEOFF
 ; ARM64: mov x2, #80
 ; ARM64: bl _memcpy
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 80, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 @temp, ptr align 16 @message, i64 80, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1)
 
 define void @t3() {
 ; ARM64-LABEL: t3
@@ -39,11 +39,11 @@ define void @t3() {
 ; ARM64: add x1, x8, _message@PAGEOFF
 ; ARM64: mov x2, #20
 ; ARM64: bl _memmove
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 20, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr align 16 @temp, ptr align 16 @message, i64 20, i1 false)
   ret void
 }
 
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1)
 
 define void @t4() {
 ; ARM64-LABEL: t4
@@ -58,7 +58,7 @@ define void @t4() {
 ; ARM64: ldrb [[REG3:w[0-9]+]], [[[REG2]], #16]
 ; ARM64: strb [[REG3]], [[[REG0]], #16]
 ; ARM64: ret
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 16 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 17, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 16 @temp, ptr align 16 @message, i64 17, i1 false)
   ret void
 }
 
@@ -75,7 +75,7 @@ define void @t5() {
 ; ARM64: ldrb [[REG4:w[0-9]+]], [[[REG1]], #16]
 ; ARM64: strb [[REG4]], [[[REG0]], #16]
 ; ARM64: ret
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 8 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 17, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 @temp, ptr align 8 @message, i64 17, i1 false)
   ret void
 }
 
@@ -92,7 +92,7 @@ define void @t6() {
 ; ARM64: ldrb [[REG3:w[0-9]+]], [[[REG2]], #8]
 ; ARM64: strb [[REG3]], [[[REG0]], #8]
 ; ARM64: ret
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 4 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 9, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 @temp, ptr align 4 @message, i64 9, i1 false)
   ret void
 }
 
@@ -111,7 +111,7 @@ define void @t7() {
 ; ARM64: ldrb [[REG3:w[0-9]+]], [[[REG2]], #6]
 ; ARM64: strb [[REG3]], [[[REG0]], #6]
 ; ARM64: ret
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 2 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 7, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 2 @temp, ptr align 2 @message, i64 7, i1 false)
   ret void
 }
 
@@ -130,11 +130,11 @@ define void @t8() {
 ; ARM64: ldrb [[REG3:w[0-9]+]], [[[REG2]], #3]
 ; ARM64: strb [[REG3]], [[[REG0]], #3]
 ; ARM64: ret
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 getelementptr inbounds ([80 x i8], [80 x i8]* @temp, i32 0, i32 0), i8* align 1 getelementptr inbounds ([80 x i8], [80 x i8]* @message, i32 0, i32 0), i64 4, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 @temp, ptr align 1 @message, i64 4, i1 false)
   ret void
 }
 
-define void @test_distant_memcpy(i8* %dst) {
+define void @test_distant_memcpy(ptr %dst) {
 ; ARM64-LABEL: test_distant_memcpy:
 ; ARM64: mov [[ARRAY:x[0-9]+]], sp
 ; ARM64: mov [[OFFSET:x[0-9]+]], #8000
@@ -142,7 +142,7 @@ define void @test_distant_memcpy(i8* %dst) {
 ; ARM64: ldrb [[BYTE:w[0-9]+]], [x[[ADDR]]]
 ; ARM64: strb [[BYTE]], [x0]
   %array = alloca i8, i32 8192
-  %elem = getelementptr i8, i8* %array, i32 8000
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %elem, i64 1, i1 false)
+  %elem = getelementptr i8, ptr %array, i32 8000
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %elem, i64 1, i1 false)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-ret.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-ret.ll
index 81c9933a86374..317c28fbc3f9e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-ret.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-ret.ll
@@ -15,8 +15,8 @@ entry:
 ; CHECK-NEXT: ldr w0, [sp, #12]
 ; CHECK: ret
   %a.addr = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  %tmp = load i32, i32* %a.addr, align 4
+  store i32 %a, ptr %a.addr, align 4
+  %tmp = load i32, ptr %a.addr, align 4
   ret i32 %tmp
 }
 
@@ -27,8 +27,8 @@ entry:
 ; CHECK-NEXT: ldr x0, [sp, #8]
 ; CHECK: ret
   %a.addr = alloca i64, align 8
-  store i64 %a, i64* %a.addr, align 8
-  %tmp = load i64, i64* %a.addr, align 8
+  store i64 %a, ptr %a.addr, align 8
+  %tmp = load i64, ptr %a.addr, align 8
   ret i64 %tmp
 }
 
@@ -37,8 +37,8 @@ entry:
 ; CHECK: @ret_i16
 ; CHECK: sxth	w0, {{w[0-9]+}}
   %a.addr = alloca i16, align 1
-  store i16 %a, i16* %a.addr, align 1
-  %0 = load i16, i16* %a.addr, align 1
+  store i16 %a, ptr %a.addr, align 1
+  %0 = load i16, ptr %a.addr, align 1
   ret i16 %0
 }
 
@@ -47,8 +47,8 @@ entry:
 ; CHECK: @ret_i8
 ; CHECK: sxtb	w0, {{w[0-9]+}}
   %a.addr = alloca i8, align 1
-  store i8 %a, i8* %a.addr, align 1
-  %0 = load i8, i8* %a.addr, align 1
+  store i8 %a, ptr %a.addr, align 1
+  %0 = load i8, ptr %a.addr, align 1
   ret i8 %0
 }
 
@@ -58,7 +58,7 @@ entry:
 ; CHECK: and [[REG:w[0-9]+]], {{w[0-9]+}}, #0x1
 ; CHECK: sbfx w0, [[REG]], #0, #1
   %a.addr = alloca i1, align 1
-  store i1 %a, i1* %a.addr, align 1
-  %0 = load i1, i1* %a.addr, align 1
+  store i1 %a, ptr %a.addr, align 1
+  %0 = load i1, ptr %a.addr, align 1
   ret i1 %0
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-store.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-store.ll
index 47d4cdb3321b7..bfa29aaedaaf6 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-store.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-store.ll
@@ -1,30 +1,30 @@
 ; RUN: llc -mtriple=aarch64-unknown-unknown                             -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=aarch64-unknown-unknown -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s
 
-define void @store_i8(i8* %a) {
+define void @store_i8(ptr %a) {
 ; CHECK-LABEL: store_i8
 ; CHECK: strb  wzr, [x0]
-  store i8 0, i8* %a
+  store i8 0, ptr %a
   ret void
 }
 
-define void @store_i16(i16* %a) {
+define void @store_i16(ptr %a) {
 ; CHECK-LABEL: store_i16
 ; CHECK: strh  wzr, [x0]
-  store i16 0, i16* %a
+  store i16 0, ptr %a
   ret void
 }
 
-define void @store_i32(i32* %a) {
+define void @store_i32(ptr %a) {
 ; CHECK-LABEL: store_i32
 ; CHECK: str  wzr, [x0]
-  store i32 0, i32* %a
+  store i32 0, ptr %a
   ret void
 }
 
-define void @store_i64(i64* %a) {
+define void @store_i64(ptr %a) {
 ; CHECK-LABEL: store_i64
 ; CHECK: str  xzr, [x0]
-  store i64 0, i64* %a
+  store i64 0, ptr %a
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel.ll
index 901911e68939d..17a36ae3801eb 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel.ll
@@ -8,9 +8,9 @@ entry:
 ; CHECK-NEXT: str [[REGISTER]], [sp, #12]
 ; CHECK: ret
   %a.addr = alloca i32, align 4
-  store i32 %a, i32* %a.addr
-  %tmp = load i32, i32* %a.addr
-  store i32 %tmp, i32* %a.addr
+  store i32 %a, ptr %a.addr
+  %tmp = load i32, ptr %a.addr
+  store i32 %tmp, ptr %a.addr
   ret void
 }
 
@@ -21,9 +21,9 @@ define void @t1(i64 %a) nounwind {
 ; CHECK-NEXT: str [[REGISTER]], [sp, #8]
 ; CHECK: ret
   %a.addr = alloca i64, align 4
-  store i64 %a, i64* %a.addr
-  %tmp = load i64, i64* %a.addr
-  store i64 %tmp, i64* %a.addr
+  store i64 %a, ptr %a.addr
+  %tmp = load i64, ptr %a.addr
+  store i64 %tmp, ptr %a.addr
   ret void
 }
 
@@ -38,48 +38,48 @@ entry:
 ; CHECK: add sp, sp, #16
 ; CHECK: ret
   %a.addr = alloca i1, align 1
-  store i1 %a, i1* %a.addr, align 1
-  %0 = load i1, i1* %a.addr, align 1
+  store i1 %a, ptr %a.addr, align 1
+  %0 = load i1, ptr %a.addr, align 1
   ret i1 %0
 }
 
-define i32 @t2(i32 *%ptr) nounwind {
+define i32 @t2(ptr %ptr) nounwind {
 entry:
 ; CHECK-LABEL: t2:
 ; CHECK: ldur w0, [x0, #-4]
 ; CHECK: ret
-  %0 = getelementptr i32, i32 *%ptr, i32 -1
-  %1 = load i32, i32* %0, align 4
+  %0 = getelementptr i32, ptr %ptr, i32 -1
+  %1 = load i32, ptr %0, align 4
   ret i32 %1
 }
 
-define i32 @t3(i32 *%ptr) nounwind {
+define i32 @t3(ptr %ptr) nounwind {
 entry:
 ; CHECK-LABEL: t3:
 ; CHECK: ldur w0, [x0, #-256]
 ; CHECK: ret
-  %0 = getelementptr i32, i32 *%ptr, i32 -64
-  %1 = load i32, i32* %0, align 4
+  %0 = getelementptr i32, ptr %ptr, i32 -64
+  %1 = load i32, ptr %0, align 4
   ret i32 %1
 }
 
-define void @t4(i32 *%ptr) nounwind {
+define void @t4(ptr %ptr) nounwind {
 entry:
 ; CHECK-LABEL: t4:
 ; CHECK: stur wzr, [x0, #-4]
 ; CHECK: ret
-  %0 = getelementptr i32, i32 *%ptr, i32 -1
-  store i32 0, i32* %0, align 4
+  %0 = getelementptr i32, ptr %ptr, i32 -1
+  store i32 0, ptr %0, align 4
   ret void
 }
 
-define void @t5(i32 *%ptr) nounwind {
+define void @t5(ptr %ptr) nounwind {
 entry:
 ; CHECK-LABEL: t5:
 ; CHECK: stur wzr, [x0, #-256]
 ; CHECK: ret
-  %0 = getelementptr i32, i32 *%ptr, i32 -64
-  store i32 0, i32* %0, align 4
+  %0 = getelementptr i32, ptr %ptr, i32 -64
+  store i32 0, ptr %0, align 4
   ret void
 }
 
@@ -92,7 +92,7 @@ define void @t6() nounwind {
 
 declare void @llvm.trap() nounwind
 
-define void @ands(i32* %addr) {
+define void @ands(ptr %addr) {
 ; FIXME: 'select i1 undef' makes this unreliable (ub?).
 ; COM: CHECK-LABEL: ands:
 ; COM: CHECK: tst [[COND:w[0-9]+]], #0x1
@@ -101,7 +101,7 @@ define void @ands(i32* %addr) {
 ; COM: CHECK-NEXT: csel [[COND]],
 entry:
   %cond91 = select i1 undef, i32 1, i32 2
-  store i32 %cond91, i32* %addr, align 4
+  store i32 %cond91, ptr %addr, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fastcc-tailcall.ll b/llvm/test/CodeGen/AArch64/arm64-fastcc-tailcall.ll
index 48f8bd8e13022..8ef31720806f1 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fastcc-tailcall.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fastcc-tailcall.ll
@@ -1,6 +1,6 @@
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 
-define void @caller(i32* nocapture %p, i32 %a, i32 %b) nounwind optsize ssp {
+define void @caller(ptr nocapture %p, i32 %a, i32 %b) nounwind optsize ssp {
 ; CHECK-NOT: stp
 ; CHECK: b       {{_callee|callee}}
 ; CHECK-NOT: ldp
@@ -8,14 +8,14 @@ define void @caller(i32* nocapture %p, i32 %a, i32 %b) nounwind optsize ssp {
   %1 = icmp eq i32 %b, 0
   br i1 %1, label %3, label %2
 
-  tail call fastcc void @callee(i32* %p, i32 %a) optsize
+  tail call fastcc void @callee(ptr %p, i32 %a) optsize
   br label %3
 
   ret void
 }
 
-define internal fastcc void @callee(i32* nocapture %p, i32 %a) nounwind optsize noinline ssp {
-  store volatile i32 %a, i32* %p, align 4, !tbaa !0
+define internal fastcc void @callee(ptr nocapture %p, i32 %a) nounwind optsize noinline ssp {
+  store volatile i32 %a, ptr %p, align 4, !tbaa !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fastisel-gep-promote-before-add.ll b/llvm/test/CodeGen/AArch64/arm64-fastisel-gep-promote-before-add.ll
index da92c6da6dfc3..9d26ea0724073 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fastisel-gep-promote-before-add.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fastisel-gep-promote-before-add.ll
@@ -2,17 +2,17 @@
 ; sext(a) + sext(b) != sext(a + b)
 ; RUN: llc -fast-isel -mtriple=arm64-apple-darwin %s -O0 -o - | FileCheck %s
 
-define zeroext i8 @gep_promotion(i8* %ptr) nounwind uwtable ssp {
+define zeroext i8 @gep_promotion(ptr %ptr) nounwind uwtable ssp {
 entry:
-  %ptr.addr = alloca i8*, align 8
+  %ptr.addr = alloca ptr, align 8
   %add = add i8 64, 64 ; 0x40 + 0x40
-  %0 = load i8*, i8** %ptr.addr, align 8
+  %0 = load ptr, ptr %ptr.addr, align 8
 
   ; CHECK-LABEL: _gep_promotion:
   ; CHECK: ldrb {{[a-z][0-9]+}}, {{\[[a-z][0-9]+\]}}
-  %arrayidx = getelementptr inbounds i8, i8* %0, i8 %add
+  %arrayidx = getelementptr inbounds i8, ptr %0, i8 %add
 
-  %1 = load i8, i8* %arrayidx, align 1
+  %1 = load i8, ptr %arrayidx, align 1
   ret i8 %1
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fma-combines.ll b/llvm/test/CodeGen/AArch64/arm64-fma-combines.ll
index d83da9db44b6f..f12f3719e10cf 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fma-combines.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fma-combines.ll
@@ -1,15 +1,14 @@
 ; RUN: llc < %s -O=3 -mtriple=arm64-apple-ios -mcpu=cyclone -mattr=+fullfp16 -enable-unsafe-fp-math -verify-machineinstrs | FileCheck %s
 
-define void @foo_2d(double* %src) {
+define void @foo_2d(ptr %src) {
 ; CHECK-LABEL: %entry
 ; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
 ; CHECK: fmadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
 entry:
-  %arrayidx1 = getelementptr inbounds double, double* %src, i64 5
-  %arrayidx2 = getelementptr inbounds double, double* %src, i64 11
-  %tmp = bitcast double* %arrayidx1 to <2 x double>*
-  %tmp1 = load double, double* %arrayidx2, align 8
-  %tmp2 = load double, double* %arrayidx1, align 8
+  %arrayidx1 = getelementptr inbounds double, ptr %src, i64 5
+  %arrayidx2 = getelementptr inbounds double, ptr %src, i64 11
+  %tmp1 = load double, ptr %arrayidx2, align 8
+  %tmp2 = load double, ptr %arrayidx1, align 8
   %fmul = fmul fast double %tmp1, %tmp1
   %fmul2 = fmul fast double %tmp2, 0x3F94AFD6A052BF5B
   %fadd = fadd fast double %fmul, %fmul2
@@ -22,8 +21,8 @@ entry:
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-  %arrayidx3 = getelementptr inbounds double, double* %src, i64 %indvars.iv.next
-  %tmp3 = load double, double* %arrayidx3, align 8
+  %arrayidx3 = getelementptr inbounds double, ptr %src, i64 %indvars.iv.next
+  %tmp3 = load double, ptr %arrayidx3, align 8
   %add = fadd fast double %tmp3, %tmp3
   %mul = fmul fast double %add, %fadd
   %e1 = insertelement <2 x double> undef, double %add, i32 0
@@ -40,22 +39,21 @@ for.body:                                         ; preds = %for.body, %entry
   %e7 = insertelement <2 x double> undef, double %mul, i32 0
   %e8 = insertelement <2 x double> %e7, double %mul, i32 1
   %e9 = fmul fast <2 x double>  %addx, %add3
-  store <2 x double> %e9, <2 x double>* %tmp, align 8
+  store <2 x double> %e9, ptr %arrayidx1, align 8
   %e10 = extractelement <2 x double> %add3, i32 0
   %mul3 = fmul fast double %mul, %e10
   %add4 = fadd fast double %mul3, %mul
-  store double %add4, double* %arrayidx2, align 8
+  store double %add4, ptr %arrayidx2, align 8
   %exitcond = icmp eq i64 %indvars.iv.next, 25
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:                                          ; preds = %for.body
   ret void
 }
-define void @foo_2s(float* %src) {
+define void @foo_2s(ptr %src) {
 entry:
-  %arrayidx1 = getelementptr inbounds float, float* %src, i64 5
-  %arrayidx2 = getelementptr inbounds float, float* %src, i64 11
-  %tmp = bitcast float* %arrayidx1 to <2 x float>*
+  %arrayidx1 = getelementptr inbounds float, ptr %src, i64 5
+  %arrayidx2 = getelementptr inbounds float, ptr %src, i64 11
   br label %for.body
 
 ; CHECK-LABEL: %for.body
@@ -65,8 +63,8 @@ entry:
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-  %arrayidx3 = getelementptr inbounds float, float* %src, i64 %indvars.iv.next
-  %tmp1 = load float, float* %arrayidx3, align 8
+  %arrayidx3 = getelementptr inbounds float, ptr %src, i64 %indvars.iv.next
+  %tmp1 = load float, ptr %arrayidx3, align 8
   %add = fadd fast float %tmp1, %tmp1
   %mul = fmul fast float %add, %add
   %e1 = insertelement <2 x float> undef, float %add, i32 0
@@ -83,22 +81,21 @@ for.body:                                         ; preds = %for.body, %entry
   %e7 = insertelement <2 x float> undef, float %mul, i32 0
   %e8 = insertelement <2 x float> %e7, float %mul, i32 1
   %e9 = fmul fast <2 x float>  %addx, %add3
-  store <2 x float> %e9, <2 x float>* %tmp, align 8
+  store <2 x float> %e9, ptr %arrayidx1, align 8
   %e10 = extractelement <2 x float> %add3, i32 0
   %mul3 = fmul fast float %mul, %e10
   %add4 = fadd fast float %mul3, %mul
-  store float %add4, float* %arrayidx2, align 8
+  store float %add4, ptr %arrayidx2, align 8
   %exitcond = icmp eq i64 %indvars.iv.next, 25
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:                                          ; preds = %for.body
   ret void
 }
-define void @foo_4s(float* %src) {
+define void @foo_4s(ptr %src) {
 entry:
-  %arrayidx1 = getelementptr inbounds float, float* %src, i64 5
-  %arrayidx2 = getelementptr inbounds float, float* %src, i64 11
-  %tmp = bitcast float* %arrayidx1 to <4 x float>*
+  %arrayidx1 = getelementptr inbounds float, ptr %src, i64 5
+  %arrayidx2 = getelementptr inbounds float, ptr %src, i64 11
   br label %for.body
 
 ; CHECK-LABEL: %for.body
@@ -107,8 +104,8 @@ entry:
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-  %arrayidx3 = getelementptr inbounds float, float* %src, i64 %indvars.iv.next
-  %tmp1 = load float, float* %arrayidx3, align 8
+  %arrayidx3 = getelementptr inbounds float, ptr %src, i64 %indvars.iv.next
+  %tmp1 = load float, ptr %arrayidx3, align 8
   %add = fadd fast float %tmp1, %tmp1
   %mul = fmul fast float %add, %add
   %e1 = insertelement <4 x float> undef, float %add, i32 0
@@ -125,10 +122,10 @@ for.body:                                         ; preds = %for.body, %entry
   %e7 = insertelement <4 x float> undef, float %mul, i32 0
   %e8 = insertelement <4 x float> %e7, float %mul, i32 1
   %e9 = fmul fast <4 x float>  %addx, %add3
-  store <4 x float> %e9, <4 x float>* %tmp, align 8
+  store <4 x float> %e9, ptr %arrayidx1, align 8
   %e10 = extractelement <4 x float> %add3, i32 0
   %mul3 = fmul fast float %mul, %e10
-  store float %mul3, float* %arrayidx2, align 8
+  store float %mul3, ptr %arrayidx2, align 8
   %exitcond = icmp eq i64 %indvars.iv.next, 25
   br i1 %exitcond, label %for.end, label %for.body
 
@@ -137,7 +134,7 @@ for.end:                                          ; preds = %for.body
 }
 
 define void @indexed_2s(<2 x float> %shuf, <2 x float> %add,
-                        <2 x float>* %pmul, <2 x float>* %pret) {
+                        ptr %pmul, ptr %pret) {
 ; CHECK-LABEL: %entry
 ; CHECK: for.body
 ; CHECK: fmla.2s {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}[0]
@@ -148,21 +145,21 @@ entry:
 
 for.body:
   %i = phi i64 [ 0, %entry ], [ %inext, %for.body ]
-  %pmul_i = getelementptr inbounds <2 x float>, <2 x float>* %pmul, i64 %i
-  %pret_i = getelementptr inbounds <2 x float>, <2 x float>* %pret, i64 %i
+  %pmul_i = getelementptr inbounds <2 x float>, ptr %pmul, i64 %i
+  %pret_i = getelementptr inbounds <2 x float>, ptr %pret, i64 %i
 
-  %mul_i = load <2 x float>, <2 x float>* %pmul_i
+  %mul_i = load <2 x float>, ptr %pmul_i
 
   %mul = fmul fast <2 x float> %mul_i, %shuffle
   %muladd = fadd fast <2 x float> %mul, %add
 
-  store <2 x float> %muladd, <2 x float>* %pret_i, align 16
+  store <2 x float> %muladd, ptr %pret_i, align 16
   %inext = add i64 %i, 1
   br label %for.body
 }
 
 define void @indexed_2d(<2 x double> %shuf, <2 x double> %add,
-                        <2 x double>* %pmul, <2 x double>* %pret) {
+                        ptr %pmul, ptr %pret) {
 ; CHECK-LABEL: %entry
 ; CHECK: for.body
 ; CHECK: fmla.2d {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}[0]
@@ -173,21 +170,21 @@ entry:
 
 for.body:
   %i = phi i64 [ 0, %entry ], [ %inext, %for.body ]
-  %pmul_i = getelementptr inbounds <2 x double>, <2 x double>* %pmul, i64 %i
-  %pret_i = getelementptr inbounds <2 x double>, <2 x double>* %pret, i64 %i
+  %pmul_i = getelementptr inbounds <2 x double>, ptr %pmul, i64 %i
+  %pret_i = getelementptr inbounds <2 x double>, ptr %pret, i64 %i
 
-  %mul_i = load <2 x double>, <2 x double>* %pmul_i
+  %mul_i = load <2 x double>, ptr %pmul_i
 
   %mul = fmul fast <2 x double> %mul_i, %shuffle
   %muladd = fadd fast <2 x double> %mul, %add
 
-  store <2 x double> %muladd, <2 x double>* %pret_i, align 16
+  store <2 x double> %muladd, ptr %pret_i, align 16
   %inext = add i64 %i, 1
   br label %for.body
 }
 
 define void @indexed_4s(<4 x float> %shuf, <4 x float> %add,
-                        <4 x float>* %pmul, <4 x float>* %pret) {
+                        ptr %pmul, ptr %pret) {
 ; CHECK-LABEL: %entry
 ; CHECK: for.body
 ; CHECK: fmla.4s {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}[0]
@@ -198,21 +195,21 @@ entry:
 
 for.body:
   %i = phi i64 [ 0, %entry ], [ %inext, %for.body ]
-  %pmul_i = getelementptr inbounds <4 x float>, <4 x float>* %pmul, i64 %i
-  %pret_i = getelementptr inbounds <4 x float>, <4 x float>* %pret, i64 %i
+  %pmul_i = getelementptr inbounds <4 x float>, ptr %pmul, i64 %i
+  %pret_i = getelementptr inbounds <4 x float>, ptr %pret, i64 %i
 
-  %mul_i = load <4 x float>, <4 x float>* %pmul_i
+  %mul_i = load <4 x float>, ptr %pmul_i
 
   %mul = fmul fast <4 x float> %mul_i, %shuffle
   %muladd = fadd fast <4 x float> %mul, %add
 
-  store <4 x float> %muladd, <4 x float>* %pret_i, align 16
+  store <4 x float> %muladd, ptr %pret_i, align 16
   %inext = add i64 %i, 1
   br label %for.body
 }
 
 define void @indexed_4h(<4 x half> %shuf, <4 x half> %add,
-                        <4 x half>* %pmul, <4 x half>* %pret) {
+                        ptr %pmul, ptr %pret) {
 ; CHECK-LABEL: %entry
 ; CHECK: for.body
 ; CHECK: fmla.4h {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}[0]
@@ -223,21 +220,21 @@ entry:
 
 for.body:
   %i = phi i64 [ 0, %entry ], [ %inext, %for.body ]
-  %pmul_i = getelementptr inbounds <4 x half>, <4 x half>* %pmul, i64 %i
-  %pret_i = getelementptr inbounds <4 x half>, <4 x half>* %pret, i64 %i
+  %pmul_i = getelementptr inbounds <4 x half>, ptr %pmul, i64 %i
+  %pret_i = getelementptr inbounds <4 x half>, ptr %pret, i64 %i
 
-  %mul_i = load <4 x half>, <4 x half>* %pmul_i
+  %mul_i = load <4 x half>, ptr %pmul_i
 
   %mul = fmul fast <4 x half> %mul_i, %shuffle
   %muladd = fadd fast <4 x half> %mul, %add
 
-  store <4 x half> %muladd, <4 x half>* %pret_i, align 16
+  store <4 x half> %muladd, ptr %pret_i, align 16
   %inext = add i64 %i, 1
   br label %for.body
 }
 
 define void @indexed_8h(<8 x half> %shuf, <8 x half> %add,
-                        <8 x half>* %pmul, <8 x half>* %pret) {
+                        ptr %pmul, ptr %pret) {
 ; CHECK-LABEL: %entry
 ; CHECK: for.body
 ; CHECK: fmla.8h {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}[0]
@@ -248,15 +245,15 @@ entry:
 
 for.body:
   %i = phi i64 [ 0, %entry ], [ %inext, %for.body ]
-  %pmul_i = getelementptr inbounds <8 x half>, <8 x half>* %pmul, i64 %i
-  %pret_i = getelementptr inbounds <8 x half>, <8 x half>* %pret, i64 %i
+  %pmul_i = getelementptr inbounds <8 x half>, ptr %pmul, i64 %i
+  %pret_i = getelementptr inbounds <8 x half>, ptr %pret, i64 %i
 
-  %mul_i = load <8 x half>, <8 x half>* %pmul_i
+  %mul_i = load <8 x half>, ptr %pmul_i
 
   %mul = fmul fast <8 x half> %mul_i, %shuffle
   %muladd = fadd fast <8 x half> %mul, %add
 
-  store <8 x half> %muladd, <8 x half>* %pret_i, align 16
+  store <8 x half> %muladd, ptr %pret_i, align 16
   %inext = add i64 %i, 1
   br label %for.body
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fml-combines.ll b/llvm/test/CodeGen/AArch64/arm64-fml-combines.ll
index 2b30c05d83c97..c9a7ebb7c98be 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fml-combines.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fml-combines.ll
@@ -1,11 +1,10 @@
 ; RUN: llc < %s -O3 -mtriple=arm64-apple-ios -enable-unsafe-fp-math -mattr=+fullfp16 | FileCheck %s
 ; RUN: llc < %s -O3 -mtriple=arm64-apple-ios -fp-contract=fast -mattr=+fullfp16 | FileCheck %s
 
-define void @foo_2d(double* %src) {
+define void @foo_2d(ptr %src) {
 entry:
-  %arrayidx1 = getelementptr inbounds double, double* %src, i64 5
-  %arrayidx2 = getelementptr inbounds double, double* %src, i64 11
-  %tmp = bitcast double* %arrayidx1 to <2 x double>*
+  %arrayidx1 = getelementptr inbounds double, ptr %src, i64 5
+  %arrayidx2 = getelementptr inbounds double, ptr %src, i64 11
   br label %for.body
 
 ; CHECK-LABEL: %for.body
@@ -15,8 +14,8 @@ entry:
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %indvars.iv.next = sub nuw nsw i64 %indvars.iv, 1
-  %arrayidx3 = getelementptr inbounds double, double* %src, i64 %indvars.iv.next
-  %tmp1 = load double, double* %arrayidx3, align 8
+  %arrayidx3 = getelementptr inbounds double, ptr %src, i64 %indvars.iv.next
+  %tmp1 = load double, ptr %arrayidx3, align 8
   %add = fadd fast double %tmp1, %tmp1
   %mul = fmul fast double %add, %add
   %e1 = insertelement <2 x double> undef, double %add, i32 0
@@ -33,22 +32,21 @@ for.body:                                         ; preds = %for.body, %entry
   %e7 = insertelement <2 x double> undef, double %mul, i32 0
   %e8 = insertelement <2 x double> %e7, double %mul, i32 1
   %e9 = fmul fast <2 x double>  %subx, %sub3
-  store <2 x double> %e9, <2 x double>* %tmp, align 8
+  store <2 x double> %e9, ptr %arrayidx1, align 8
   %e10 = extractelement <2 x double> %sub3, i32 0
   %mul3 = fmul fast double %mul, %e10
   %sub4 = fsub fast double %mul, %mul3
-  store double %sub4, double* %arrayidx2, align 8
+  store double %sub4, ptr %arrayidx2, align 8
   %exitcond = icmp eq i64 %indvars.iv.next, 25
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:                                          ; preds = %for.body
   ret void
 }
-define void @foo_2s(float* %src) {
+define void @foo_2s(ptr %src) {
 entry:
-  %arrayidx1 = getelementptr inbounds float, float* %src, i64 5
-  %arrayidx2 = getelementptr inbounds float, float* %src, i64 11
-  %tmp = bitcast float* %arrayidx1 to <2 x float>*
+  %arrayidx1 = getelementptr inbounds float, ptr %src, i64 5
+  %arrayidx2 = getelementptr inbounds float, ptr %src, i64 11
   br label %for.body
 
 ; CHECK-LABEL: %for.body
@@ -58,8 +56,8 @@ entry:
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-  %arrayidx3 = getelementptr inbounds float, float* %src, i64 %indvars.iv.next
-  %tmp1 = load float, float* %arrayidx3, align 8
+  %arrayidx3 = getelementptr inbounds float, ptr %src, i64 %indvars.iv.next
+  %tmp1 = load float, ptr %arrayidx3, align 8
   %add = fadd fast float %tmp1, %tmp1
   %mul = fmul fast float %add, %add
   %e1 = insertelement <2 x float> undef, float %add, i32 0
@@ -76,22 +74,21 @@ for.body:                                         ; preds = %for.body, %entry
   %e7 = insertelement <2 x float> undef, float %mul, i32 0
   %e8 = insertelement <2 x float> %e7, float %mul, i32 1
   %e9 = fmul fast <2 x float>  %addx, %add3
-  store <2 x float> %e9, <2 x float>* %tmp, align 8
+  store <2 x float> %e9, ptr %arrayidx1, align 8
   %e10 = extractelement <2 x float> %add3, i32 0
   %mul3 = fmul fast float %mul, %e10
   %add4 = fsub fast float %mul, %mul3
-  store float %add4, float* %arrayidx2, align 8
+  store float %add4, ptr %arrayidx2, align 8
   %exitcond = icmp eq i64 %indvars.iv.next, 25
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:                                          ; preds = %for.body
   ret void
 }
-define void @foo_4s(float* %src) {
+define void @foo_4s(ptr %src) {
 entry:
-  %arrayidx1 = getelementptr inbounds float, float* %src, i64 5
-  %arrayidx2 = getelementptr inbounds float, float* %src, i64 11
-  %tmp = bitcast float* %arrayidx1 to <4 x float>*
+  %arrayidx1 = getelementptr inbounds float, ptr %src, i64 5
+  %arrayidx2 = getelementptr inbounds float, ptr %src, i64 11
   br label %for.body
 
 ; CHECK-LABEL: %for.body
@@ -100,8 +97,8 @@ entry:
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-  %arrayidx3 = getelementptr inbounds float, float* %src, i64 %indvars.iv.next
-  %tmp1 = load float, float* %arrayidx3, align 8
+  %arrayidx3 = getelementptr inbounds float, ptr %src, i64 %indvars.iv.next
+  %tmp1 = load float, ptr %arrayidx3, align 8
   %add = fadd fast float %tmp1, %tmp1
   %mul = fmul fast float %add, %add
   %e1 = insertelement <4 x float> undef, float %add, i32 0
@@ -118,10 +115,10 @@ for.body:                                         ; preds = %for.body, %entry
   %e7 = insertelement <4 x float> undef, float %mul, i32 0
   %e8 = insertelement <4 x float> %e7, float %mul, i32 1
   %e9 = fmul fast <4 x float>  %addx, %add3
-  store <4 x float> %e9, <4 x float>* %tmp, align 8
+  store <4 x float> %e9, ptr %arrayidx1, align 8
   %e10 = extractelement <4 x float> %add3, i32 0
   %mul3 = fmul fast float %mul, %e10
-  store float %mul3, float* %arrayidx2, align 8
+  store float %mul3, ptr %arrayidx2, align 8
   %exitcond = icmp eq i64 %indvars.iv.next, 25
   br i1 %exitcond, label %for.end, label %for.body
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fmuladd.ll b/llvm/test/CodeGen/AArch64/arm64-fmuladd.ll
index 67e245a7bfa99..16ad3a0bc99e6 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fmuladd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fmuladd.ll
@@ -1,80 +1,80 @@
 ; RUN: llc < %s -asm-verbose=false -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define float @test_f32(float* %A, float* %B, float* %C) nounwind {
+define float @test_f32(ptr %A, ptr %B, ptr %C) nounwind {
 ;CHECK-LABEL: test_f32:
 ;CHECK: fmadd
 ;CHECK-NOT: fmadd
-  %tmp1 = load float, float* %A
-  %tmp2 = load float, float* %B
-  %tmp3 = load float, float* %C
+  %tmp1 = load float, ptr %A
+  %tmp2 = load float, ptr %B
+  %tmp3 = load float, ptr %C
   %tmp4 = call float @llvm.fmuladd.f32(float %tmp1, float %tmp2, float %tmp3)
   ret float %tmp4
 }
 
-define <2 x float> @test_v2f32(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
+define <2 x float> @test_v2f32(ptr %A, ptr %B, ptr %C) nounwind {
 ;CHECK-LABEL: test_v2f32:
 ;CHECK: fmla.2s
 ;CHECK-NOT: fmla.2s
-  %tmp1 = load <2 x float>, <2 x float>* %A
-  %tmp2 = load <2 x float>, <2 x float>* %B
-  %tmp3 = load <2 x float>, <2 x float>* %C
+  %tmp1 = load <2 x float>, ptr %A
+  %tmp2 = load <2 x float>, ptr %B
+  %tmp3 = load <2 x float>, ptr %C
   %tmp4 = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> %tmp1, <2 x float> %tmp2, <2 x float> %tmp3)
   ret <2 x float> %tmp4
 }
 
-define <4 x float> @test_v4f32(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
+define <4 x float> @test_v4f32(ptr %A, ptr %B, ptr %C) nounwind {
 ;CHECK-LABEL: test_v4f32:
 ;CHECK: fmla.4s
 ;CHECK-NOT: fmla.4s
-  %tmp1 = load <4 x float>, <4 x float>* %A
-  %tmp2 = load <4 x float>, <4 x float>* %B
-  %tmp3 = load <4 x float>, <4 x float>* %C
+  %tmp1 = load <4 x float>, ptr %A
+  %tmp2 = load <4 x float>, ptr %B
+  %tmp3 = load <4 x float>, ptr %C
   %tmp4 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %tmp1, <4 x float> %tmp2, <4 x float> %tmp3)
   ret <4 x float> %tmp4
 }
 
-define <8 x float> @test_v8f32(<8 x float>* %A, <8 x float>* %B, <8 x float>* %C) nounwind {
+define <8 x float> @test_v8f32(ptr %A, ptr %B, ptr %C) nounwind {
 ;CHECK-LABEL: test_v8f32:
 ;CHECK: fmla.4s
 ;CHECK: fmla.4s
 ;CHECK-NOT: fmla.4s
-  %tmp1 = load <8 x float>, <8 x float>* %A
-  %tmp2 = load <8 x float>, <8 x float>* %B
-  %tmp3 = load <8 x float>, <8 x float>* %C
+  %tmp1 = load <8 x float>, ptr %A
+  %tmp2 = load <8 x float>, ptr %B
+  %tmp3 = load <8 x float>, ptr %C
   %tmp4 = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> %tmp1, <8 x float> %tmp2, <8 x float> %tmp3)
   ret <8 x float> %tmp4
 }
 
-define double @test_f64(double* %A, double* %B, double* %C) nounwind {
+define double @test_f64(ptr %A, ptr %B, ptr %C) nounwind {
 ;CHECK-LABEL: test_f64:
 ;CHECK: fmadd
 ;CHECK-NOT: fmadd
-  %tmp1 = load double, double* %A
-  %tmp2 = load double, double* %B
-  %tmp3 = load double, double* %C
+  %tmp1 = load double, ptr %A
+  %tmp2 = load double, ptr %B
+  %tmp3 = load double, ptr %C
   %tmp4 = call double @llvm.fmuladd.f64(double %tmp1, double %tmp2, double %tmp3)
   ret double %tmp4
 }
 
-define <2 x double> @test_v2f64(<2 x double>* %A, <2 x double>* %B, <2 x double>* %C) nounwind {
+define <2 x double> @test_v2f64(ptr %A, ptr %B, ptr %C) nounwind {
 ;CHECK-LABEL: test_v2f64:
 ;CHECK: fmla.2d
 ;CHECK-NOT: fmla.2d
-  %tmp1 = load <2 x double>, <2 x double>* %A
-  %tmp2 = load <2 x double>, <2 x double>* %B
-  %tmp3 = load <2 x double>, <2 x double>* %C
+  %tmp1 = load <2 x double>, ptr %A
+  %tmp2 = load <2 x double>, ptr %B
+  %tmp3 = load <2 x double>, ptr %C
   %tmp4 = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %tmp1, <2 x double> %tmp2, <2 x double> %tmp3)
   ret <2 x double> %tmp4
 }
 
-define <4 x double> @test_v4f64(<4 x double>* %A, <4 x double>* %B, <4 x double>* %C) nounwind {
+define <4 x double> @test_v4f64(ptr %A, ptr %B, ptr %C) nounwind {
 ;CHECK-LABEL: test_v4f64:
 ;CHECK: fmla.2d
 ;CHECK: fmla.2d
 ;CHECK-NOT: fmla.2d
-  %tmp1 = load <4 x double>, <4 x double>* %A
-  %tmp2 = load <4 x double>, <4 x double>* %B
-  %tmp3 = load <4 x double>, <4 x double>* %C
+  %tmp1 = load <4 x double>, ptr %A
+  %tmp2 = load <4 x double>, ptr %B
+  %tmp3 = load <4 x double>, ptr %C
   %tmp4 = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> %tmp1, <4 x double> %tmp2, <4 x double> %tmp3)
   ret <4 x double> %tmp4
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fold-address.ll b/llvm/test/CodeGen/AArch64/arm64-fold-address.ll
index 6d2ea17d3424b..1775f13cd5aad 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fold-address.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fold-address.ll
@@ -7,30 +7,25 @@
 
 @"OBJC_IVAR_$_UIScreen._bounds" = external hidden global i64, section "__DATA, __objc_ivar", align 8
 
-define hidden %struct.CGRect @nofold(%0* nocapture %self, i8* nocapture %_cmd) nounwind readonly optsize ssp {
+define hidden %struct.CGRect @nofold(ptr nocapture %self, ptr nocapture %_cmd) nounwind readonly optsize ssp {
 entry:
 ; CHECK-LABEL: nofold:
 ; CHECK: add x[[REG:[0-9]+]], x0, x{{[0-9]+}}
 ; CHECK: ldp d0, d1, [x[[REG]]]
 ; CHECK: ldp d2, d3, [x[[REG]], #16]
 ; CHECK: ret
-  %ivar = load i64, i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
-  %0 = bitcast %0* %self to i8*
-  %add.ptr = getelementptr inbounds i8, i8* %0, i64 %ivar
-  %add.ptr10.0 = bitcast i8* %add.ptr to double*
-  %tmp11 = load double, double* %add.ptr10.0, align 8
+  %ivar = load i64, ptr @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
+  %add.ptr = getelementptr inbounds i8, ptr %self, i64 %ivar
+  %tmp11 = load double, ptr %add.ptr, align 8
   %add.ptr.sum = add i64 %ivar, 8
-  %add.ptr10.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum
-  %1 = bitcast i8* %add.ptr10.1 to double*
-  %tmp12 = load double, double* %1, align 8
+  %add.ptr10.1 = getelementptr inbounds i8, ptr %self, i64 %add.ptr.sum
+  %tmp12 = load double, ptr %add.ptr10.1, align 8
   %add.ptr.sum17 = add i64 %ivar, 16
-  %add.ptr4.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum17
-  %add.ptr4.1.0 = bitcast i8* %add.ptr4.1 to double*
-  %tmp = load double, double* %add.ptr4.1.0, align 8
+  %add.ptr4.1 = getelementptr inbounds i8, ptr %self, i64 %add.ptr.sum17
+  %tmp = load double, ptr %add.ptr4.1, align 8
   %add.ptr4.1.sum = add i64 %ivar, 24
-  %add.ptr4.1.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr4.1.sum
-  %2 = bitcast i8* %add.ptr4.1.1 to double*
-  %tmp5 = load double, double* %2, align 8
+  %add.ptr4.1.1 = getelementptr inbounds i8, ptr %self, i64 %add.ptr4.1.sum
+  %tmp5 = load double, ptr %add.ptr4.1.1, align 8
   %insert14 = insertvalue %struct.CGPoint undef, double %tmp11, 0
   %insert16 = insertvalue %struct.CGPoint %insert14, double %tmp12, 1
   %insert = insertvalue %struct.CGRect undef, %struct.CGPoint %insert16, 0
@@ -40,26 +35,21 @@ entry:
   ret %struct.CGRect %insert3
 }
 
-define hidden %struct.CGRect @fold(%0* nocapture %self, i8* nocapture %_cmd) nounwind readonly optsize ssp {
+define hidden %struct.CGRect @fold(ptr nocapture %self, ptr nocapture %_cmd) nounwind readonly optsize ssp {
 entry:
 ; CHECK-LABEL: fold:
 ; CHECK: ldr d0, [x0, x{{[0-9]+}}]
 ; CHECK-NOT: add x0, x0, x1
 ; CHECK: ret
-  %ivar = load i64, i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
-  %0 = bitcast %0* %self to i8*
-  %add.ptr = getelementptr inbounds i8, i8* %0, i64 %ivar
-  %add.ptr10.0 = bitcast i8* %add.ptr to double*
-  %tmp11 = load double, double* %add.ptr10.0, align 8
-  %add.ptr10.1 = getelementptr inbounds i8, i8* %0, i64 %ivar
-  %1 = bitcast i8* %add.ptr10.1 to double*
-  %tmp12 = load double, double* %1, align 8
-  %add.ptr4.1 = getelementptr inbounds i8, i8* %0, i64 %ivar
-  %add.ptr4.1.0 = bitcast i8* %add.ptr4.1 to double*
-  %tmp = load double, double* %add.ptr4.1.0, align 8
-  %add.ptr4.1.1 = getelementptr inbounds i8, i8* %0, i64 %ivar
-  %2 = bitcast i8* %add.ptr4.1.1 to double*
-  %tmp5 = load double, double* %2, align 8
+  %ivar = load i64, ptr @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
+  %add.ptr = getelementptr inbounds i8, ptr %self, i64 %ivar
+  %tmp11 = load double, ptr %add.ptr, align 8
+  %add.ptr10.1 = getelementptr inbounds i8, ptr %self, i64 %ivar
+  %tmp12 = load double, ptr %add.ptr10.1, align 8
+  %add.ptr4.1 = getelementptr inbounds i8, ptr %self, i64 %ivar
+  %tmp = load double, ptr %add.ptr4.1, align 8
+  %add.ptr4.1.1 = getelementptr inbounds i8, ptr %self, i64 %ivar
+  %tmp5 = load double, ptr %add.ptr4.1.1, align 8
   %insert14 = insertvalue %struct.CGPoint undef, double %tmp11, 0
   %insert16 = insertvalue %struct.CGPoint %insert14, double %tmp12, 1
   %insert = insertvalue %struct.CGRect undef, %struct.CGPoint %insert16, 0

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fold-lsl.ll b/llvm/test/CodeGen/AArch64/arm64-fold-lsl.ll
index 5e4aae3df8365..ff9374f254f55 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fold-lsl.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fold-lsl.ll
@@ -6,137 +6,137 @@
 %struct.b = type [256 x i32]
 %struct.c = type [256 x i64]
 
-define i16 @load_halfword(%struct.a* %ctx, i32 %xor72) nounwind {
+define i16 @load_halfword(ptr %ctx, i32 %xor72) nounwind {
 ; CHECK-LABEL: load_halfword:
 ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
 ; CHECK: ldrh w0, [x0, [[REG]], lsl #1]
   %shr81 = lshr i32 %xor72, 9
   %conv82 = zext i32 %shr81 to i64
   %idxprom83 = and i64 %conv82, 255
-  %arrayidx86 = getelementptr inbounds %struct.a, %struct.a* %ctx, i64 0, i64 %idxprom83
-  %result = load i16, i16* %arrayidx86, align 2
+  %arrayidx86 = getelementptr inbounds %struct.a, ptr %ctx, i64 0, i64 %idxprom83
+  %result = load i16, ptr %arrayidx86, align 2
   ret i16 %result
 }
 
-define i32 @load_word(%struct.b* %ctx, i32 %xor72) nounwind {
+define i32 @load_word(ptr %ctx, i32 %xor72) nounwind {
 ; CHECK-LABEL: load_word:
 ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
 ; CHECK: ldr w0, [x0, [[REG]], lsl #2]
   %shr81 = lshr i32 %xor72, 9
   %conv82 = zext i32 %shr81 to i64
   %idxprom83 = and i64 %conv82, 255
-  %arrayidx86 = getelementptr inbounds %struct.b, %struct.b* %ctx, i64 0, i64 %idxprom83
-  %result = load i32, i32* %arrayidx86, align 4
+  %arrayidx86 = getelementptr inbounds %struct.b, ptr %ctx, i64 0, i64 %idxprom83
+  %result = load i32, ptr %arrayidx86, align 4
   ret i32 %result
 }
 
-define i64 @load_doubleword(%struct.c* %ctx, i32 %xor72) nounwind {
+define i64 @load_doubleword(ptr %ctx, i32 %xor72) nounwind {
 ; CHECK-LABEL: load_doubleword:
 ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
 ; CHECK: ldr x0, [x0, [[REG]], lsl #3]
   %shr81 = lshr i32 %xor72, 9
   %conv82 = zext i32 %shr81 to i64
   %idxprom83 = and i64 %conv82, 255
-  %arrayidx86 = getelementptr inbounds %struct.c, %struct.c* %ctx, i64 0, i64 %idxprom83
-  %result = load i64, i64* %arrayidx86, align 8
+  %arrayidx86 = getelementptr inbounds %struct.c, ptr %ctx, i64 0, i64 %idxprom83
+  %result = load i64, ptr %arrayidx86, align 8
   ret i64 %result
 }
 
-define void @store_halfword(%struct.a* %ctx, i32 %xor72, i16 %val) nounwind {
+define void @store_halfword(ptr %ctx, i32 %xor72, i16 %val) nounwind {
 ; CHECK-LABEL: store_halfword:
 ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
 ; CHECK: strh w2, [x0, [[REG]], lsl #1]
   %shr81 = lshr i32 %xor72, 9
   %conv82 = zext i32 %shr81 to i64
   %idxprom83 = and i64 %conv82, 255
-  %arrayidx86 = getelementptr inbounds %struct.a, %struct.a* %ctx, i64 0, i64 %idxprom83
-  store i16 %val, i16* %arrayidx86, align 8
+  %arrayidx86 = getelementptr inbounds %struct.a, ptr %ctx, i64 0, i64 %idxprom83
+  store i16 %val, ptr %arrayidx86, align 8
   ret void
 }
 
-define void @store_word(%struct.b* %ctx, i32 %xor72, i32 %val) nounwind {
+define void @store_word(ptr %ctx, i32 %xor72, i32 %val) nounwind {
 ; CHECK-LABEL: store_word:
 ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
 ; CHECK: str w2, [x0, [[REG]], lsl #2]
   %shr81 = lshr i32 %xor72, 9
   %conv82 = zext i32 %shr81 to i64
   %idxprom83 = and i64 %conv82, 255
-  %arrayidx86 = getelementptr inbounds %struct.b, %struct.b* %ctx, i64 0, i64 %idxprom83
-  store i32 %val, i32* %arrayidx86, align 8
+  %arrayidx86 = getelementptr inbounds %struct.b, ptr %ctx, i64 0, i64 %idxprom83
+  store i32 %val, ptr %arrayidx86, align 8
   ret void
 }
 
-define void @store_doubleword(%struct.c* %ctx, i32 %xor72, i64 %val) nounwind {
+define void @store_doubleword(ptr %ctx, i32 %xor72, i64 %val) nounwind {
 ; CHECK-LABEL: store_doubleword:
 ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
 ; CHECK: str x2, [x0, [[REG]], lsl #3]
   %shr81 = lshr i32 %xor72, 9
   %conv82 = zext i32 %shr81 to i64
   %idxprom83 = and i64 %conv82, 255
-  %arrayidx86 = getelementptr inbounds %struct.c, %struct.c* %ctx, i64 0, i64 %idxprom83
-  store i64 %val, i64* %arrayidx86, align 8
+  %arrayidx86 = getelementptr inbounds %struct.c, ptr %ctx, i64 0, i64 %idxprom83
+  store i64 %val, ptr %arrayidx86, align 8
   ret void
 }
 
 ; Check that we combine a shift into the offset instead of using a narrower load
 ; when we have a load followed by a trunc
 
-define i32 @load_doubleword_trunc_word(i64* %ptr, i64 %off) {
+define i32 @load_doubleword_trunc_word(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_doubleword_trunc_word:
 ; CHECK: ldr x0, [x0, x1, lsl #3]
 entry:
-  %idx = getelementptr inbounds i64, i64* %ptr, i64 %off
-  %x = load i64, i64* %idx, align 8
+  %idx = getelementptr inbounds i64, ptr %ptr, i64 %off
+  %x = load i64, ptr %idx, align 8
   %trunc = trunc i64 %x to i32
   ret i32 %trunc
 }
 
-define i16 @load_doubleword_trunc_halfword(i64* %ptr, i64 %off) {
+define i16 @load_doubleword_trunc_halfword(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_doubleword_trunc_halfword:
 ; CHECK: ldr x0, [x0, x1, lsl #3]
 entry:
-  %idx = getelementptr inbounds i64, i64* %ptr, i64 %off
-  %x = load i64, i64* %idx, align 8
+  %idx = getelementptr inbounds i64, ptr %ptr, i64 %off
+  %x = load i64, ptr %idx, align 8
   %trunc = trunc i64 %x to i16
   ret i16 %trunc
 }
 
-define i8 @load_doubleword_trunc_byte(i64* %ptr, i64 %off) {
+define i8 @load_doubleword_trunc_byte(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_doubleword_trunc_byte:
 ; CHECK: ldr x0, [x0, x1, lsl #3]
 entry:
-  %idx = getelementptr inbounds i64, i64* %ptr, i64 %off
-  %x = load i64, i64* %idx, align 8
+  %idx = getelementptr inbounds i64, ptr %ptr, i64 %off
+  %x = load i64, ptr %idx, align 8
   %trunc = trunc i64 %x to i8
   ret i8 %trunc
 }
 
-define i16 @load_word_trunc_halfword(i32* %ptr, i64 %off) {
+define i16 @load_word_trunc_halfword(ptr %ptr, i64 %off) {
 entry:
 ; CHECK-LABEL: load_word_trunc_halfword:
 ; CHECK: ldr w0, [x0, x1, lsl #2]
-  %idx = getelementptr inbounds i32, i32* %ptr, i64 %off
-  %x = load i32, i32* %idx, align 8
+  %idx = getelementptr inbounds i32, ptr %ptr, i64 %off
+  %x = load i32, ptr %idx, align 8
   %trunc = trunc i32 %x to i16
   ret i16 %trunc
 }
 
-define i8 @load_word_trunc_byte(i32* %ptr, i64 %off) {
+define i8 @load_word_trunc_byte(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_word_trunc_byte:
 ; CHECK: ldr w0, [x0, x1, lsl #2]
 entry:
- %idx = getelementptr inbounds i32, i32* %ptr, i64 %off
- %x = load i32, i32* %idx, align 8
+ %idx = getelementptr inbounds i32, ptr %ptr, i64 %off
+ %x = load i32, ptr %idx, align 8
  %trunc = trunc i32 %x to i8
  ret i8 %trunc
 }
 
-define i8 @load_halfword_trunc_byte(i16* %ptr, i64 %off) {
+define i8 @load_halfword_trunc_byte(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_halfword_trunc_byte:
 ; CHECK: ldrh w0, [x0, x1, lsl #1]
 entry:
- %idx = getelementptr inbounds i16, i16* %ptr, i64 %off
- %x = load i16, i16* %idx, align 8
+ %idx = getelementptr inbounds i16, ptr %ptr, i64 %off
+ %x = load i16, ptr %idx, align 8
  %trunc = trunc i16 %x to i8
  ret i8 %trunc
 }
@@ -144,73 +144,73 @@ entry:
 ; Check that we do use a narrower load, and so don't combine the shift, when
 ; the loaded value is zero-extended.
 
-define i64 @load_doubleword_trunc_word_zext(i64* %ptr, i64 %off) {
+define i64 @load_doubleword_trunc_word_zext(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_doubleword_trunc_word_zext:
 ; CHECK: lsl [[REG:x[0-9]+]], x1, #3
 ; CHECK: ldr w0, [x0, [[REG]]]
 entry:
-  %idx = getelementptr inbounds i64, i64* %ptr, i64 %off
-  %x = load i64, i64* %idx, align 8
+  %idx = getelementptr inbounds i64, ptr %ptr, i64 %off
+  %x = load i64, ptr %idx, align 8
   %trunc = trunc i64 %x to i32
   %ext = zext i32 %trunc to i64
   ret i64 %ext
 }
 
-define i64 @load_doubleword_trunc_halfword_zext(i64* %ptr, i64 %off) {
+define i64 @load_doubleword_trunc_halfword_zext(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_doubleword_trunc_halfword_zext:
 ; CHECK: lsl [[REG:x[0-9]+]], x1, #3
 ; CHECK: ldrh w0, [x0, [[REG]]]
 entry:
-  %idx = getelementptr inbounds i64, i64* %ptr, i64 %off
-  %x = load i64, i64* %idx, align 8
+  %idx = getelementptr inbounds i64, ptr %ptr, i64 %off
+  %x = load i64, ptr %idx, align 8
   %trunc = trunc i64 %x to i16
   %ext = zext i16 %trunc to i64
   ret i64 %ext
 }
 
-define i64 @load_doubleword_trunc_byte_zext(i64* %ptr, i64 %off) {
+define i64 @load_doubleword_trunc_byte_zext(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_doubleword_trunc_byte_zext:
 ; CHECK: lsl [[REG:x[0-9]+]], x1, #3
 ; CHECK: ldrb w0, [x0, [[REG]]]
 entry:
-  %idx = getelementptr inbounds i64, i64* %ptr, i64 %off
-  %x = load i64, i64* %idx, align 8
+  %idx = getelementptr inbounds i64, ptr %ptr, i64 %off
+  %x = load i64, ptr %idx, align 8
   %trunc = trunc i64 %x to i8
   %ext = zext i8 %trunc to i64
   ret i64 %ext
 }
 
-define i64 @load_word_trunc_halfword_zext(i32* %ptr, i64 %off) {
+define i64 @load_word_trunc_halfword_zext(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_word_trunc_halfword_zext:
 ; CHECK: lsl [[REG:x[0-9]+]], x1, #2
 ; CHECK: ldrh w0, [x0, [[REG]]]
 entry:
-  %idx = getelementptr inbounds i32, i32* %ptr, i64 %off
-  %x = load i32, i32* %idx, align 8
+  %idx = getelementptr inbounds i32, ptr %ptr, i64 %off
+  %x = load i32, ptr %idx, align 8
   %trunc = trunc i32 %x to i16
   %ext = zext i16 %trunc to i64
   ret i64 %ext
 }
 
-define i64 @load_word_trunc_byte_zext(i32* %ptr, i64 %off) {
+define i64 @load_word_trunc_byte_zext(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_word_trunc_byte_zext:
 ; CHECK: lsl [[REG:x[0-9]+]], x1, #2
 ; CHECK: ldrb w0, [x0, [[REG]]]
 entry:
- %idx = getelementptr inbounds i32, i32* %ptr, i64 %off
- %x = load i32, i32* %idx, align 8
+ %idx = getelementptr inbounds i32, ptr %ptr, i64 %off
+ %x = load i32, ptr %idx, align 8
  %trunc = trunc i32 %x to i8
  %ext = zext i8 %trunc to i64
  ret i64 %ext
 }
 
-define i64 @load_halfword_trunc_byte_zext(i16* %ptr, i64 %off) {
+define i64 @load_halfword_trunc_byte_zext(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_halfword_trunc_byte_zext:
 ; CHECK: lsl [[REG:x[0-9]+]], x1, #1
 ; CHECK: ldrb w0, [x0, [[REG]]]
 entry:
- %idx = getelementptr inbounds i16, i16* %ptr, i64 %off
- %x = load i16, i16* %idx, align 8
+ %idx = getelementptr inbounds i16, ptr %ptr, i64 %off
+ %x = load i16, ptr %idx, align 8
  %trunc = trunc i16 %x to i8
  %ext = zext i8 %trunc to i64
  ret i64 %ext
@@ -219,73 +219,73 @@ entry:
 ; Check that we do use a narrower load, and so don't combine the shift, when
 ; the loaded value is sign-extended.
 
-define i64 @load_doubleword_trunc_word_sext(i64* %ptr, i64 %off) {
+define i64 @load_doubleword_trunc_word_sext(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_doubleword_trunc_word_sext:
 ; CHECK: lsl [[REG:x[0-9]+]], x1, #3
 ; CHECK: ldrsw x0, [x0, [[REG]]]
 entry:
-  %idx = getelementptr inbounds i64, i64* %ptr, i64 %off
-  %x = load i64, i64* %idx, align 8
+  %idx = getelementptr inbounds i64, ptr %ptr, i64 %off
+  %x = load i64, ptr %idx, align 8
   %trunc = trunc i64 %x to i32
   %ext = sext i32 %trunc to i64
   ret i64 %ext
 }
 
-define i64 @load_doubleword_trunc_halfword_sext(i64* %ptr, i64 %off) {
+define i64 @load_doubleword_trunc_halfword_sext(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_doubleword_trunc_halfword_sext:
 ; CHECK: lsl [[REG:x[0-9]+]], x1, #3
 ; CHECK: ldrsh x0, [x0, [[REG]]]
 entry:
-  %idx = getelementptr inbounds i64, i64* %ptr, i64 %off
-  %x = load i64, i64* %idx, align 8
+  %idx = getelementptr inbounds i64, ptr %ptr, i64 %off
+  %x = load i64, ptr %idx, align 8
   %trunc = trunc i64 %x to i16
   %ext = sext i16 %trunc to i64
   ret i64 %ext
 }
 
-define i64 @load_doubleword_trunc_byte_sext(i64* %ptr, i64 %off) {
+define i64 @load_doubleword_trunc_byte_sext(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_doubleword_trunc_byte_sext:
 ; CHECK: lsl [[REG:x[0-9]+]], x1, #3
 ; CHECK: ldrsb x0, [x0, [[REG]]]
 entry:
-  %idx = getelementptr inbounds i64, i64* %ptr, i64 %off
-  %x = load i64, i64* %idx, align 8
+  %idx = getelementptr inbounds i64, ptr %ptr, i64 %off
+  %x = load i64, ptr %idx, align 8
   %trunc = trunc i64 %x to i8
   %ext = sext i8 %trunc to i64
   ret i64 %ext
 }
 
-define i64 @load_word_trunc_halfword_sext(i32* %ptr, i64 %off) {
+define i64 @load_word_trunc_halfword_sext(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_word_trunc_halfword_sext:
 ; CHECK: lsl [[REG:x[0-9]+]], x1, #2
 ; CHECK: ldrsh x0, [x0, [[REG]]]
 entry:
-  %idx = getelementptr inbounds i32, i32* %ptr, i64 %off
-  %x = load i32, i32* %idx, align 8
+  %idx = getelementptr inbounds i32, ptr %ptr, i64 %off
+  %x = load i32, ptr %idx, align 8
   %trunc = trunc i32 %x to i16
   %ext = sext i16 %trunc to i64
   ret i64 %ext
 }
 
-define i64 @load_word_trunc_byte_sext(i32* %ptr, i64 %off) {
+define i64 @load_word_trunc_byte_sext(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_word_trunc_byte_sext:
 ; CHECK: lsl [[REG:x[0-9]+]], x1, #2
 ; CHECK: ldrsb x0, [x0, [[REG]]]
 entry:
- %idx = getelementptr inbounds i32, i32* %ptr, i64 %off
- %x = load i32, i32* %idx, align 8
+ %idx = getelementptr inbounds i32, ptr %ptr, i64 %off
+ %x = load i32, ptr %idx, align 8
  %trunc = trunc i32 %x to i8
  %ext = sext i8 %trunc to i64
  ret i64 %ext
 }
 
-define i64 @load_halfword_trunc_byte_sext(i16* %ptr, i64 %off) {
+define i64 @load_halfword_trunc_byte_sext(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_halfword_trunc_byte_sext:
 ; CHECK: lsl [[REG:x[0-9]+]], x1, #1
 ; CHECK: ldrsb x0, [x0, [[REG]]]
 entry:
- %idx = getelementptr inbounds i16, i16* %ptr, i64 %off
- %x = load i16, i16* %idx, align 8
+ %idx = getelementptr inbounds i16, ptr %ptr, i64 %off
+ %x = load i16, ptr %idx, align 8
  %trunc = trunc i16 %x to i8
  %ext = sext i8 %trunc to i64
  ret i64 %ext
@@ -294,14 +294,14 @@ entry:
 ; Check that we don't combine the shift, and so will use a narrower load, when
 ; the shift is used more than once.
 
-define i32 @load_doubleword_trunc_word_reuse_shift(i64* %ptr, i64 %off) {
+define i32 @load_doubleword_trunc_word_reuse_shift(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_doubleword_trunc_word_reuse_shift:
 ; CHECK: lsl x[[REG1:[0-9]+]], x1, #3
 ; CHECK: ldr w[[REG2:[0-9]+]], [x0, x[[REG1]]]
 ; CHECK: add w0, w[[REG2]], w[[REG1]]
 entry:
-  %idx = getelementptr inbounds i64, i64* %ptr, i64 %off
-  %x = load i64, i64* %idx, align 8
+  %idx = getelementptr inbounds i64, ptr %ptr, i64 %off
+  %x = load i64, ptr %idx, align 8
   %trunc = trunc i64 %x to i32
   %lsl = shl i64 %off, 3
   %lsl.trunc = trunc i64 %lsl to i32
@@ -309,14 +309,14 @@ entry:
   ret i32 %add
 }
 
-define i16 @load_doubleword_trunc_halfword_reuse_shift(i64* %ptr, i64 %off) {
+define i16 @load_doubleword_trunc_halfword_reuse_shift(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_doubleword_trunc_halfword_reuse_shift:
 ; CHECK: lsl x[[REG1:[0-9]+]], x1, #3
 ; CHECK: ldrh w[[REG2:[0-9]+]], [x0, x[[REG1]]]
 ; CHECK: add w0, w[[REG2]], w[[REG1]]
 entry:
-  %idx = getelementptr inbounds i64, i64* %ptr, i64 %off
-  %x = load i64, i64* %idx, align 8
+  %idx = getelementptr inbounds i64, ptr %ptr, i64 %off
+  %x = load i64, ptr %idx, align 8
   %trunc = trunc i64 %x to i16
   %lsl = shl i64 %off, 3
   %lsl.trunc = trunc i64 %lsl to i16
@@ -324,14 +324,14 @@ entry:
   ret i16 %add
 }
 
-define i8 @load_doubleword_trunc_byte_reuse_shift(i64* %ptr, i64 %off) {
+define i8 @load_doubleword_trunc_byte_reuse_shift(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_doubleword_trunc_byte_reuse_shift:
 ; CHECK: lsl x[[REG1:[0-9]+]], x1, #3
 ; CHECK: ldrb w[[REG2:[0-9]+]], [x0, x[[REG1]]]
 ; CHECK: add w0, w[[REG2]], w[[REG1]]
 entry:
-  %idx = getelementptr inbounds i64, i64* %ptr, i64 %off
-  %x = load i64, i64* %idx, align 8
+  %idx = getelementptr inbounds i64, ptr %ptr, i64 %off
+  %x = load i64, ptr %idx, align 8
   %trunc = trunc i64 %x to i8
   %lsl = shl i64 %off, 3
   %lsl.trunc = trunc i64 %lsl to i8
@@ -339,14 +339,14 @@ entry:
   ret i8 %add
 }
 
-define i16 @load_word_trunc_halfword_reuse_shift(i32* %ptr, i64 %off) {
+define i16 @load_word_trunc_halfword_reuse_shift(ptr %ptr, i64 %off) {
 entry:
 ; CHECK-LABEL: load_word_trunc_halfword_reuse_shift:
 ; CHECK: lsl x[[REG1:[0-9]+]], x1, #2
 ; CHECK: ldrh w[[REG2:[0-9]+]], [x0, x[[REG1]]]
 ; CHECK: add w0, w[[REG2]], w[[REG1]]
-  %idx = getelementptr inbounds i32, i32* %ptr, i64 %off
-  %x = load i32, i32* %idx, align 8
+  %idx = getelementptr inbounds i32, ptr %ptr, i64 %off
+  %x = load i32, ptr %idx, align 8
   %trunc = trunc i32 %x to i16
   %lsl = shl i64 %off, 2
   %lsl.trunc = trunc i64 %lsl to i16
@@ -354,14 +354,14 @@ entry:
   ret i16 %add
 }
 
-define i8 @load_word_trunc_byte_reuse_shift(i32* %ptr, i64 %off) {
+define i8 @load_word_trunc_byte_reuse_shift(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_word_trunc_byte_reuse_shift:
 ; CHECK: lsl x[[REG1:[0-9]+]], x1, #2
 ; CHECK: ldrb w[[REG2:[0-9]+]], [x0, x[[REG1]]]
 ; CHECK: add w0, w[[REG2]], w[[REG1]]
 entry:
-  %idx = getelementptr inbounds i32, i32* %ptr, i64 %off
-  %x = load i32, i32* %idx, align 8
+  %idx = getelementptr inbounds i32, ptr %ptr, i64 %off
+  %x = load i32, ptr %idx, align 8
   %trunc = trunc i32 %x to i8
   %lsl = shl i64 %off, 2
   %lsl.trunc = trunc i64 %lsl to i8
@@ -369,14 +369,14 @@ entry:
   ret i8 %add
 }
 
-define i8 @load_halfword_trunc_byte_reuse_shift(i16* %ptr, i64 %off) {
+define i8 @load_halfword_trunc_byte_reuse_shift(ptr %ptr, i64 %off) {
 ; CHECK-LABEL: load_halfword_trunc_byte_reuse_shift:
 ; CHECK: lsl x[[REG1:[0-9]+]], x1, #1
 ; CHECK: ldrb w[[REG2:[0-9]+]], [x0, x[[REG1]]]
 ; CHECK: add w0, w[[REG2]], w[[REG1]]
 entry:
-  %idx = getelementptr inbounds i16, i16* %ptr, i64 %off
-  %x = load i16, i16* %idx, align 8
+  %idx = getelementptr inbounds i16, ptr %ptr, i64 %off
+  %x = load i16, ptr %idx, align 8
   %trunc = trunc i16 %x to i8
   %lsl = shl i64 %off, 1
   %lsl.trunc = trunc i64 %lsl to i8

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fp.ll b/llvm/test/CodeGen/AArch64/arm64-fp.ll
index 442432e55798e..c7ef439b764dd 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fp.ll
@@ -13,7 +13,7 @@ define float @t1(i1 %a, float %b, float %c) nounwind {
 
 ; This may infinite loop if isNegatibleForFree and getNegatedExpression are conflicted.
 
-define double @negation_propagation(double* %arg, double %arg1, double %arg2) {
+define double @negation_propagation(ptr %arg, double %arg1, double %arg2) {
 ; CHECK-LABEL: negation_propagation:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov d2, #1.00000000

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fp128-folding.ll b/llvm/test/CodeGen/AArch64/arm64-fp128-folding.ll
index 62ac0b62ce987..2f336a1d3874c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fp128-folding.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fp128-folding.ll
@@ -1,5 +1,5 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -verify-machineinstrs | FileCheck %s
-declare void @bar(i8*, i8*, i32*)
+declare void @bar(ptr, ptr, ptr)
 
 ; SelectionDAG used to try to fold some fp128 operations using the ppc128 type,
 ; which is not supported.
@@ -7,8 +7,8 @@ declare void @bar(i8*, i8*, i32*)
 define fp128 @test_folding() {
 ; CHECK-LABEL: test_folding:
   %l = alloca i32
-  store i32 42, i32* %l
-  %val = load i32, i32* %l
+  store i32 42, ptr %l
+  %val = load i32, ptr %l
   %fpval = sitofp i32 %val to fp128
   ; If the value is loaded from a constant pool into an fp128, it's been folded
   ; successfully.

diff  --git a/llvm/test/CodeGen/AArch64/arm64-fp128.ll b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
index 5a25f128889cf..9efe28b383c8d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fp128.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
@@ -13,8 +13,8 @@ define fp128 @test_add() {
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:rhs]
 ; CHECK-NEXT:    b __addtf3
 
-  %lhs = load fp128, fp128* @lhs, align 16
-  %rhs = load fp128, fp128* @rhs, align 16
+  %lhs = load fp128, ptr @lhs, align 16
+  %rhs = load fp128, ptr @rhs, align 16
 
   %val = fadd fp128 %lhs, %rhs
   ret fp128 %val
@@ -29,8 +29,8 @@ define fp128 @test_sub() {
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:rhs]
 ; CHECK-NEXT:    b __subtf3
 
-  %lhs = load fp128, fp128* @lhs, align 16
-  %rhs = load fp128, fp128* @rhs, align 16
+  %lhs = load fp128, ptr @lhs, align 16
+  %rhs = load fp128, ptr @rhs, align 16
 
   %val = fsub fp128 %lhs, %rhs
   ret fp128 %val
@@ -45,8 +45,8 @@ define fp128 @test_mul() {
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:rhs]
 ; CHECK-NEXT:    b __multf3
 
-  %lhs = load fp128, fp128* @lhs, align 16
-  %rhs = load fp128, fp128* @rhs, align 16
+  %lhs = load fp128, ptr @lhs, align 16
+  %rhs = load fp128, ptr @rhs, align 16
 
   %val = fmul fp128 %lhs, %rhs
   ret fp128 %val
@@ -61,8 +61,8 @@ define fp128 @test_div() {
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:rhs]
 ; CHECK-NEXT:    b __divtf3
 
-  %lhs = load fp128, fp128* @lhs, align 16
-  %rhs = load fp128, fp128* @rhs, align 16
+  %lhs = load fp128, ptr @lhs, align 16
+  %rhs = load fp128, ptr @rhs, align 16
 
   %val = fdiv fp128 %lhs, %rhs
   ret fp128 %val
@@ -91,13 +91,13 @@ define dso_local void @test_fptosi() {
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
-  %val = load fp128, fp128* @lhs, align 16
+  %val = load fp128, ptr @lhs, align 16
 
   %val32 = fptosi fp128 %val to i32
-  store i32 %val32, i32* @var32
+  store i32 %val32, ptr @var32
 
   %val64 = fptosi fp128 %val to i64
-  store i64 %val64, i64* @var64
+  store i64 %val64, ptr @var64
 
   ret void
 }
@@ -122,13 +122,13 @@ define dso_local void @test_fptoui() {
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
-  %val = load fp128, fp128* @lhs, align 16
+  %val = load fp128, ptr @lhs, align 16
 
   %val32 = fptoui fp128 %val to i32
-  store i32 %val32, i32* @var32
+  store i32 %val32, ptr @var32
 
   %val64 = fptoui fp128 %val to i64
-  store i64 %val64, i64* @var64
+  store i64 %val64, ptr @var64
 
   ret void
 }
@@ -152,13 +152,13 @@ define dso_local void @test_sitofp() {
 ; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
 
-  %src32 = load i32, i32* @var32
+  %src32 = load i32, ptr @var32
   %val32 = sitofp i32 %src32 to fp128
-  store volatile fp128 %val32, fp128* @lhs
+  store volatile fp128 %val32, ptr @lhs
 
-  %src64 = load i64, i64* @var64
+  %src64 = load i64, ptr @var64
   %val64 = sitofp i64 %src64 to fp128
-  store volatile fp128 %val64, fp128* @lhs
+  store volatile fp128 %val64, ptr @lhs
 
   ret void
 }
@@ -182,13 +182,13 @@ define dso_local void @test_uitofp() {
 ; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
 
-  %src32 = load i32, i32* @var32
+  %src32 = load i32, ptr @var32
   %val32 = uitofp i32 %src32 to fp128
-  store volatile fp128 %val32, fp128* @lhs
+  store volatile fp128 %val32, ptr @lhs
 
-  %src64 = load i64, i64* @var64
+  %src64 = load i64, ptr @var64
   %val64 = uitofp i64 %src64 to fp128
-  store volatile fp128 %val64, fp128* @lhs
+  store volatile fp128 %val64, ptr @lhs
 
   ret void
 }
@@ -209,8 +209,8 @@ define dso_local i1 @test_setcc1() {
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 
-  %lhs = load fp128, fp128* @lhs, align 16
-  %rhs = load fp128, fp128* @rhs, align 16
+  %lhs = load fp128, ptr @lhs, align 16
+  %rhs = load fp128, ptr @rhs, align 16
 
 ; Technically, everything after the call to __letf2 is redundant, but we'll let
 ; LLVM have its fun for now.
@@ -235,8 +235,8 @@ define dso_local i1 @test_setcc2() {
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 
-  %lhs = load fp128, fp128* @lhs, align 16
-  %rhs = load fp128, fp128* @rhs, align 16
+  %lhs = load fp128, ptr @lhs, align 16
+  %rhs = load fp128, ptr @rhs, align 16
 
   %val = fcmp ugt fp128 %lhs, %rhs
 
@@ -267,8 +267,8 @@ define dso_local i1 @test_setcc3() {
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
 
-  %lhs = load fp128, fp128* @lhs, align 16
-  %rhs = load fp128, fp128* @rhs, align 16
+  %lhs = load fp128, ptr @lhs, align 16
+  %rhs = load fp128, ptr @rhs, align 16
 
   %val = fcmp ueq fp128 %lhs, %rhs
 
@@ -304,8 +304,8 @@ define dso_local i32 @test_br_cc() uwtable {
 ; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
 
-  %lhs = load fp128, fp128* @lhs, align 16
-  %rhs = load fp128, fp128* @rhs, align 16
+  %lhs = load fp128, ptr @lhs, align 16
+  %rhs = load fp128, ptr @rhs, align 16
 
   ; olt == !uge, which LLVM optimizes this to.
   %cond = fcmp olt fp128 %lhs, %rhs
@@ -330,7 +330,7 @@ define dso_local void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) {
 ; CHECK-NEXT:    ret
 
   %val = select i1 %cond, fp128 %lhs, fp128 %rhs
-  store fp128 %val, fp128* @lhs, align 16
+  store fp128 %val, ptr @lhs, align 16
   ret void
 }
 
@@ -363,16 +363,16 @@ define dso_local void @test_round() {
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
 
-  %val = load fp128, fp128* @lhs, align 16
+  %val = load fp128, ptr @lhs, align 16
 
   %half = fptrunc fp128 %val to half
-  store half %half, half* @varhalf, align 2
+  store half %half, ptr @varhalf, align 2
 
   %float = fptrunc fp128 %val to float
-  store float %float, float* @varfloat, align 4
+  store float %float, ptr @varfloat, align 4
 
   %double = fptrunc fp128 %val to double
-  store double %double, double* @vardouble, align 8
+  store double %double, ptr @vardouble, align 8
 
   ret void
 }
@@ -400,19 +400,19 @@ define dso_local void @test_extend() {
 ; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
 
-  %val = load fp128, fp128* @lhs, align 16
+  %val = load fp128, ptr @lhs, align 16
 
-  %half = load half, half* @varhalf
+  %half = load half, ptr @varhalf
   %fromhalf = fpext half %half to fp128
-  store volatile fp128 %fromhalf, fp128* @lhs, align 16
+  store volatile fp128 %fromhalf, ptr @lhs, align 16
 
-  %float = load float, float* @varfloat
+  %float = load float, ptr @varfloat
   %fromfloat = fpext float %float to fp128
-  store volatile fp128 %fromfloat, fp128* @lhs, align 16
+  store volatile fp128 %fromfloat, ptr @lhs, align 16
 
-  %double = load double, double* @vardouble
+  %double = load double, ptr @vardouble
   %fromdouble = fpext double %double to fp128
-  store volatile fp128 %fromdouble, fp128* @lhs, align 16
+  store volatile fp128 %fromdouble, ptr @lhs, align 16
 
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-global-address.ll b/llvm/test/CodeGen/AArch64/arm64-global-address.ll
index 005f414f8752d..e3e24d819c10a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-global-address.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-global-address.ll
@@ -8,7 +8,7 @@ define i32 @test(i32 %off) nounwind {
 ; CHECK: adrp x[[REG:[0-9]+]], _G@GOTPAGE
 ; CHECK: ldr  x[[REG2:[0-9]+]], [x[[REG]], _G@GOTPAGEOFF]
 ; CHECK: add w0, w[[REG2]], w0
-  %tmp = ptrtoint i32* @G to i32
+  %tmp = ptrtoint ptr @G to i32
   %tmp1 = add i32 %tmp, %off
   ret i32 %tmp1
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-hello.ll b/llvm/test/CodeGen/AArch64/arm64-hello.ll
index 5e1bd9d499202..83b42b305f4a9 100644
--- a/llvm/test/CodeGen/AArch64/arm64-hello.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-hello.ll
@@ -27,9 +27,9 @@
 define i32 @main() nounwind ssp {
 entry:
   %retval = alloca i32, align 4
-  store i32 0, i32* %retval
-  %call = call i32 @puts(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0))
+  store i32 0, ptr %retval
+  %call = call i32 @puts(ptr @.str)
   ret i32 %call
 }
 
-declare i32 @puts(i8*)
+declare i32 @puts(ptr)

diff  --git a/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog.ll b/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog.ll
index 64e58a880d934..9e32be58273ee 100644
--- a/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog.ll
@@ -37,19 +37,17 @@ declare i32 @_Z3gooi(i32);
 define i32 @foo(i32 %c) nounwind minsize {
 entry:
   %buffer = alloca [1 x i32], align 4
-  %0 = bitcast [1 x i32]* %buffer to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
-  %arraydecay = getelementptr inbounds [1 x i32], [1 x i32]* %buffer, i64 0, i64 0
-  %call = call i32 @goo(i32* nonnull %arraydecay)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %buffer)
+  %call = call i32 @goo(ptr nonnull %buffer)
   %sub = sub nsw i32 %c, %call
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %buffer)
 
   ret i32 %sub
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare i32 @goo(i32*)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare i32 @goo(ptr)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 ; CHECK-LABEL: _OUTLINED_FUNCTION_PROLOG_x30x29x19x20x21x22:
 ; CHECK:      stp     x22, x21, [sp, #-32]!

diff  --git a/llvm/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll b/llvm/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll
index 1e38266b27daa..a13b82bb903bb 100644
--- a/llvm/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll
@@ -1,9 +1,9 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define i32 @foo(<4 x i16>* %__a) nounwind {
+define i32 @foo(ptr %__a) nounwind {
 ; CHECK-LABEL: foo:
 ; CHECK: umov.h w{{[0-9]+}}, v{{[0-9]+}}[0]
-  %tmp18 = load <4 x i16>, <4 x i16>* %__a, align 8
+  %tmp18 = load <4 x i16>, ptr %__a, align 8
   %vget_lane = extractelement <4 x i16> %tmp18, i32 0
   %conv = zext i16 %vget_lane to i32
   %mul = mul nsw i32 3, %conv

diff  --git a/llvm/test/CodeGen/AArch64/arm64-indexed-memory.ll b/llvm/test/CodeGen/AArch64/arm64-indexed-memory.ll
index 697c3d0c769fd..d1747e7ca1315 100644
--- a/llvm/test/CodeGen/AArch64/arm64-indexed-memory.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-indexed-memory.ll
@@ -2,573 +2,573 @@
 ; RUN: llc < %s -mtriple=arm64-apple-ios -aarch64-redzone | FileCheck %s --check-prefixes=CHECK,CHECK64
 ; RUN: llc < %s -mtriple=arm64_32-apple-ios -aarch64-redzone | FileCheck %s --check-prefixes=CHECK,CHECK32
 
-define i64* @store64(i64* %ptr, i64 %index, i64 %spacing) {
+define ptr @store64(ptr %ptr, i64 %index, i64 %spacing) {
 ; CHECK-LABEL: store64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str x2, [x0], #8
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i64, i64* %ptr, i64 1
-  store i64 %spacing, i64* %ptr, align 4
-  ret i64* %incdec.ptr
+  %incdec.ptr = getelementptr inbounds i64, ptr %ptr, i64 1
+  store i64 %spacing, ptr %ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i64* @store64idxpos256(i64* %ptr, i64 %index, i64 %spacing) {
+define ptr @store64idxpos256(ptr %ptr, i64 %index, i64 %spacing) {
 ; CHECK-LABEL: store64idxpos256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov x8, x0
 ; CHECK-NEXT:    add x0, x0, #256
 ; CHECK-NEXT:    str x2, [x8]
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i64, i64* %ptr, i64 32
-  store i64 %spacing, i64* %ptr, align 4
-  ret i64* %incdec.ptr
+  %incdec.ptr = getelementptr inbounds i64, ptr %ptr, i64 32
+  store i64 %spacing, ptr %ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i64* @store64idxneg256(i64* %ptr, i64 %index, i64 %spacing) {
+define ptr @store64idxneg256(ptr %ptr, i64 %index, i64 %spacing) {
 ; CHECK-LABEL: store64idxneg256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str x2, [x0], #-256
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i64, i64* %ptr, i64 -32
-  store i64 %spacing, i64* %ptr, align 4
-  ret i64* %incdec.ptr
+  %incdec.ptr = getelementptr inbounds i64, ptr %ptr, i64 -32
+  store i64 %spacing, ptr %ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i32* @store32(i32* %ptr, i32 %index, i32 %spacing) {
+define ptr @store32(ptr %ptr, i32 %index, i32 %spacing) {
 ; CHECK-LABEL: store32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str w2, [x0], #4
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i32, i32* %ptr, i64 1
-  store i32 %spacing, i32* %ptr, align 4
-  ret i32* %incdec.ptr
+  %incdec.ptr = getelementptr inbounds i32, ptr %ptr, i64 1
+  store i32 %spacing, ptr %ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i32* @store32idxpos256(i32* %ptr, i32 %index, i32 %spacing) {
+define ptr @store32idxpos256(ptr %ptr, i32 %index, i32 %spacing) {
 ; CHECK-LABEL: store32idxpos256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov x8, x0
 ; CHECK-NEXT:    add x0, x0, #256
 ; CHECK-NEXT:    str w2, [x8]
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i32, i32* %ptr, i64 64
-  store i32 %spacing, i32* %ptr, align 4
-  ret i32* %incdec.ptr
+  %incdec.ptr = getelementptr inbounds i32, ptr %ptr, i64 64
+  store i32 %spacing, ptr %ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i32* @store32idxneg256(i32* %ptr, i32 %index, i32 %spacing) {
+define ptr @store32idxneg256(ptr %ptr, i32 %index, i32 %spacing) {
 ; CHECK-LABEL: store32idxneg256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str w2, [x0], #-256
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i32, i32* %ptr, i64 -64
-  store i32 %spacing, i32* %ptr, align 4
-  ret i32* %incdec.ptr
+  %incdec.ptr = getelementptr inbounds i32, ptr %ptr, i64 -64
+  store i32 %spacing, ptr %ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i16* @store16(i16* %ptr, i16 %index, i16 %spacing) {
+define ptr @store16(ptr %ptr, i16 %index, i16 %spacing) {
 ; CHECK-LABEL: store16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strh w2, [x0], #2
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i16, i16* %ptr, i64 1
-  store i16 %spacing, i16* %ptr, align 4
-  ret i16* %incdec.ptr
+  %incdec.ptr = getelementptr inbounds i16, ptr %ptr, i64 1
+  store i16 %spacing, ptr %ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i16* @store16idxpos256(i16* %ptr, i16 %index, i16 %spacing) {
+define ptr @store16idxpos256(ptr %ptr, i16 %index, i16 %spacing) {
 ; CHECK-LABEL: store16idxpos256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov x8, x0
 ; CHECK-NEXT:    add x0, x0, #256
 ; CHECK-NEXT:    strh w2, [x8]
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i16, i16* %ptr, i64 128
-  store i16 %spacing, i16* %ptr, align 4
-  ret i16* %incdec.ptr
+  %incdec.ptr = getelementptr inbounds i16, ptr %ptr, i64 128
+  store i16 %spacing, ptr %ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i16* @store16idxneg256(i16* %ptr, i16 %index, i16 %spacing) {
+define ptr @store16idxneg256(ptr %ptr, i16 %index, i16 %spacing) {
 ; CHECK-LABEL: store16idxneg256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strh w2, [x0], #-256
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i16, i16* %ptr, i64 -128
-  store i16 %spacing, i16* %ptr, align 4
-  ret i16* %incdec.ptr
+  %incdec.ptr = getelementptr inbounds i16, ptr %ptr, i64 -128
+  store i16 %spacing, ptr %ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i8* @store8(i8* %ptr, i8 %index, i8 %spacing) {
+define ptr @store8(ptr %ptr, i8 %index, i8 %spacing) {
 ; CHECK-LABEL: store8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strb w2, [x0], #1
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
-  store i8 %spacing, i8* %ptr, align 4
-  ret i8* %incdec.ptr
+  %incdec.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
+  store i8 %spacing, ptr %ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i8* @store8idxpos256(i8* %ptr, i8 %index, i8 %spacing) {
+define ptr @store8idxpos256(ptr %ptr, i8 %index, i8 %spacing) {
 ; CHECK-LABEL: store8idxpos256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov x8, x0
 ; CHECK-NEXT:    add x0, x0, #256
 ; CHECK-NEXT:    strb w2, [x8]
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i8, i8* %ptr, i64 256
-  store i8 %spacing, i8* %ptr, align 4
-  ret i8* %incdec.ptr
+  %incdec.ptr = getelementptr inbounds i8, ptr %ptr, i64 256
+  store i8 %spacing, ptr %ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i8* @store8idxneg256(i8* %ptr, i8 %index, i8 %spacing) {
+define ptr @store8idxneg256(ptr %ptr, i8 %index, i8 %spacing) {
 ; CHECK-LABEL: store8idxneg256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strb w2, [x0], #-256
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i8, i8* %ptr, i64 -256
-  store i8 %spacing, i8* %ptr, align 4
-  ret i8* %incdec.ptr
+  %incdec.ptr = getelementptr inbounds i8, ptr %ptr, i64 -256
+  store i8 %spacing, ptr %ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i32* @truncst64to32(i32* %ptr, i32 %index, i64 %spacing) {
+define ptr @truncst64to32(ptr %ptr, i32 %index, i64 %spacing) {
 ; CHECK-LABEL: truncst64to32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str w2, [x0], #4
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i32, i32* %ptr, i64 1
+  %incdec.ptr = getelementptr inbounds i32, ptr %ptr, i64 1
   %trunc = trunc i64 %spacing to i32
-  store i32 %trunc, i32* %ptr, align 4
-  ret i32* %incdec.ptr
+  store i32 %trunc, ptr %ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i16* @truncst64to16(i16* %ptr, i16 %index, i64 %spacing) {
+define ptr @truncst64to16(ptr %ptr, i16 %index, i64 %spacing) {
 ; CHECK-LABEL: truncst64to16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strh w2, [x0], #2
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i16, i16* %ptr, i64 1
+  %incdec.ptr = getelementptr inbounds i16, ptr %ptr, i64 1
   %trunc = trunc i64 %spacing to i16
-  store i16 %trunc, i16* %ptr, align 4
-  ret i16* %incdec.ptr
+  store i16 %trunc, ptr %ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i8* @truncst64to8(i8* %ptr, i8 %index, i64 %spacing) {
+define ptr @truncst64to8(ptr %ptr, i8 %index, i64 %spacing) {
 ; CHECK-LABEL: truncst64to8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strb w2, [x0], #1
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i8, i8* %ptr, i64 1
+  %incdec.ptr = getelementptr inbounds i8, ptr %ptr, i64 1
   %trunc = trunc i64 %spacing to i8
-  store i8 %trunc, i8* %ptr, align 4
-  ret i8* %incdec.ptr
+  store i8 %trunc, ptr %ptr, align 4
+  ret ptr %incdec.ptr
 }
 
 
-define half* @storef16(half* %ptr, half %index, half %spacing) nounwind {
+define ptr @storef16(ptr %ptr, half %index, half %spacing) nounwind {
 ; CHECK-LABEL: storef16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str h1, [x0], #2
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds half, half* %ptr, i64 1
-  store half %spacing, half* %ptr, align 2
-  ret half* %incdec.ptr
+  %incdec.ptr = getelementptr inbounds half, ptr %ptr, i64 1
+  store half %spacing, ptr %ptr, align 2
+  ret ptr %incdec.ptr
 }
 
-define float* @storef32(float* %ptr, float %index, float %spacing) {
+define ptr @storef32(ptr %ptr, float %index, float %spacing) {
 ; CHECK-LABEL: storef32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str s1, [x0], #4
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds float, float* %ptr, i64 1
-  store float %spacing, float* %ptr, align 4
-  ret float* %incdec.ptr
+  %incdec.ptr = getelementptr inbounds float, ptr %ptr, i64 1
+  store float %spacing, ptr %ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define double* @storef64(double* %ptr, double %index, double %spacing) {
+define ptr @storef64(ptr %ptr, double %index, double %spacing) {
 ; CHECK-LABEL: storef64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str d1, [x0], #8
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds double, double* %ptr, i64 1
-  store double %spacing, double* %ptr, align 4
-  ret double* %incdec.ptr
+  %incdec.ptr = getelementptr inbounds double, ptr %ptr, i64 1
+  store double %spacing, ptr %ptr, align 4
+  ret ptr %incdec.ptr
 }
 
 
-define double* @pref64(double* %ptr, double %spacing) {
+define ptr @pref64(ptr %ptr, double %spacing) {
 ; CHECK-LABEL: pref64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str d0, [x0, #32]!
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds double, double* %ptr, i64 4
-  store double %spacing, double* %incdec.ptr, align 4
-  ret double *%incdec.ptr
+  %incdec.ptr = getelementptr inbounds double, ptr %ptr, i64 4
+  store double %spacing, ptr %incdec.ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define float* @pref32(float* %ptr, float %spacing) {
+define ptr @pref32(ptr %ptr, float %spacing) {
 ; CHECK-LABEL: pref32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str s0, [x0, #12]!
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds float, float* %ptr, i64 3
-  store float %spacing, float* %incdec.ptr, align 4
-  ret float *%incdec.ptr
+  %incdec.ptr = getelementptr inbounds float, ptr %ptr, i64 3
+  store float %spacing, ptr %incdec.ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define half* @pref16(half* %ptr, half %spacing) nounwind {
+define ptr @pref16(ptr %ptr, half %spacing) nounwind {
 ; CHECK-LABEL: pref16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str h0, [x0, #6]!
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds half, half* %ptr, i64 3
-  store half %spacing, half* %incdec.ptr, align 2
-  ret half *%incdec.ptr
+  %incdec.ptr = getelementptr inbounds half, ptr %ptr, i64 3
+  store half %spacing, ptr %incdec.ptr, align 2
+  ret ptr %incdec.ptr
 }
 
-define i64* @pre64(i64* %ptr, i64 %spacing) {
+define ptr @pre64(ptr %ptr, i64 %spacing) {
 ; CHECK-LABEL: pre64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str x1, [x0, #16]!
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i64, i64* %ptr, i64 2
-  store i64 %spacing, i64* %incdec.ptr, align 4
-  ret i64 *%incdec.ptr
+  %incdec.ptr = getelementptr inbounds i64, ptr %ptr, i64 2
+  store i64 %spacing, ptr %incdec.ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i64* @pre64idxpos256(i64* %ptr, i64 %spacing) {
+define ptr @pre64idxpos256(ptr %ptr, i64 %spacing) {
 ; CHECK-LABEL: pre64idxpos256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov x8, x0
 ; CHECK-NEXT:    add x0, x0, #256
 ; CHECK-NEXT:    str x1, [x8, #256]
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i64, i64* %ptr, i64 32
-  store i64 %spacing, i64* %incdec.ptr, align 4
-  ret i64 *%incdec.ptr
+  %incdec.ptr = getelementptr inbounds i64, ptr %ptr, i64 32
+  store i64 %spacing, ptr %incdec.ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i64* @pre64idxneg256(i64* %ptr, i64 %spacing) {
+define ptr @pre64idxneg256(ptr %ptr, i64 %spacing) {
 ; CHECK-LABEL: pre64idxneg256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str x1, [x0, #-256]!
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i64, i64* %ptr, i64 -32
-  store i64 %spacing, i64* %incdec.ptr, align 4
-  ret i64 *%incdec.ptr
+  %incdec.ptr = getelementptr inbounds i64, ptr %ptr, i64 -32
+  store i64 %spacing, ptr %incdec.ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i32* @pre32(i32* %ptr, i32 %spacing) {
+define ptr @pre32(ptr %ptr, i32 %spacing) {
 ; CHECK-LABEL: pre32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str w1, [x0, #8]!
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i32, i32* %ptr, i64 2
-  store i32 %spacing, i32* %incdec.ptr, align 4
-  ret i32 *%incdec.ptr
+  %incdec.ptr = getelementptr inbounds i32, ptr %ptr, i64 2
+  store i32 %spacing, ptr %incdec.ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i32* @pre32idxpos256(i32* %ptr, i32 %spacing) {
+define ptr @pre32idxpos256(ptr %ptr, i32 %spacing) {
 ; CHECK-LABEL: pre32idxpos256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov x8, x0
 ; CHECK-NEXT:    add x0, x0, #256
 ; CHECK-NEXT:    str w1, [x8, #256]
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i32, i32* %ptr, i64 64
-  store i32 %spacing, i32* %incdec.ptr, align 4
-  ret i32 *%incdec.ptr
+  %incdec.ptr = getelementptr inbounds i32, ptr %ptr, i64 64
+  store i32 %spacing, ptr %incdec.ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i32* @pre32idxneg256(i32* %ptr, i32 %spacing) {
+define ptr @pre32idxneg256(ptr %ptr, i32 %spacing) {
 ; CHECK-LABEL: pre32idxneg256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str w1, [x0, #-256]!
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i32, i32* %ptr, i64 -64
-  store i32 %spacing, i32* %incdec.ptr, align 4
-  ret i32 *%incdec.ptr
+  %incdec.ptr = getelementptr inbounds i32, ptr %ptr, i64 -64
+  store i32 %spacing, ptr %incdec.ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i16* @pre16(i16* %ptr, i16 %spacing) {
+define ptr @pre16(ptr %ptr, i16 %spacing) {
 ; CHECK-LABEL: pre16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strh w1, [x0, #4]!
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i16, i16* %ptr, i64 2
-  store i16 %spacing, i16* %incdec.ptr, align 4
-  ret i16 *%incdec.ptr
+  %incdec.ptr = getelementptr inbounds i16, ptr %ptr, i64 2
+  store i16 %spacing, ptr %incdec.ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i16* @pre16idxpos256(i16* %ptr, i16 %spacing) {
+define ptr @pre16idxpos256(ptr %ptr, i16 %spacing) {
 ; CHECK-LABEL: pre16idxpos256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov x8, x0
 ; CHECK-NEXT:    add x0, x0, #256
 ; CHECK-NEXT:    strh w1, [x8, #256]
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i16, i16* %ptr, i64 128
-  store i16 %spacing, i16* %incdec.ptr, align 4
-  ret i16 *%incdec.ptr
+  %incdec.ptr = getelementptr inbounds i16, ptr %ptr, i64 128
+  store i16 %spacing, ptr %incdec.ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i16* @pre16idxneg256(i16* %ptr, i16 %spacing) {
+define ptr @pre16idxneg256(ptr %ptr, i16 %spacing) {
 ; CHECK-LABEL: pre16idxneg256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strh w1, [x0, #-256]!
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i16, i16* %ptr, i64 -128
-  store i16 %spacing, i16* %incdec.ptr, align 4
-  ret i16 *%incdec.ptr
+  %incdec.ptr = getelementptr inbounds i16, ptr %ptr, i64 -128
+  store i16 %spacing, ptr %incdec.ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i8* @pre8(i8* %ptr, i8 %spacing) {
+define ptr @pre8(ptr %ptr, i8 %spacing) {
 ; CHECK-LABEL: pre8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strb w1, [x0, #2]!
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i8, i8* %ptr, i64 2
-  store i8 %spacing, i8* %incdec.ptr, align 4
-  ret i8 *%incdec.ptr
+  %incdec.ptr = getelementptr inbounds i8, ptr %ptr, i64 2
+  store i8 %spacing, ptr %incdec.ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i8* @pre8idxpos256(i8* %ptr, i8 %spacing) {
+define ptr @pre8idxpos256(ptr %ptr, i8 %spacing) {
 ; CHECK-LABEL: pre8idxpos256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov x8, x0
 ; CHECK-NEXT:    add x0, x0, #256
 ; CHECK-NEXT:    strb w1, [x8, #256]
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i8, i8* %ptr, i64 256
-  store i8 %spacing, i8* %incdec.ptr, align 4
-  ret i8 *%incdec.ptr
+  %incdec.ptr = getelementptr inbounds i8, ptr %ptr, i64 256
+  store i8 %spacing, ptr %incdec.ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i8* @pre8idxneg256(i8* %ptr, i8 %spacing) {
+define ptr @pre8idxneg256(ptr %ptr, i8 %spacing) {
 ; CHECK-LABEL: pre8idxneg256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strb w1, [x0, #-256]!
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i8, i8* %ptr, i64 -256
-  store i8 %spacing, i8* %incdec.ptr, align 4
-  ret i8 *%incdec.ptr
+  %incdec.ptr = getelementptr inbounds i8, ptr %ptr, i64 -256
+  store i8 %spacing, ptr %incdec.ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i32* @pretrunc64to32(i32* %ptr, i64 %spacing) {
+define ptr @pretrunc64to32(ptr %ptr, i64 %spacing) {
 ; CHECK-LABEL: pretrunc64to32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str w1, [x0, #8]!
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i32, i32* %ptr, i64 2
+  %incdec.ptr = getelementptr inbounds i32, ptr %ptr, i64 2
   %trunc = trunc i64 %spacing to i32
-  store i32 %trunc, i32* %incdec.ptr, align 4
-  ret i32 *%incdec.ptr
+  store i32 %trunc, ptr %incdec.ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i16* @pretrunc64to16(i16* %ptr, i64 %spacing) {
+define ptr @pretrunc64to16(ptr %ptr, i64 %spacing) {
 ; CHECK-LABEL: pretrunc64to16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strh w1, [x0, #4]!
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i16, i16* %ptr, i64 2
+  %incdec.ptr = getelementptr inbounds i16, ptr %ptr, i64 2
   %trunc = trunc i64 %spacing to i16
-  store i16 %trunc, i16* %incdec.ptr, align 4
-  ret i16 *%incdec.ptr
+  store i16 %trunc, ptr %incdec.ptr, align 4
+  ret ptr %incdec.ptr
 }
 
-define i8* @pretrunc64to8(i8* %ptr, i64 %spacing) {
+define ptr @pretrunc64to8(ptr %ptr, i64 %spacing) {
 ; CHECK-LABEL: pretrunc64to8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strb w1, [x0, #2]!
 ; CHECK-NEXT:    ret
-  %incdec.ptr = getelementptr inbounds i8, i8* %ptr, i64 2
+  %incdec.ptr = getelementptr inbounds i8, ptr %ptr, i64 2
   %trunc = trunc i64 %spacing to i8
-  store i8 %trunc, i8* %incdec.ptr, align 4
-  ret i8 *%incdec.ptr
+  store i8 %trunc, ptr %incdec.ptr, align 4
+  ret ptr %incdec.ptr
 }
 
 ;-----
 ; Pre-indexed loads
 ;-----
-define double* @preidxf64(double* %src, double* %out) {
+define ptr @preidxf64(ptr %src, ptr %out) {
 ; CHECK-LABEL: preidxf64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #8]!
 ; CHECK-NEXT:    str d0, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds double, double* %src, i64 1
-  %tmp = load double, double* %ptr, align 4
-  store double %tmp, double* %out, align 4
-  ret double* %ptr
+  %ptr = getelementptr inbounds double, ptr %src, i64 1
+  %tmp = load double, ptr %ptr, align 4
+  store double %tmp, ptr %out, align 4
+  ret ptr %ptr
 }
 
-define float* @preidxf32(float* %src, float* %out) {
+define ptr @preidxf32(ptr %src, ptr %out) {
 ; CHECK-LABEL: preidxf32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0, #4]!
 ; CHECK-NEXT:    str s0, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds float, float* %src, i64 1
-  %tmp = load float, float* %ptr, align 4
-  store float %tmp, float* %out, align 4
-  ret float* %ptr
+  %ptr = getelementptr inbounds float, ptr %src, i64 1
+  %tmp = load float, ptr %ptr, align 4
+  store float %tmp, ptr %out, align 4
+  ret ptr %ptr
 }
 
-define half* @preidxf16(half* %src, half* %out) {
+define ptr @preidxf16(ptr %src, ptr %out) {
 ; CHECK-LABEL: preidxf16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr h0, [x0, #2]!
 ; CHECK-NEXT:    str h0, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds half, half* %src, i64 1
-  %tmp = load half, half* %ptr, align 2
-  store half %tmp, half* %out, align 2
-  ret half* %ptr
+  %ptr = getelementptr inbounds half, ptr %src, i64 1
+  %tmp = load half, ptr %ptr, align 2
+  store half %tmp, ptr %out, align 2
+  ret ptr %ptr
 }
 
-define i64* @preidx64(i64* %src, i64* %out) {
+define ptr @preidx64(ptr %src, ptr %out) {
 ; CHECK-LABEL: preidx64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr x8, [x0, #8]!
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i64, i64* %src, i64 1
-  %tmp = load i64, i64* %ptr, align 4
-  store i64 %tmp, i64* %out, align 4
-  ret i64* %ptr
+  %ptr = getelementptr inbounds i64, ptr %src, i64 1
+  %tmp = load i64, ptr %ptr, align 4
+  store i64 %tmp, ptr %out, align 4
+  ret ptr %ptr
 }
 
-define i32* @preidx32(i32* %src, i32* %out) {
+define ptr @preidx32(ptr %src, ptr %out) {
 ; CHECK-LABEL: preidx32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0, #4]!
 ; CHECK-NEXT:    str w8, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i32, i32* %src, i64 1
-  %tmp = load i32, i32* %ptr, align 4
-  store i32 %tmp, i32* %out, align 4
-  ret i32* %ptr
+  %ptr = getelementptr inbounds i32, ptr %src, i64 1
+  %tmp = load i32, ptr %ptr, align 4
+  store i32 %tmp, ptr %out, align 4
+  ret ptr %ptr
 }
 
-define i16* @preidx16zext32(i16* %src, i32* %out) {
+define ptr @preidx16zext32(ptr %src, ptr %out) {
 ; CHECK-LABEL: preidx16zext32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0, #2]!
 ; CHECK-NEXT:    str w8, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i16, i16* %src, i64 1
-  %tmp = load i16, i16* %ptr, align 4
+  %ptr = getelementptr inbounds i16, ptr %src, i64 1
+  %tmp = load i16, ptr %ptr, align 4
   %ext = zext i16 %tmp to i32
-  store i32 %ext, i32* %out, align 4
-  ret i16* %ptr
+  store i32 %ext, ptr %out, align 4
+  ret ptr %ptr
 }
 
-define i16* @preidx16zext64(i16* %src, i64* %out) {
+define ptr @preidx16zext64(ptr %src, ptr %out) {
 ; CHECK-LABEL: preidx16zext64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0, #2]!
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i16, i16* %src, i64 1
-  %tmp = load i16, i16* %ptr, align 4
+  %ptr = getelementptr inbounds i16, ptr %src, i64 1
+  %tmp = load i16, ptr %ptr, align 4
   %ext = zext i16 %tmp to i64
-  store i64 %ext, i64* %out, align 4
-  ret i16* %ptr
+  store i64 %ext, ptr %out, align 4
+  ret ptr %ptr
 }
 
-define i8* @preidx8zext32(i8* %src, i32* %out) {
+define ptr @preidx8zext32(ptr %src, ptr %out) {
 ; CHECK-LABEL: preidx8zext32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x0, #1]!
 ; CHECK-NEXT:    str w8, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %src, i64 1
-  %tmp = load i8, i8* %ptr, align 4
+  %ptr = getelementptr inbounds i8, ptr %src, i64 1
+  %tmp = load i8, ptr %ptr, align 4
   %ext = zext i8 %tmp to i32
-  store i32 %ext, i32* %out, align 4
-  ret i8* %ptr
+  store i32 %ext, ptr %out, align 4
+  ret ptr %ptr
 }
 
-define i8* @preidx8zext64(i8* %src, i64* %out) {
+define ptr @preidx8zext64(ptr %src, ptr %out) {
 ; CHECK-LABEL: preidx8zext64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x0, #1]!
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %src, i64 1
-  %tmp = load i8, i8* %ptr, align 4
+  %ptr = getelementptr inbounds i8, ptr %src, i64 1
+  %tmp = load i8, ptr %ptr, align 4
   %ext = zext i8 %tmp to i64
-  store i64 %ext, i64* %out, align 4
-  ret i8* %ptr
+  store i64 %ext, ptr %out, align 4
+  ret ptr %ptr
 }
 
-define i32* @preidx32sext64(i32* %src, i64* %out) {
+define ptr @preidx32sext64(ptr %src, ptr %out) {
 ; CHECK-LABEL: preidx32sext64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldrsw x8, [x0, #4]!
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i32, i32* %src, i64 1
-  %tmp = load i32, i32* %ptr, align 4
+  %ptr = getelementptr inbounds i32, ptr %src, i64 1
+  %tmp = load i32, ptr %ptr, align 4
   %ext = sext i32 %tmp to i64
-  store i64 %ext, i64* %out, align 8
-  ret i32* %ptr
+  store i64 %ext, ptr %out, align 8
+  ret ptr %ptr
 }
 
-define i16* @preidx16sext32(i16* %src, i32* %out) {
+define ptr @preidx16sext32(ptr %src, ptr %out) {
 ; CHECK-LABEL: preidx16sext32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldrsh w8, [x0, #2]!
 ; CHECK-NEXT:    str w8, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i16, i16* %src, i64 1
-  %tmp = load i16, i16* %ptr, align 4
+  %ptr = getelementptr inbounds i16, ptr %src, i64 1
+  %tmp = load i16, ptr %ptr, align 4
   %ext = sext i16 %tmp to i32
-  store i32 %ext, i32* %out, align 4
-  ret i16* %ptr
+  store i32 %ext, ptr %out, align 4
+  ret ptr %ptr
 }
 
-define i16* @preidx16sext64(i16* %src, i64* %out) {
+define ptr @preidx16sext64(ptr %src, ptr %out) {
 ; CHECK-LABEL: preidx16sext64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldrsh x8, [x0, #2]!
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i16, i16* %src, i64 1
-  %tmp = load i16, i16* %ptr, align 4
+  %ptr = getelementptr inbounds i16, ptr %src, i64 1
+  %tmp = load i16, ptr %ptr, align 4
   %ext = sext i16 %tmp to i64
-  store i64 %ext, i64* %out, align 4
-  ret i16* %ptr
+  store i64 %ext, ptr %out, align 4
+  ret ptr %ptr
 }
 
-define i8* @preidx8sext32(i8* %src, i32* %out) {
+define ptr @preidx8sext32(ptr %src, ptr %out) {
 ; CHECK-LABEL: preidx8sext32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldrsb w8, [x0, #1]!
 ; CHECK-NEXT:    str w8, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %src, i64 1
-  %tmp = load i8, i8* %ptr, align 4
+  %ptr = getelementptr inbounds i8, ptr %src, i64 1
+  %tmp = load i8, ptr %ptr, align 4
   %ext = sext i8 %tmp to i32
-  store i32 %ext, i32* %out, align 4
-  ret i8* %ptr
+  store i32 %ext, ptr %out, align 4
+  ret ptr %ptr
 }
 
-define i8* @preidx8sext64(i8* %src, i64* %out) {
+define ptr @preidx8sext64(ptr %src, ptr %out) {
 ; CHECK-LABEL: preidx8sext64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldrsb x8, [x0, #1]!
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %src, i64 1
-  %tmp = load i8, i8* %ptr, align 4
+  %ptr = getelementptr inbounds i8, ptr %src, i64 1
+  %tmp = load i8, ptr %ptr, align 4
   %ext = sext i8 %tmp to i64
-  store i64 %ext, i64* %out, align 4
-  ret i8* %ptr
+  store i64 %ext, ptr %out, align 4
+  ret ptr %ptr
 }
 
 ; This test checks if illegal post-index is generated
 
-define i64* @postidx_clobber(i64* %addr) nounwind noinline ssp {
+define ptr @postidx_clobber(ptr %addr) nounwind noinline ssp {
 ; CHECK64-LABEL: postidx_clobber:
 ; CHECK64:       ; %bb.0:
 ; CHECK64-NEXT:    mov x8, x0
@@ -583,8 +583,7 @@ define i64* @postidx_clobber(i64* %addr) nounwind noinline ssp {
 ; CHECK32-NEXT:    str w8, [x8]
 ; CHECK32-NEXT:    ret
 ; ret
- %paddr = bitcast i64* %addr to i64**
- store i64* %addr, i64** %paddr
- %newaddr = getelementptr i64, i64* %addr, i32 1
- ret i64* %newaddr
+ store ptr %addr, ptr %addr
+ %newaddr = getelementptr i64, ptr %addr, i32 1
+ ret ptr %newaddr
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll b/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
index 74f78e49fa8f5..a44dd67eed3fb 100644
--- a/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
@@ -7,11 +7,11 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 target triple = "arm64-apple-ios7.0.0"
 
 ; Function Attrs: nounwind ssp
-define void @f(double* %P1) #0 {
+define void @f(ptr %P1) #0 {
 entry:
-  %arrayidx4 = getelementptr inbounds double, double* %P1, i64 1
-  %0 = load double, double* %arrayidx4, align 8, !tbaa !1
-  %1 = load double, double* %P1, align 8, !tbaa !1
+  %arrayidx4 = getelementptr inbounds double, ptr %P1, i64 1
+  %0 = load double, ptr %arrayidx4, align 8, !tbaa !1
+  %1 = load double, ptr %P1, align 8, !tbaa !1
   %2 = insertelement <2 x double> undef, double %0, i32 0
   %3 = insertelement <2 x double> %2, double %1, i32 1
   %4 = fsub <2 x double> zeroinitializer, %3
@@ -21,7 +21,7 @@ entry:
   br i1 %cmp168, label %if.then172, label %return
 
 if.then172:                                       ; preds = %cond.end90
-  %7 = tail call i64 @llvm.objectsize.i64.p0i8(i8* undef, i1 false)
+  %7 = tail call i64 @llvm.objectsize.i64.p0(ptr undef, i1 false)
   br label %return
 
 return:                                           ; preds = %if.then172, %cond.end90, %entry
@@ -30,28 +30,28 @@ return:                                           ; preds = %if.then172, %cond.e
 
 ; Avoid an assert/bad codegen in LD1LANEPOST lowering by not forming
 ; LD1LANEPOST ISD nodes with a non-constant lane index.
-define <4 x i32> @f2(i32 *%p, <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2, i32 %idx) {
-  %L0 = load i32, i32* %p
-  %p1 = getelementptr i32, i32* %p, i64 1
-  %L1 = load i32, i32* %p1
+define <4 x i32> @f2(ptr %p, <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2, i32 %idx) {
+  %L0 = load i32, ptr %p
+  %p1 = getelementptr i32, ptr %p, i64 1
+  %L1 = load i32, ptr %p1
   %v = select <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2
   %vret = insertelement <4 x i32> %v, i32 %L0, i32 %idx
-  store i32 %L1, i32 *%p
+  store i32 %L1, ptr %p
   ret <4 x i32> %vret
 }
 
 ; Check that a cycle is avoided during isel between the LD1LANEPOST instruction and the load of %L1.
-define <4 x i32> @f3(i32 *%p, <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2) {
-  %L0 = load i32, i32* %p
-  %p1 = getelementptr i32, i32* %p, i64 1
-  %L1 = load i32, i32* %p1
+define <4 x i32> @f3(ptr %p, <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2) {
+  %L0 = load i32, ptr %p
+  %p1 = getelementptr i32, ptr %p, i64 1
+  %L1 = load i32, ptr %p1
   %v = select <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2
   %vret = insertelement <4 x i32> %v, i32 %L0, i32 %L1
   ret <4 x i32> %vret
 }
 
 ; Function Attrs: nounwind readnone
-declare i64 @llvm.objectsize.i64.p0i8(i8*, i1) #1
+declare i64 @llvm.objectsize.i64.p0(ptr, i1) #1
 
 attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { nounwind readnone }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll b/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
index 2bea1d22d52a3..6a82102d24a37 100644
--- a/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
@@ -1,748 +1,748 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=arm64-apple-ios7.0 -o - %s | FileCheck %s
 
- at ptr = global i8* null
+ at ptr = global ptr null
 
-define <8 x i8> @test_v8i8_pre_load(<8 x i8>* %addr) {
+define <8 x i8> @test_v8i8_pre_load(ptr %addr) {
 ; CHECK-LABEL: test_v8i8_pre_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #40]!
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <8 x i8>, <8 x i8>* %addr, i32 5
-  %val = load <8 x i8>, <8 x i8>* %newaddr, align 8
-  store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
+  %newaddr = getelementptr <8 x i8>, ptr %addr, i32 5
+  %val = load <8 x i8>, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <8 x i8> %val
 }
 
-define <8 x i8> @test_v8i8_post_load(<8 x i8>* %addr) {
+define <8 x i8> @test_v8i8_post_load(ptr %addr) {
 ; CHECK-LABEL: test_v8i8_post_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0], #40
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <8 x i8>, <8 x i8>* %addr, i32 5
-  %val = load <8 x i8>, <8 x i8>* %addr, align 8
-  store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
+  %newaddr = getelementptr <8 x i8>, ptr %addr, i32 5
+  %val = load <8 x i8>, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <8 x i8> %val
 }
 
-define void @test_v8i8_pre_store(<8 x i8> %in, <8 x i8>* %addr) {
+define void @test_v8i8_pre_store(<8 x i8> %in, ptr %addr) {
 ; CHECK-LABEL: test_v8i8_pre_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str d0, [x0, #40]!
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <8 x i8>, <8 x i8>* %addr, i32 5
-  store <8 x i8> %in, <8 x i8>* %newaddr, align 8
-  store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
+  %newaddr = getelementptr <8 x i8>, ptr %addr, i32 5
+  store <8 x i8> %in, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define void @test_v8i8_post_store(<8 x i8> %in, <8 x i8>* %addr) {
+define void @test_v8i8_post_store(<8 x i8> %in, ptr %addr) {
 ; CHECK-LABEL: test_v8i8_post_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str d0, [x0], #40
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <8 x i8>, <8 x i8>* %addr, i32 5
-  store <8 x i8> %in, <8 x i8>* %addr, align 8
-  store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
+  %newaddr = getelementptr <8 x i8>, ptr %addr, i32 5
+  store <8 x i8> %in, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define <4 x i16> @test_v4i16_pre_load(<4 x i16>* %addr) {
+define <4 x i16> @test_v4i16_pre_load(ptr %addr) {
 ; CHECK-LABEL: test_v4i16_pre_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #40]!
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <4 x i16>, <4 x i16>* %addr, i32 5
-  %val = load <4 x i16>, <4 x i16>* %newaddr, align 8
-  store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
+  %newaddr = getelementptr <4 x i16>, ptr %addr, i32 5
+  %val = load <4 x i16>, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <4 x i16> %val
 }
 
-define <4 x i16> @test_v4i16_post_load(<4 x i16>* %addr) {
+define <4 x i16> @test_v4i16_post_load(ptr %addr) {
 ; CHECK-LABEL: test_v4i16_post_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0], #40
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <4 x i16>, <4 x i16>* %addr, i32 5
-  %val = load <4 x i16>, <4 x i16>* %addr, align 8
-  store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
+  %newaddr = getelementptr <4 x i16>, ptr %addr, i32 5
+  %val = load <4 x i16>, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <4 x i16> %val
 }
 
-define void @test_v4i16_pre_store(<4 x i16> %in, <4 x i16>* %addr) {
+define void @test_v4i16_pre_store(<4 x i16> %in, ptr %addr) {
 ; CHECK-LABEL: test_v4i16_pre_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str d0, [x0, #40]!
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <4 x i16>, <4 x i16>* %addr, i32 5
-  store <4 x i16> %in, <4 x i16>* %newaddr, align 8
-  store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
+  %newaddr = getelementptr <4 x i16>, ptr %addr, i32 5
+  store <4 x i16> %in, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define void @test_v4i16_post_store(<4 x i16> %in, <4 x i16>* %addr) {
+define void @test_v4i16_post_store(<4 x i16> %in, ptr %addr) {
 ; CHECK-LABEL: test_v4i16_post_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str d0, [x0], #40
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <4 x i16>, <4 x i16>* %addr, i32 5
-  store <4 x i16> %in, <4 x i16>* %addr, align 8
-  store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
+  %newaddr = getelementptr <4 x i16>, ptr %addr, i32 5
+  store <4 x i16> %in, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define <2 x i32> @test_v2i32_pre_load(<2 x i32>* %addr) {
+define <2 x i32> @test_v2i32_pre_load(ptr %addr) {
 ; CHECK-LABEL: test_v2i32_pre_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #40]!
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <2 x i32>, <2 x i32>* %addr, i32 5
-  %val = load <2 x i32>, <2 x i32>* %newaddr, align 8
-  store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
+  %newaddr = getelementptr <2 x i32>, ptr %addr, i32 5
+  %val = load <2 x i32>, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <2 x i32> %val
 }
 
-define <2 x i32> @test_v2i32_post_load(<2 x i32>* %addr) {
+define <2 x i32> @test_v2i32_post_load(ptr %addr) {
 ; CHECK-LABEL: test_v2i32_post_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0], #40
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <2 x i32>, <2 x i32>* %addr, i32 5
-  %val = load <2 x i32>, <2 x i32>* %addr, align 8
-  store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
+  %newaddr = getelementptr <2 x i32>, ptr %addr, i32 5
+  %val = load <2 x i32>, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <2 x i32> %val
 }
 
-define void @test_v2i32_pre_store(<2 x i32> %in, <2 x i32>* %addr) {
+define void @test_v2i32_pre_store(<2 x i32> %in, ptr %addr) {
 ; CHECK-LABEL: test_v2i32_pre_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str d0, [x0, #40]!
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <2 x i32>, <2 x i32>* %addr, i32 5
-  store <2 x i32> %in, <2 x i32>* %newaddr, align 8
-  store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
+  %newaddr = getelementptr <2 x i32>, ptr %addr, i32 5
+  store <2 x i32> %in, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define void @test_v2i32_post_store(<2 x i32> %in, <2 x i32>* %addr) {
+define void @test_v2i32_post_store(<2 x i32> %in, ptr %addr) {
 ; CHECK-LABEL: test_v2i32_post_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str d0, [x0], #40
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <2 x i32>, <2 x i32>* %addr, i32 5
-  store <2 x i32> %in, <2 x i32>* %addr, align 8
-  store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
+  %newaddr = getelementptr <2 x i32>, ptr %addr, i32 5
+  store <2 x i32> %in, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define <2 x float> @test_v2f32_pre_load(<2 x float>* %addr) {
+define <2 x float> @test_v2f32_pre_load(ptr %addr) {
 ; CHECK-LABEL: test_v2f32_pre_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #40]!
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <2 x float>, <2 x float>* %addr, i32 5
-  %val = load <2 x float>, <2 x float>* %newaddr, align 8
-  store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
+  %newaddr = getelementptr <2 x float>, ptr %addr, i32 5
+  %val = load <2 x float>, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <2 x float> %val
 }
 
-define <2 x float> @test_v2f32_post_load(<2 x float>* %addr) {
+define <2 x float> @test_v2f32_post_load(ptr %addr) {
 ; CHECK-LABEL: test_v2f32_post_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0], #40
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <2 x float>, <2 x float>* %addr, i32 5
-  %val = load <2 x float>, <2 x float>* %addr, align 8
-  store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
+  %newaddr = getelementptr <2 x float>, ptr %addr, i32 5
+  %val = load <2 x float>, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <2 x float> %val
 }
 
-define void @test_v2f32_pre_store(<2 x float> %in, <2 x float>* %addr) {
+define void @test_v2f32_pre_store(<2 x float> %in, ptr %addr) {
 ; CHECK-LABEL: test_v2f32_pre_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str d0, [x0, #40]!
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <2 x float>, <2 x float>* %addr, i32 5
-  store <2 x float> %in, <2 x float>* %newaddr, align 8
-  store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
+  %newaddr = getelementptr <2 x float>, ptr %addr, i32 5
+  store <2 x float> %in, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define void @test_v2f32_post_store(<2 x float> %in, <2 x float>* %addr) {
+define void @test_v2f32_post_store(<2 x float> %in, ptr %addr) {
 ; CHECK-LABEL: test_v2f32_post_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str d0, [x0], #40
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <2 x float>, <2 x float>* %addr, i32 5
-  store <2 x float> %in, <2 x float>* %addr, align 8
-  store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
+  %newaddr = getelementptr <2 x float>, ptr %addr, i32 5
+  store <2 x float> %in, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define <1 x i64> @test_v1i64_pre_load(<1 x i64>* %addr) {
+define <1 x i64> @test_v1i64_pre_load(ptr %addr) {
 ; CHECK-LABEL: test_v1i64_pre_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #40]!
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <1 x i64>, <1 x i64>* %addr, i32 5
-  %val = load <1 x i64>, <1 x i64>* %newaddr, align 8
-  store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
+  %newaddr = getelementptr <1 x i64>, ptr %addr, i32 5
+  %val = load <1 x i64>, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <1 x i64> %val
 }
 
-define <1 x i64> @test_v1i64_post_load(<1 x i64>* %addr) {
+define <1 x i64> @test_v1i64_post_load(ptr %addr) {
 ; CHECK-LABEL: test_v1i64_post_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0], #40
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <1 x i64>, <1 x i64>* %addr, i32 5
-  %val = load <1 x i64>, <1 x i64>* %addr, align 8
-  store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
+  %newaddr = getelementptr <1 x i64>, ptr %addr, i32 5
+  %val = load <1 x i64>, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <1 x i64> %val
 }
 
-define void @test_v1i64_pre_store(<1 x i64> %in, <1 x i64>* %addr) {
+define void @test_v1i64_pre_store(<1 x i64> %in, ptr %addr) {
 ; CHECK-LABEL: test_v1i64_pre_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str d0, [x0, #40]!
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <1 x i64>, <1 x i64>* %addr, i32 5
-  store <1 x i64> %in, <1 x i64>* %newaddr, align 8
-  store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
+  %newaddr = getelementptr <1 x i64>, ptr %addr, i32 5
+  store <1 x i64> %in, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define void @test_v1i64_post_store(<1 x i64> %in, <1 x i64>* %addr) {
+define void @test_v1i64_post_store(<1 x i64> %in, ptr %addr) {
 ; CHECK-LABEL: test_v1i64_post_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str d0, [x0], #40
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <1 x i64>, <1 x i64>* %addr, i32 5
-  store <1 x i64> %in, <1 x i64>* %addr, align 8
-  store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
+  %newaddr = getelementptr <1 x i64>, ptr %addr, i32 5
+  store <1 x i64> %in, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define <16 x i8> @test_v16i8_pre_load(<16 x i8>* %addr) {
+define <16 x i8> @test_v16i8_pre_load(ptr %addr) {
 ; CHECK-LABEL: test_v16i8_pre_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0, #80]!
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <16 x i8>, <16 x i8>* %addr, i32 5
-  %val = load <16 x i8>, <16 x i8>* %newaddr, align 8
-  store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
+  %newaddr = getelementptr <16 x i8>, ptr %addr, i32 5
+  %val = load <16 x i8>, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <16 x i8> %val
 }
 
-define <16 x i8> @test_v16i8_post_load(<16 x i8>* %addr) {
+define <16 x i8> @test_v16i8_post_load(ptr %addr) {
 ; CHECK-LABEL: test_v16i8_post_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0], #80
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <16 x i8>, <16 x i8>* %addr, i32 5
-  %val = load <16 x i8>, <16 x i8>* %addr, align 8
-  store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
+  %newaddr = getelementptr <16 x i8>, ptr %addr, i32 5
+  %val = load <16 x i8>, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <16 x i8> %val
 }
 
-define void @test_v16i8_pre_store(<16 x i8> %in, <16 x i8>* %addr) {
+define void @test_v16i8_pre_store(<16 x i8> %in, ptr %addr) {
 ; CHECK-LABEL: test_v16i8_pre_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str q0, [x0, #80]!
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <16 x i8>, <16 x i8>* %addr, i32 5
-  store <16 x i8> %in, <16 x i8>* %newaddr, align 8
-  store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
+  %newaddr = getelementptr <16 x i8>, ptr %addr, i32 5
+  store <16 x i8> %in, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define void @test_v16i8_post_store(<16 x i8> %in, <16 x i8>* %addr) {
+define void @test_v16i8_post_store(<16 x i8> %in, ptr %addr) {
 ; CHECK-LABEL: test_v16i8_post_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str q0, [x0], #80
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <16 x i8>, <16 x i8>* %addr, i32 5
-  store <16 x i8> %in, <16 x i8>* %addr, align 8
-  store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
+  %newaddr = getelementptr <16 x i8>, ptr %addr, i32 5
+  store <16 x i8> %in, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define <8 x i16> @test_v8i16_pre_load(<8 x i16>* %addr) {
+define <8 x i16> @test_v8i16_pre_load(ptr %addr) {
 ; CHECK-LABEL: test_v8i16_pre_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0, #80]!
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <8 x i16>, <8 x i16>* %addr, i32 5
-  %val = load <8 x i16>, <8 x i16>* %newaddr, align 8
-  store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
+  %newaddr = getelementptr <8 x i16>, ptr %addr, i32 5
+  %val = load <8 x i16>, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <8 x i16> %val
 }
 
-define <8 x i16> @test_v8i16_post_load(<8 x i16>* %addr) {
+define <8 x i16> @test_v8i16_post_load(ptr %addr) {
 ; CHECK-LABEL: test_v8i16_post_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0], #80
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <8 x i16>, <8 x i16>* %addr, i32 5
-  %val = load <8 x i16>, <8 x i16>* %addr, align 8
-  store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
+  %newaddr = getelementptr <8 x i16>, ptr %addr, i32 5
+  %val = load <8 x i16>, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <8 x i16> %val
 }
 
-define void @test_v8i16_pre_store(<8 x i16> %in, <8 x i16>* %addr) {
+define void @test_v8i16_pre_store(<8 x i16> %in, ptr %addr) {
 ; CHECK-LABEL: test_v8i16_pre_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str q0, [x0, #80]!
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <8 x i16>, <8 x i16>* %addr, i32 5
-  store <8 x i16> %in, <8 x i16>* %newaddr, align 8
-  store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
+  %newaddr = getelementptr <8 x i16>, ptr %addr, i32 5
+  store <8 x i16> %in, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define void @test_v8i16_post_store(<8 x i16> %in, <8 x i16>* %addr) {
+define void @test_v8i16_post_store(<8 x i16> %in, ptr %addr) {
 ; CHECK-LABEL: test_v8i16_post_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str q0, [x0], #80
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <8 x i16>, <8 x i16>* %addr, i32 5
-  store <8 x i16> %in, <8 x i16>* %addr, align 8
-  store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
+  %newaddr = getelementptr <8 x i16>, ptr %addr, i32 5
+  store <8 x i16> %in, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define <4 x i32> @test_v4i32_pre_load(<4 x i32>* %addr) {
+define <4 x i32> @test_v4i32_pre_load(ptr %addr) {
 ; CHECK-LABEL: test_v4i32_pre_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0, #80]!
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <4 x i32>, <4 x i32>* %addr, i32 5
-  %val = load <4 x i32>, <4 x i32>* %newaddr, align 8
-  store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
+  %newaddr = getelementptr <4 x i32>, ptr %addr, i32 5
+  %val = load <4 x i32>, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <4 x i32> %val
 }
 
-define <4 x i32> @test_v4i32_post_load(<4 x i32>* %addr) {
+define <4 x i32> @test_v4i32_post_load(ptr %addr) {
 ; CHECK-LABEL: test_v4i32_post_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0], #80
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <4 x i32>, <4 x i32>* %addr, i32 5
-  %val = load <4 x i32>, <4 x i32>* %addr, align 8
-  store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
+  %newaddr = getelementptr <4 x i32>, ptr %addr, i32 5
+  %val = load <4 x i32>, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <4 x i32> %val
 }
 
-define void @test_v4i32_pre_store(<4 x i32> %in, <4 x i32>* %addr) {
+define void @test_v4i32_pre_store(<4 x i32> %in, ptr %addr) {
 ; CHECK-LABEL: test_v4i32_pre_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str q0, [x0, #80]!
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <4 x i32>, <4 x i32>* %addr, i32 5
-  store <4 x i32> %in, <4 x i32>* %newaddr, align 8
-  store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
+  %newaddr = getelementptr <4 x i32>, ptr %addr, i32 5
+  store <4 x i32> %in, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define void @test_v4i32_post_store(<4 x i32> %in, <4 x i32>* %addr) {
+define void @test_v4i32_post_store(<4 x i32> %in, ptr %addr) {
 ; CHECK-LABEL: test_v4i32_post_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str q0, [x0], #80
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <4 x i32>, <4 x i32>* %addr, i32 5
-  store <4 x i32> %in, <4 x i32>* %addr, align 8
-  store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
+  %newaddr = getelementptr <4 x i32>, ptr %addr, i32 5
+  store <4 x i32> %in, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
 
-define <4 x float> @test_v4f32_pre_load(<4 x float>* %addr) {
+define <4 x float> @test_v4f32_pre_load(ptr %addr) {
 ; CHECK-LABEL: test_v4f32_pre_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0, #80]!
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <4 x float>, <4 x float>* %addr, i32 5
-  %val = load <4 x float>, <4 x float>* %newaddr, align 8
-  store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
+  %newaddr = getelementptr <4 x float>, ptr %addr, i32 5
+  %val = load <4 x float>, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <4 x float> %val
 }
 
-define <4 x float> @test_v4f32_post_load(<4 x float>* %addr) {
+define <4 x float> @test_v4f32_post_load(ptr %addr) {
 ; CHECK-LABEL: test_v4f32_post_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0], #80
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <4 x float>, <4 x float>* %addr, i32 5
-  %val = load <4 x float>, <4 x float>* %addr, align 8
-  store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
+  %newaddr = getelementptr <4 x float>, ptr %addr, i32 5
+  %val = load <4 x float>, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <4 x float> %val
 }
 
-define void @test_v4f32_pre_store(<4 x float> %in, <4 x float>* %addr) {
+define void @test_v4f32_pre_store(<4 x float> %in, ptr %addr) {
 ; CHECK-LABEL: test_v4f32_pre_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str q0, [x0, #80]!
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <4 x float>, <4 x float>* %addr, i32 5
-  store <4 x float> %in, <4 x float>* %newaddr, align 8
-  store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
+  %newaddr = getelementptr <4 x float>, ptr %addr, i32 5
+  store <4 x float> %in, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define void @test_v4f32_post_store(<4 x float> %in, <4 x float>* %addr) {
+define void @test_v4f32_post_store(<4 x float> %in, ptr %addr) {
 ; CHECK-LABEL: test_v4f32_post_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str q0, [x0], #80
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <4 x float>, <4 x float>* %addr, i32 5
-  store <4 x float> %in, <4 x float>* %addr, align 8
-  store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
+  %newaddr = getelementptr <4 x float>, ptr %addr, i32 5
+  store <4 x float> %in, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
 
-define <2 x i64> @test_v2i64_pre_load(<2 x i64>* %addr) {
+define <2 x i64> @test_v2i64_pre_load(ptr %addr) {
 ; CHECK-LABEL: test_v2i64_pre_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0, #80]!
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <2 x i64>, <2 x i64>* %addr, i32 5
-  %val = load <2 x i64>, <2 x i64>* %newaddr, align 8
-  store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
+  %newaddr = getelementptr <2 x i64>, ptr %addr, i32 5
+  %val = load <2 x i64>, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <2 x i64> %val
 }
 
-define <2 x i64> @test_v2i64_post_load(<2 x i64>* %addr) {
+define <2 x i64> @test_v2i64_post_load(ptr %addr) {
 ; CHECK-LABEL: test_v2i64_post_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0], #80
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <2 x i64>, <2 x i64>* %addr, i32 5
-  %val = load <2 x i64>, <2 x i64>* %addr, align 8
-  store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
+  %newaddr = getelementptr <2 x i64>, ptr %addr, i32 5
+  %val = load <2 x i64>, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <2 x i64> %val
 }
 
-define void @test_v2i64_pre_store(<2 x i64> %in, <2 x i64>* %addr) {
+define void @test_v2i64_pre_store(<2 x i64> %in, ptr %addr) {
 ; CHECK-LABEL: test_v2i64_pre_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str q0, [x0, #80]!
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <2 x i64>, <2 x i64>* %addr, i32 5
-  store <2 x i64> %in, <2 x i64>* %newaddr, align 8
-  store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
+  %newaddr = getelementptr <2 x i64>, ptr %addr, i32 5
+  store <2 x i64> %in, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define void @test_v2i64_post_store(<2 x i64> %in, <2 x i64>* %addr) {
+define void @test_v2i64_post_store(<2 x i64> %in, ptr %addr) {
 ; CHECK-LABEL: test_v2i64_post_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str q0, [x0], #80
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <2 x i64>, <2 x i64>* %addr, i32 5
-  store <2 x i64> %in, <2 x i64>* %addr, align 8
-  store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
+  %newaddr = getelementptr <2 x i64>, ptr %addr, i32 5
+  store <2 x i64> %in, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
 
-define <2 x double> @test_v2f64_pre_load(<2 x double>* %addr) {
+define <2 x double> @test_v2f64_pre_load(ptr %addr) {
 ; CHECK-LABEL: test_v2f64_pre_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0, #80]!
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <2 x double>, <2 x double>* %addr, i32 5
-  %val = load <2 x double>, <2 x double>* %newaddr, align 8
-  store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
+  %newaddr = getelementptr <2 x double>, ptr %addr, i32 5
+  %val = load <2 x double>, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <2 x double> %val
 }
 
-define <2 x double> @test_v2f64_post_load(<2 x double>* %addr) {
+define <2 x double> @test_v2f64_post_load(ptr %addr) {
 ; CHECK-LABEL: test_v2f64_post_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0], #80
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <2 x double>, <2 x double>* %addr, i32 5
-  %val = load <2 x double>, <2 x double>* %addr, align 8
-  store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
+  %newaddr = getelementptr <2 x double>, ptr %addr, i32 5
+  %val = load <2 x double>, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret <2 x double> %val
 }
 
-define void @test_v2f64_pre_store(<2 x double> %in, <2 x double>* %addr) {
+define void @test_v2f64_pre_store(<2 x double> %in, ptr %addr) {
 ; CHECK-LABEL: test_v2f64_pre_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str q0, [x0, #80]!
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <2 x double>, <2 x double>* %addr, i32 5
-  store <2 x double> %in, <2 x double>* %newaddr, align 8
-  store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
+  %newaddr = getelementptr <2 x double>, ptr %addr, i32 5
+  store <2 x double> %in, ptr %newaddr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define void @test_v2f64_post_store(<2 x double> %in, <2 x double>* %addr) {
+define void @test_v2f64_post_store(<2 x double> %in, ptr %addr) {
 ; CHECK-LABEL: test_v2f64_post_store:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    adrp x8, _ptr@PAGE
 ; CHECK-NEXT:    str q0, [x0], #80
 ; CHECK-NEXT:    str x0, [x8, _ptr@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %newaddr = getelementptr <2 x double>, <2 x double>* %addr, i32 5
-  store <2 x double> %in, <2 x double>* %addr, align 8
-  store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
+  %newaddr = getelementptr <2 x double>, ptr %addr, i32 5
+  store <2 x double> %in, ptr %addr, align 8
+  store ptr %newaddr, ptr @ptr
   ret void
 }
 
-define i8* @test_v16i8_post_imm_st1_lane(<16 x i8> %in, i8* %addr) {
+define ptr @test_v16i8_post_imm_st1_lane(<16 x i8> %in, ptr %addr) {
 ; CHECK-LABEL: test_v16i8_post_imm_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    st1.b { v0 }[3], [x0], #1
 ; CHECK-NEXT:    ret
   %elt = extractelement <16 x i8> %in, i32 3
-  store i8 %elt, i8* %addr
+  store i8 %elt, ptr %addr
 
-  %newaddr = getelementptr i8, i8* %addr, i32 1
-  ret i8* %newaddr
+  %newaddr = getelementptr i8, ptr %addr, i32 1
+  ret ptr %newaddr
 }
 
-define i8* @test_v16i8_post_reg_st1_lane(<16 x i8> %in, i8* %addr) {
+define ptr @test_v16i8_post_reg_st1_lane(<16 x i8> %in, ptr %addr) {
 ; CHECK-LABEL: test_v16i8_post_reg_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #2
 ; CHECK-NEXT:    st1.b { v0 }[3], [x0], x8
 ; CHECK-NEXT:    ret
   %elt = extractelement <16 x i8> %in, i32 3
-  store i8 %elt, i8* %addr
+  store i8 %elt, ptr %addr
 
-  %newaddr = getelementptr i8, i8* %addr, i32 2
-  ret i8* %newaddr
+  %newaddr = getelementptr i8, ptr %addr, i32 2
+  ret ptr %newaddr
 }
 
 
-define i16* @test_v8i16_post_imm_st1_lane(<8 x i16> %in, i16* %addr) {
+define ptr @test_v8i16_post_imm_st1_lane(<8 x i16> %in, ptr %addr) {
 ; CHECK-LABEL: test_v8i16_post_imm_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    st1.h { v0 }[3], [x0], #2
 ; CHECK-NEXT:    ret
   %elt = extractelement <8 x i16> %in, i32 3
-  store i16 %elt, i16* %addr
+  store i16 %elt, ptr %addr
 
-  %newaddr = getelementptr i16, i16* %addr, i32 1
-  ret i16* %newaddr
+  %newaddr = getelementptr i16, ptr %addr, i32 1
+  ret ptr %newaddr
 }
 
-define i16* @test_v8i16_post_reg_st1_lane(<8 x i16> %in, i16* %addr) {
+define ptr @test_v8i16_post_reg_st1_lane(<8 x i16> %in, ptr %addr) {
 ; CHECK-LABEL: test_v8i16_post_reg_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #4
 ; CHECK-NEXT:    st1.h { v0 }[3], [x0], x8
 ; CHECK-NEXT:    ret
   %elt = extractelement <8 x i16> %in, i32 3
-  store i16 %elt, i16* %addr
+  store i16 %elt, ptr %addr
 
-  %newaddr = getelementptr i16, i16* %addr, i32 2
-  ret i16* %newaddr
+  %newaddr = getelementptr i16, ptr %addr, i32 2
+  ret ptr %newaddr
 }
 
-define i32* @test_v4i32_post_imm_st1_lane(<4 x i32> %in, i32* %addr) {
+define ptr @test_v4i32_post_imm_st1_lane(<4 x i32> %in, ptr %addr) {
 ; CHECK-LABEL: test_v4i32_post_imm_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    st1.s { v0 }[3], [x0], #4
 ; CHECK-NEXT:    ret
   %elt = extractelement <4 x i32> %in, i32 3
-  store i32 %elt, i32* %addr
+  store i32 %elt, ptr %addr
 
-  %newaddr = getelementptr i32, i32* %addr, i32 1
-  ret i32* %newaddr
+  %newaddr = getelementptr i32, ptr %addr, i32 1
+  ret ptr %newaddr
 }
 
-define i32* @test_v4i32_post_reg_st1_lane(<4 x i32> %in, i32* %addr) {
+define ptr @test_v4i32_post_reg_st1_lane(<4 x i32> %in, ptr %addr) {
 ; CHECK-LABEL: test_v4i32_post_reg_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #8
 ; CHECK-NEXT:    st1.s { v0 }[3], [x0], x8
 ; CHECK-NEXT:    ret
   %elt = extractelement <4 x i32> %in, i32 3
-  store i32 %elt, i32* %addr
+  store i32 %elt, ptr %addr
 
-  %newaddr = getelementptr i32, i32* %addr, i32 2
-  ret i32* %newaddr
+  %newaddr = getelementptr i32, ptr %addr, i32 2
+  ret ptr %newaddr
 }
 
-define float* @test_v4f32_post_imm_st1_lane(<4 x float> %in, float* %addr) {
+define ptr @test_v4f32_post_imm_st1_lane(<4 x float> %in, ptr %addr) {
 ; CHECK-LABEL: test_v4f32_post_imm_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    st1.s { v0 }[3], [x0], #4
 ; CHECK-NEXT:    ret
   %elt = extractelement <4 x float> %in, i32 3
-  store float %elt, float* %addr
+  store float %elt, ptr %addr
 
-  %newaddr = getelementptr float, float* %addr, i32 1
-  ret float* %newaddr
+  %newaddr = getelementptr float, ptr %addr, i32 1
+  ret ptr %newaddr
 }
 
-define float* @test_v4f32_post_reg_st1_lane(<4 x float> %in, float* %addr) {
+define ptr @test_v4f32_post_reg_st1_lane(<4 x float> %in, ptr %addr) {
 ; CHECK-LABEL: test_v4f32_post_reg_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #8
 ; CHECK-NEXT:    st1.s { v0 }[3], [x0], x8
 ; CHECK-NEXT:    ret
   %elt = extractelement <4 x float> %in, i32 3
-  store float %elt, float* %addr
+  store float %elt, ptr %addr
 
-  %newaddr = getelementptr float, float* %addr, i32 2
-  ret float* %newaddr
+  %newaddr = getelementptr float, ptr %addr, i32 2
+  ret ptr %newaddr
 }
 
-define i64* @test_v2i64_post_imm_st1_lane(<2 x i64> %in, i64* %addr) {
+define ptr @test_v2i64_post_imm_st1_lane(<2 x i64> %in, ptr %addr) {
 ; CHECK-LABEL: test_v2i64_post_imm_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    st1.d { v0 }[1], [x0], #8
 ; CHECK-NEXT:    ret
   %elt = extractelement <2 x i64> %in, i64 1
-  store i64 %elt, i64* %addr
+  store i64 %elt, ptr %addr
 
-  %newaddr = getelementptr i64, i64* %addr, i64 1
-  ret i64* %newaddr
+  %newaddr = getelementptr i64, ptr %addr, i64 1
+  ret ptr %newaddr
 }
 
-define i64* @test_v2i64_post_reg_st1_lane(<2 x i64> %in, i64* %addr) {
+define ptr @test_v2i64_post_reg_st1_lane(<2 x i64> %in, ptr %addr) {
 ; CHECK-LABEL: test_v2i64_post_reg_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #16
 ; CHECK-NEXT:    st1.d { v0 }[1], [x0], x8
 ; CHECK-NEXT:    ret
   %elt = extractelement <2 x i64> %in, i64 1
-  store i64 %elt, i64* %addr
+  store i64 %elt, ptr %addr
 
-  %newaddr = getelementptr i64, i64* %addr, i64 2
-  ret i64* %newaddr
+  %newaddr = getelementptr i64, ptr %addr, i64 2
+  ret ptr %newaddr
 }
 
-define double* @test_v2f64_post_imm_st1_lane(<2 x double> %in, double* %addr) {
+define ptr @test_v2f64_post_imm_st1_lane(<2 x double> %in, ptr %addr) {
 ; CHECK-LABEL: test_v2f64_post_imm_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    st1.d { v0 }[1], [x0], #8
 ; CHECK-NEXT:    ret
   %elt = extractelement <2 x double> %in, i32 1
-  store double %elt, double* %addr
+  store double %elt, ptr %addr
 
-  %newaddr = getelementptr double, double* %addr, i32 1
-  ret double* %newaddr
+  %newaddr = getelementptr double, ptr %addr, i32 1
+  ret ptr %newaddr
 }
 
-define double* @test_v2f64_post_reg_st1_lane(<2 x double> %in, double* %addr) {
+define ptr @test_v2f64_post_reg_st1_lane(<2 x double> %in, ptr %addr) {
 ; CHECK-LABEL: test_v2f64_post_reg_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #16
 ; CHECK-NEXT:    st1.d { v0 }[1], [x0], x8
 ; CHECK-NEXT:    ret
   %elt = extractelement <2 x double> %in, i32 1
-  store double %elt, double* %addr
+  store double %elt, ptr %addr
 
-  %newaddr = getelementptr double, double* %addr, i32 2
-  ret double* %newaddr
+  %newaddr = getelementptr double, ptr %addr, i32 2
+  ret ptr %newaddr
 }
 
-define i8* @test_v8i8_post_imm_st1_lane(<8 x i8> %in, i8* %addr) {
+define ptr @test_v8i8_post_imm_st1_lane(<8 x i8> %in, ptr %addr) {
 ; CHECK-LABEL: test_v8i8_post_imm_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    st1.b { v0 }[3], [x0], #1
 ; CHECK-NEXT:    ret
   %elt = extractelement <8 x i8> %in, i32 3
-  store i8 %elt, i8* %addr
+  store i8 %elt, ptr %addr
 
-  %newaddr = getelementptr i8, i8* %addr, i32 1
-  ret i8* %newaddr
+  %newaddr = getelementptr i8, ptr %addr, i32 1
+  ret ptr %newaddr
 }
 
-define i8* @test_v8i8_post_reg_st1_lane(<8 x i8> %in, i8* %addr) {
+define ptr @test_v8i8_post_reg_st1_lane(<8 x i8> %in, ptr %addr) {
 ; CHECK-LABEL: test_v8i8_post_reg_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #2
@@ -750,26 +750,26 @@ define i8* @test_v8i8_post_reg_st1_lane(<8 x i8> %in, i8* %addr) {
 ; CHECK-NEXT:    st1.b { v0 }[3], [x0], x8
 ; CHECK-NEXT:    ret
   %elt = extractelement <8 x i8> %in, i32 3
-  store i8 %elt, i8* %addr
+  store i8 %elt, ptr %addr
 
-  %newaddr = getelementptr i8, i8* %addr, i32 2
-  ret i8* %newaddr
+  %newaddr = getelementptr i8, ptr %addr, i32 2
+  ret ptr %newaddr
 }
 
-define i16* @test_v4i16_post_imm_st1_lane(<4 x i16> %in, i16* %addr) {
+define ptr @test_v4i16_post_imm_st1_lane(<4 x i16> %in, ptr %addr) {
 ; CHECK-LABEL: test_v4i16_post_imm_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    st1.h { v0 }[3], [x0], #2
 ; CHECK-NEXT:    ret
   %elt = extractelement <4 x i16> %in, i32 3
-  store i16 %elt, i16* %addr
+  store i16 %elt, ptr %addr
 
-  %newaddr = getelementptr i16, i16* %addr, i32 1
-  ret i16* %newaddr
+  %newaddr = getelementptr i16, ptr %addr, i32 1
+  ret ptr %newaddr
 }
 
-define i16* @test_v4i16_post_reg_st1_lane(<4 x i16> %in, i16* %addr) {
+define ptr @test_v4i16_post_reg_st1_lane(<4 x i16> %in, ptr %addr) {
 ; CHECK-LABEL: test_v4i16_post_reg_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #4
@@ -777,26 +777,26 @@ define i16* @test_v4i16_post_reg_st1_lane(<4 x i16> %in, i16* %addr) {
 ; CHECK-NEXT:    st1.h { v0 }[3], [x0], x8
 ; CHECK-NEXT:    ret
   %elt = extractelement <4 x i16> %in, i32 3
-  store i16 %elt, i16* %addr
+  store i16 %elt, ptr %addr
 
-  %newaddr = getelementptr i16, i16* %addr, i32 2
-  ret i16* %newaddr
+  %newaddr = getelementptr i16, ptr %addr, i32 2
+  ret ptr %newaddr
 }
 
-define i32* @test_v2i32_post_imm_st1_lane(<2 x i32> %in, i32* %addr) {
+define ptr @test_v2i32_post_imm_st1_lane(<2 x i32> %in, ptr %addr) {
 ; CHECK-LABEL: test_v2i32_post_imm_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    st1.s { v0 }[1], [x0], #4
 ; CHECK-NEXT:    ret
   %elt = extractelement <2 x i32> %in, i32 1
-  store i32 %elt, i32* %addr
+  store i32 %elt, ptr %addr
 
-  %newaddr = getelementptr i32, i32* %addr, i32 1
-  ret i32* %newaddr
+  %newaddr = getelementptr i32, ptr %addr, i32 1
+  ret ptr %newaddr
 }
 
-define i32* @test_v2i32_post_reg_st1_lane(<2 x i32> %in, i32* %addr) {
+define ptr @test_v2i32_post_reg_st1_lane(<2 x i32> %in, ptr %addr) {
 ; CHECK-LABEL: test_v2i32_post_reg_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #8
@@ -804,26 +804,26 @@ define i32* @test_v2i32_post_reg_st1_lane(<2 x i32> %in, i32* %addr) {
 ; CHECK-NEXT:    st1.s { v0 }[1], [x0], x8
 ; CHECK-NEXT:    ret
   %elt = extractelement <2 x i32> %in, i32 1
-  store i32 %elt, i32* %addr
+  store i32 %elt, ptr %addr
 
-  %newaddr = getelementptr i32, i32* %addr, i32 2
-  ret i32* %newaddr
+  %newaddr = getelementptr i32, ptr %addr, i32 2
+  ret ptr %newaddr
 }
 
-define float* @test_v2f32_post_imm_st1_lane(<2 x float> %in, float* %addr) {
+define ptr @test_v2f32_post_imm_st1_lane(<2 x float> %in, ptr %addr) {
 ; CHECK-LABEL: test_v2f32_post_imm_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    st1.s { v0 }[1], [x0], #4
 ; CHECK-NEXT:    ret
   %elt = extractelement <2 x float> %in, i32 1
-  store float %elt, float* %addr
+  store float %elt, ptr %addr
 
-  %newaddr = getelementptr float, float* %addr, i32 1
-  ret float* %newaddr
+  %newaddr = getelementptr float, ptr %addr, i32 1
+  ret ptr %newaddr
 }
 
-define float* @test_v2f32_post_reg_st1_lane(<2 x float> %in, float* %addr) {
+define ptr @test_v2f32_post_reg_st1_lane(<2 x float> %in, ptr %addr) {
 ; CHECK-LABEL: test_v2f32_post_reg_st1_lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #8
@@ -831,3006 +831,3006 @@ define float* @test_v2f32_post_reg_st1_lane(<2 x float> %in, float* %addr) {
 ; CHECK-NEXT:    st1.s { v0 }[1], [x0], x8
 ; CHECK-NEXT:    ret
   %elt = extractelement <2 x float> %in, i32 1
-  store float %elt, float* %addr
+  store float %elt, ptr %addr
 
-  %newaddr = getelementptr float, float* %addr, i32 2
-  ret float* %newaddr
+  %newaddr = getelementptr float, ptr %addr, i32 2
+  ret ptr %newaddr
 }
 
-define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld2(i8* %A, i8** %ptr) {
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v16i8_post_imm_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2.16b { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 32
-  store i8* %tmp, i8** %ptr
+  %ld2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 32
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8> } %ld2
 }
 
-define { <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld2(i8* %A, i8** %ptr, i64 %inc) {
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v16i8_post_reg_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2.16b { v0, v1 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8> } %ld2
 }
 
-declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8*)
+declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0(ptr)
 
 
-define { <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld2(i8* %A, i8** %ptr) {
+define { <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v8i8_post_imm_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2.8b { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 16
-  store i8* %tmp, i8** %ptr
+  %ld2 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 16
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8> } %ld2
 }
 
-define { <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld2(i8* %A, i8** %ptr, i64 %inc) {
+define { <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v8i8_post_reg_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2.8b { v0, v1 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld2 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8> } %ld2
 }
 
-declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0i8(i8*)
+declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0(ptr)
 
 
-define { <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld2(i16* %A, i16** %ptr) {
+define { <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v8i16_post_imm_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2.8h { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 16
-  store i16* %tmp, i16** %ptr
+  %ld2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 16
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16> } %ld2
 }
 
-define { <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld2(i16* %A, i16** %ptr, i64 %inc) {
+define { <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v8i16_post_reg_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld2.8h { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16> } %ld2
 }
 
-declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0i16(i16*)
+declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0(ptr)
 
 
-define { <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld2(i16* %A, i16** %ptr) {
+define { <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4i16_post_imm_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2.4h { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 8
-  store i16* %tmp, i16** %ptr
+  %ld2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 8
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16> } %ld2
 }
 
-define { <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld2(i16* %A, i16** %ptr, i64 %inc) {
+define { <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4i16_post_reg_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld2.4h { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16> } %ld2
 }
 
-declare { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0i16(i16*)
+declare { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0(ptr)
 
 
-define { <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld2(i32* %A, i32** %ptr) {
+define { <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4i32_post_imm_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2.4s { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 8
-  store i32* %tmp, i32** %ptr
+  %ld2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 8
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32> } %ld2
 }
 
-define { <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld2(i32* %A, i32** %ptr, i64 %inc) {
+define { <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4i32_post_reg_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld2.4s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32> } %ld2
 }
 
-declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32*)
+declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr)
 
 
-define { <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld2(i32* %A, i32** %ptr) {
+define { <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2i32_post_imm_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2.2s { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 4
-  store i32* %tmp, i32** %ptr
+  %ld2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32> } %ld2
 }
 
-define { <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld2(i32* %A, i32** %ptr, i64 %inc) {
+define { <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2i32_post_reg_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld2.2s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32> } %ld2
 }
 
-declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32*)
+declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0(ptr)
 
 
-define { <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld2(i64* %A, i64** %ptr) {
+define { <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2i64_post_imm_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2.2d { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 4
-  store i64* %tmp, i64** %ptr
+  %ld2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64> } %ld2
 }
 
-define { <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld2(i64* %A, i64** %ptr, i64 %inc) {
+define { <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2i64_post_reg_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld2.2d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64> } %ld2
 }
 
-declare { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0i64(i64*)
+declare { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0(ptr)
 
 
-define { <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld2(i64* %A, i64** %ptr) {
+define { <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v1i64_post_imm_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.1d { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 2
-  store i64* %tmp, i64** %ptr
+  %ld2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64> } %ld2
 }
 
-define { <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld2(i64* %A, i64** %ptr, i64 %inc) {
+define { <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v1i64_post_reg_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.1d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64> } %ld2
 }
 
-declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0i64(i64*)
+declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0(ptr)
 
 
-define { <4 x float>, <4 x float> } @test_v4f32_post_imm_ld2(float* %A, float** %ptr) {
+define { <4 x float>, <4 x float> } @test_v4f32_post_imm_ld2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4f32_post_imm_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2.4s { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 8
-  store float* %tmp, float** %ptr
+  %ld2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 8
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float> } %ld2
 }
 
-define { <4 x float>, <4 x float> } @test_v4f32_post_reg_ld2(float* %A, float** %ptr, i64 %inc) {
+define { <4 x float>, <4 x float> } @test_v4f32_post_reg_ld2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4f32_post_reg_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld2.4s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float> } %ld2
 }
 
-declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0f32(float*)
+declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0(ptr)
 
 
-define { <2 x float>, <2 x float> } @test_v2f32_post_imm_ld2(float* %A, float** %ptr) {
+define { <2 x float>, <2 x float> } @test_v2f32_post_imm_ld2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2f32_post_imm_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2.2s { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 4
-  store float* %tmp, float** %ptr
+  %ld2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float> } %ld2
 }
 
-define { <2 x float>, <2 x float> } @test_v2f32_post_reg_ld2(float* %A, float** %ptr, i64 %inc) {
+define { <2 x float>, <2 x float> } @test_v2f32_post_reg_ld2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2f32_post_reg_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld2.2s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float> } %ld2
 }
 
-declare { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2.v2f32.p0f32(float*)
+declare { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2.v2f32.p0(ptr)
 
 
-define { <2 x double>, <2 x double> } @test_v2f64_post_imm_ld2(double* %A, double** %ptr) {
+define { <2 x double>, <2 x double> } @test_v2f64_post_imm_ld2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2f64_post_imm_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2.2d { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 4
-  store double* %tmp, double** %ptr
+  %ld2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double> } %ld2
 }
 
-define { <2 x double>, <2 x double> } @test_v2f64_post_reg_ld2(double* %A, double** %ptr, i64 %inc) {
+define { <2 x double>, <2 x double> } @test_v2f64_post_reg_ld2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2f64_post_reg_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld2.2d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double> } %ld2
 }
 
-declare { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2.v2f64.p0f64(double*)
+declare { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2.v2f64.p0(ptr)
 
 
-define { <1 x double>, <1 x double> } @test_v1f64_post_imm_ld2(double* %A, double** %ptr) {
+define { <1 x double>, <1 x double> } @test_v1f64_post_imm_ld2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v1f64_post_imm_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.1d { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 2
-  store double* %tmp, double** %ptr
+  %ld2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double> } %ld2
 }
 
-define { <1 x double>, <1 x double> } @test_v1f64_post_reg_ld2(double* %A, double** %ptr, i64 %inc) {
+define { <1 x double>, <1 x double> } @test_v1f64_post_reg_ld2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v1f64_post_reg_ld2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.1d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double> } %ld2
 }
 
-declare { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2.v1f64.p0f64(double*)
+declare { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2.v1f64.p0(ptr)
 
 
-define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld3(i8* %A, i8** %ptr) {
+define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v16i8_post_imm_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3.16b { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 48
-  store i8* %tmp, i8** %ptr
+  %ld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 48
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
 }
 
-define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld3(i8* %A, i8** %ptr, i64 %inc) {
+define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v16i8_post_reg_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3.16b { v0, v1, v2 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
 }
 
-declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8*)
+declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0(ptr)
 
 
-define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld3(i8* %A, i8** %ptr) {
+define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v8i8_post_imm_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3.8b { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 24
-  store i8* %tmp, i8** %ptr
+  %ld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 24
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
 }
 
-define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld3(i8* %A, i8** %ptr, i64 %inc) {
+define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v8i8_post_reg_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3.8b { v0, v1, v2 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
 }
 
-declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0i8(i8*)
+declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0(ptr)
 
 
-define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld3(i16* %A, i16** %ptr) {
+define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v8i16_post_imm_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3.8h { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 24
-  store i16* %tmp, i16** %ptr
+  %ld3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 24
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
 }
 
-define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld3(i16* %A, i16** %ptr, i64 %inc) {
+define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v8i16_post_reg_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld3.8h { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
 }
 
-declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0i16(i16*)
+declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0(ptr)
 
 
-define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld3(i16* %A, i16** %ptr) {
+define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4i16_post_imm_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3.4h { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 12
-  store i16* %tmp, i16** %ptr
+  %ld3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 12
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
 }
 
-define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld3(i16* %A, i16** %ptr, i64 %inc) {
+define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4i16_post_reg_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld3.4h { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
 }
 
-declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16*)
+declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0(ptr)
 
 
-define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld3(i32* %A, i32** %ptr) {
+define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4i32_post_imm_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3.4s { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 12
-  store i32* %tmp, i32** %ptr
+  %ld3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 12
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
 }
 
-define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld3(i32* %A, i32** %ptr, i64 %inc) {
+define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4i32_post_reg_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld3.4s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
 }
 
-declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i32(i32*)
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0(ptr)
 
 
-define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld3(i32* %A, i32** %ptr) {
+define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2i32_post_imm_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3.2s { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 6
-  store i32* %tmp, i32** %ptr
+  %ld3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 6
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
 }
 
-define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld3(i32* %A, i32** %ptr, i64 %inc) {
+define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2i32_post_reg_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld3.2s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
 }
 
-declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3.v2i32.p0i32(i32*)
+declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3.v2i32.p0(ptr)
 
 
-define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld3(i64* %A, i64** %ptr) {
+define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2i64_post_imm_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3.2d { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 6
-  store i64* %tmp, i64** %ptr
+  %ld3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 6
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
 }
 
-define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld3(i64* %A, i64** %ptr, i64 %inc) {
+define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2i64_post_reg_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld3.2d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
 }
 
-declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0i64(i64*)
+declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0(ptr)
 
 
-define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld3(i64* %A, i64** %ptr) {
+define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v1i64_post_imm_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.1d { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 3
-  store i64* %tmp, i64** %ptr
+  %ld3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
 }
 
-define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld3(i64* %A, i64** %ptr, i64 %inc) {
+define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v1i64_post_reg_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.1d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
 }
 
-declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0i64(i64*)
+declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0(ptr)
 
 
-define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld3(float* %A, float** %ptr) {
+define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4f32_post_imm_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3.4s { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 12
-  store float* %tmp, float** %ptr
+  %ld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 12
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float>, <4 x float> } %ld3
 }
 
-define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld3(float* %A, float** %ptr, i64 %inc) {
+define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4f32_post_reg_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld3.4s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float>, <4 x float> } %ld3
 }
 
-declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(float*)
+declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0(ptr)
 
 
-define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld3(float* %A, float** %ptr) {
+define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2f32_post_imm_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3.2s { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 6
-  store float* %tmp, float** %ptr
+  %ld3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 6
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float>, <2 x float> } %ld3
 }
 
-define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld3(float* %A, float** %ptr, i64 %inc) {
+define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2f32_post_reg_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld3.2s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float>, <2 x float> } %ld3
 }
 
-declare { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3.v2f32.p0f32(float*)
+declare { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3.v2f32.p0(ptr)
 
 
-define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld3(double* %A, double** %ptr) {
+define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2f64_post_imm_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3.2d { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 6
-  store double* %tmp, double** %ptr
+  %ld3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 6
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double>, <2 x double> } %ld3
 }
 
-define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld3(double* %A, double** %ptr, i64 %inc) {
+define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2f64_post_reg_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld3.2d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double>, <2 x double> } %ld3
 }
 
-declare { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3.v2f64.p0f64(double*)
+declare { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3.v2f64.p0(ptr)
 
 
-define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld3(double* %A, double** %ptr) {
+define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v1f64_post_imm_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.1d { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 3
-  store double* %tmp, double** %ptr
+  %ld3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double>, <1 x double> } %ld3
 }
 
-define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld3(double* %A, double** %ptr, i64 %inc) {
+define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v1f64_post_reg_ld3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.1d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double>, <1 x double> } %ld3
 }
 
-declare { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3.v1f64.p0f64(double*)
+declare { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3.v1f64.p0(ptr)
 
 
-define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld4(i8* %A, i8** %ptr) {
+define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v16i8_post_imm_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4.16b { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 64
-  store i8* %tmp, i8** %ptr
+  %ld4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 64
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
 }
 
-define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld4(i8* %A, i8** %ptr, i64 %inc) {
+define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v16i8_post_reg_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4.16b { v0, v1, v2, v3 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
 }
 
-declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0i8(i8*)
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr)
 
 
-define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld4(i8* %A, i8** %ptr) {
+define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v8i8_post_imm_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4.8b { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 32
-  store i8* %tmp, i8** %ptr
+  %ld4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 32
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
 }
 
-define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld4(i8* %A, i8** %ptr, i64 %inc) {
+define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v8i8_post_reg_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4.8b { v0, v1, v2, v3 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
 }
 
-declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0i8(i8*)
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0(ptr)
 
 
-define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld4(i16* %A, i16** %ptr) {
+define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v8i16_post_imm_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4.8h { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 32
-  store i16* %tmp, i16** %ptr
+  %ld4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 32
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
 }
 
-define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld4(i16* %A, i16** %ptr, i64 %inc) {
+define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v8i16_post_reg_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld4.8h { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
 }
 
-declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0i16(i16*)
+declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0(ptr)
 
 
-define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld4(i16* %A, i16** %ptr) {
+define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4i16_post_imm_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4.4h { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 16
-  store i16* %tmp, i16** %ptr
+  %ld4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 16
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
 }
 
-define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld4(i16* %A, i16** %ptr, i64 %inc) {
+define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4i16_post_reg_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld4.4h { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
 }
 
-declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0i16(i16*)
+declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr)
 
 
-define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld4(i32* %A, i32** %ptr) {
+define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4i32_post_imm_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4.4s { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 16
-  store i32* %tmp, i32** %ptr
+  %ld4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 16
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
 }
 
-define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld4(i32* %A, i32** %ptr, i64 %inc) {
+define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4i32_post_reg_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld4.4s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
 }
 
-declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0i32(i32*)
+declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0(ptr)
 
 
-define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld4(i32* %A, i32** %ptr) {
+define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2i32_post_imm_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4.2s { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 8
-  store i32* %tmp, i32** %ptr
+  %ld4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 8
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
 }
 
-define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld4(i32* %A, i32** %ptr, i64 %inc) {
+define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2i32_post_reg_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld4.2s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
 }
 
-declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4.v2i32.p0i32(i32*)
+declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4.v2i32.p0(ptr)
 
 
-define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld4(i64* %A, i64** %ptr) {
+define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2i64_post_imm_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4.2d { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 8
-  store i64* %tmp, i64** %ptr
+  %ld4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 8
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
 }
 
-define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld4(i64* %A, i64** %ptr, i64 %inc) {
+define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2i64_post_reg_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld4.2d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
 }
 
-declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0i64(i64*)
+declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0(ptr)
 
 
-define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld4(i64* %A, i64** %ptr) {
+define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v1i64_post_imm_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.1d { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 4
-  store i64* %tmp, i64** %ptr
+  %ld4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
 }
 
-define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld4(i64* %A, i64** %ptr, i64 %inc) {
+define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v1i64_post_reg_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.1d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
 }
 
-declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0i64(i64*)
+declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0(ptr)
 
 
-define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld4(float* %A, float** %ptr) {
+define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4f32_post_imm_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4.4s { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 16
-  store float* %tmp, float** %ptr
+  %ld4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 16
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
 }
 
-define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld4(float* %A, float** %ptr, i64 %inc) {
+define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4f32_post_reg_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld4.4s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
 }
 
-declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4.v4f32.p0f32(float*)
+declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4.v4f32.p0(ptr)
 
 
-define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld4(float* %A, float** %ptr) {
+define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2f32_post_imm_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4.2s { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 8
-  store float* %tmp, float** %ptr
+  %ld4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 8
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
 }
 
-define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld4(float* %A, float** %ptr, i64 %inc) {
+define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2f32_post_reg_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld4.2s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
 }
 
-declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4.v2f32.p0f32(float*)
+declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4.v2f32.p0(ptr)
 
 
-define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld4(double* %A, double** %ptr) {
+define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2f64_post_imm_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4.2d { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 8
-  store double* %tmp, double** %ptr
+  %ld4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 8
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
 }
 
-define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld4(double* %A, double** %ptr, i64 %inc) {
+define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2f64_post_reg_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld4.2d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
 }
 
-declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4.v2f64.p0f64(double*)
+declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4.v2f64.p0(ptr)
 
 
-define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld4(double* %A, double** %ptr) {
+define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v1f64_post_imm_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.1d { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 4
-  store double* %tmp, double** %ptr
+  %ld4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
 }
 
-define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld4(double* %A, double** %ptr, i64 %inc) {
+define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v1f64_post_reg_ld4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.1d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
 }
 
-declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4.v1f64.p0f64(double*)
+declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4.v1f64.p0(ptr)
 
-define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld1x2(i8* %A, i8** %ptr) {
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld1x2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v16i8_post_imm_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.16b { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x2.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 32
-  store i8* %tmp, i8** %ptr
+  %ld1x2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x2.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 32
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8> } %ld1x2
 }
 
-define { <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld1x2(i8* %A, i8** %ptr, i64 %inc) {
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld1x2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v16i8_post_reg_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.16b { v0, v1 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x2.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld1x2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x2.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8> } %ld1x2
 }
 
-declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x2.v16i8.p0i8(i8*)
+declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x2.v16i8.p0(ptr)
 
 
-define { <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld1x2(i8* %A, i8** %ptr) {
+define { <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld1x2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v8i8_post_imm_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.8b { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x2.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 16
-  store i8* %tmp, i8** %ptr
+  %ld1x2 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x2.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 16
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8> } %ld1x2
 }
 
-define { <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld1x2(i8* %A, i8** %ptr, i64 %inc) {
+define { <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld1x2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v8i8_post_reg_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.8b { v0, v1 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x2.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld1x2 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x2.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8> } %ld1x2
 }
 
-declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x2.v8i8.p0i8(i8*)
+declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x2.v8i8.p0(ptr)
 
 
-define { <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld1x2(i16* %A, i16** %ptr) {
+define { <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld1x2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v8i16_post_imm_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.8h { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x2.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 16
-  store i16* %tmp, i16** %ptr
+  %ld1x2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x2.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 16
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16> } %ld1x2
 }
 
-define { <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld1x2(i16* %A, i16** %ptr, i64 %inc) {
+define { <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld1x2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v8i16_post_reg_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld1.8h { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x2.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld1x2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x2.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16> } %ld1x2
 }
 
-declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x2.v8i16.p0i16(i16*)
+declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x2.v8i16.p0(ptr)
 
 
-define { <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld1x2(i16* %A, i16** %ptr) {
+define { <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld1x2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4i16_post_imm_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.4h { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x2.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 8
-  store i16* %tmp, i16** %ptr
+  %ld1x2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x2.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 8
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16> } %ld1x2
 }
 
-define { <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld1x2(i16* %A, i16** %ptr, i64 %inc) {
+define { <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld1x2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4i16_post_reg_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld1.4h { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x2.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld1x2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x2.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16> } %ld1x2
 }
 
-declare { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x2.v4i16.p0i16(i16*)
+declare { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x2.v4i16.p0(ptr)
 
 
-define { <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld1x2(i32* %A, i32** %ptr) {
+define { <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld1x2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4i32_post_imm_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.4s { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x2.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 8
-  store i32* %tmp, i32** %ptr
+  %ld1x2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x2.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 8
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32> } %ld1x2
 }
 
-define { <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld1x2(i32* %A, i32** %ptr, i64 %inc) {
+define { <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld1x2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4i32_post_reg_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1.4s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x2.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld1x2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x2.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32> } %ld1x2
 }
 
-declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x2.v4i32.p0i32(i32*)
+declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x2.v4i32.p0(ptr)
 
 
-define { <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld1x2(i32* %A, i32** %ptr) {
+define { <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld1x2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2i32_post_imm_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.2s { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x2.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 4
-  store i32* %tmp, i32** %ptr
+  %ld1x2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x2.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32> } %ld1x2
 }
 
-define { <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld1x2(i32* %A, i32** %ptr, i64 %inc) {
+define { <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld1x2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2i32_post_reg_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1.2s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x2.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld1x2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x2.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32> } %ld1x2
 }
 
-declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x2.v2i32.p0i32(i32*)
+declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x2.v2i32.p0(ptr)
 
 
-define { <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld1x2(i64* %A, i64** %ptr) {
+define { <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld1x2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2i64_post_imm_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.2d { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x2.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 4
-  store i64* %tmp, i64** %ptr
+  %ld1x2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x2.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64> } %ld1x2
 }
 
-define { <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld1x2(i64* %A, i64** %ptr, i64 %inc) {
+define { <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld1x2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2i64_post_reg_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.2d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x2.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld1x2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x2.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64> } %ld1x2
 }
 
-declare { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x2.v2i64.p0i64(i64*)
+declare { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x2.v2i64.p0(ptr)
 
 
-define { <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld1x2(i64* %A, i64** %ptr) {
+define { <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld1x2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v1i64_post_imm_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.1d { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x2.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 2
-  store i64* %tmp, i64** %ptr
+  %ld1x2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x2.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64> } %ld1x2
 }
 
-define { <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld1x2(i64* %A, i64** %ptr, i64 %inc) {
+define { <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld1x2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v1i64_post_reg_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.1d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x2.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld1x2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x2.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64> } %ld1x2
 }
 
-declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x2.v1i64.p0i64(i64*)
+declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x2.v1i64.p0(ptr)
 
 
-define { <4 x float>, <4 x float> } @test_v4f32_post_imm_ld1x2(float* %A, float** %ptr) {
+define { <4 x float>, <4 x float> } @test_v4f32_post_imm_ld1x2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4f32_post_imm_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.4s { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x2.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 8
-  store float* %tmp, float** %ptr
+  %ld1x2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x2.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 8
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float> } %ld1x2
 }
 
-define { <4 x float>, <4 x float> } @test_v4f32_post_reg_ld1x2(float* %A, float** %ptr, i64 %inc) {
+define { <4 x float>, <4 x float> } @test_v4f32_post_reg_ld1x2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4f32_post_reg_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1.4s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x2.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld1x2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x2.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float> } %ld1x2
 }
 
-declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x2.v4f32.p0f32(float*)
+declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x2.v4f32.p0(ptr)
 
 
-define { <2 x float>, <2 x float> } @test_v2f32_post_imm_ld1x2(float* %A, float** %ptr) {
+define { <2 x float>, <2 x float> } @test_v2f32_post_imm_ld1x2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2f32_post_imm_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.2s { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x2.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 4
-  store float* %tmp, float** %ptr
+  %ld1x2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x2.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float> } %ld1x2
 }
 
-define { <2 x float>, <2 x float> } @test_v2f32_post_reg_ld1x2(float* %A, float** %ptr, i64 %inc) {
+define { <2 x float>, <2 x float> } @test_v2f32_post_reg_ld1x2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2f32_post_reg_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1.2s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x2.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld1x2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x2.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float> } %ld1x2
 }
 
-declare { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x2.v2f32.p0f32(float*)
+declare { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x2.v2f32.p0(ptr)
 
 
-define { <2 x double>, <2 x double> } @test_v2f64_post_imm_ld1x2(double* %A, double** %ptr) {
+define { <2 x double>, <2 x double> } @test_v2f64_post_imm_ld1x2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2f64_post_imm_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.2d { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x2.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 4
-  store double* %tmp, double** %ptr
+  %ld1x2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x2.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double> } %ld1x2
 }
 
-define { <2 x double>, <2 x double> } @test_v2f64_post_reg_ld1x2(double* %A, double** %ptr, i64 %inc) {
+define { <2 x double>, <2 x double> } @test_v2f64_post_reg_ld1x2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2f64_post_reg_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.2d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x2.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld1x2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x2.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double> } %ld1x2
 }
 
-declare { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x2.v2f64.p0f64(double*)
+declare { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x2.v2f64.p0(ptr)
 
 
-define { <1 x double>, <1 x double> } @test_v1f64_post_imm_ld1x2(double* %A, double** %ptr) {
+define { <1 x double>, <1 x double> } @test_v1f64_post_imm_ld1x2(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v1f64_post_imm_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.1d { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x2.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 2
-  store double* %tmp, double** %ptr
+  %ld1x2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x2.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double> } %ld1x2
 }
 
-define { <1 x double>, <1 x double> } @test_v1f64_post_reg_ld1x2(double* %A, double** %ptr, i64 %inc) {
+define { <1 x double>, <1 x double> } @test_v1f64_post_reg_ld1x2(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v1f64_post_reg_ld1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.1d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x2.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld1x2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x2.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double> } %ld1x2
 }
 
-declare { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x2.v1f64.p0f64(double*)
+declare { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x2.v1f64.p0(ptr)
 
 
-define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld1x3(i8* %A, i8** %ptr) {
+define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld1x3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v16i8_post_imm_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.16b { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x3.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 48
-  store i8* %tmp, i8** %ptr
+  %ld1x3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x3.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 48
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld1x3
 }
 
-define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld1x3(i8* %A, i8** %ptr, i64 %inc) {
+define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld1x3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v16i8_post_reg_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.16b { v0, v1, v2 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x3.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld1x3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x3.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld1x3
 }
 
-declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x3.v16i8.p0i8(i8*)
+declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x3.v16i8.p0(ptr)
 
 
-define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld1x3(i8* %A, i8** %ptr) {
+define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld1x3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v8i8_post_imm_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.8b { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x3.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 24
-  store i8* %tmp, i8** %ptr
+  %ld1x3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x3.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 24
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld1x3
 }
 
-define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld1x3(i8* %A, i8** %ptr, i64 %inc) {
+define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld1x3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v8i8_post_reg_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.8b { v0, v1, v2 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x3.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld1x3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x3.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld1x3
 }
 
-declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x3.v8i8.p0i8(i8*)
+declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x3.v8i8.p0(ptr)
 
 
-define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld1x3(i16* %A, i16** %ptr) {
+define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld1x3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v8i16_post_imm_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.8h { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x3.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 24
-  store i16* %tmp, i16** %ptr
+  %ld1x3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x3.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 24
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld1x3
 }
 
-define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld1x3(i16* %A, i16** %ptr, i64 %inc) {
+define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld1x3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v8i16_post_reg_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld1.8h { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x3.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld1x3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x3.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld1x3
 }
 
-declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x3.v8i16.p0i16(i16*)
+declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x3.v8i16.p0(ptr)
 
 
-define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld1x3(i16* %A, i16** %ptr) {
+define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld1x3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4i16_post_imm_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.4h { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x3.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 12
-  store i16* %tmp, i16** %ptr
+  %ld1x3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x3.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 12
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld1x3
 }
 
-define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld1x3(i16* %A, i16** %ptr, i64 %inc) {
+define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld1x3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4i16_post_reg_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld1.4h { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x3.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld1x3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x3.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld1x3
 }
 
-declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x3.v4i16.p0i16(i16*)
+declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x3.v4i16.p0(ptr)
 
 
-define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld1x3(i32* %A, i32** %ptr) {
+define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld1x3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4i32_post_imm_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.4s { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x3.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 12
-  store i32* %tmp, i32** %ptr
+  %ld1x3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x3.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 12
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld1x3
 }
 
-define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld1x3(i32* %A, i32** %ptr, i64 %inc) {
+define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld1x3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4i32_post_reg_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1.4s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x3.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld1x3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x3.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld1x3
 }
 
-declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x3.v4i32.p0i32(i32*)
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x3.v4i32.p0(ptr)
 
 
-define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld1x3(i32* %A, i32** %ptr) {
+define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld1x3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2i32_post_imm_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.2s { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x3.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 6
-  store i32* %tmp, i32** %ptr
+  %ld1x3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x3.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 6
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld1x3
 }
 
-define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld1x3(i32* %A, i32** %ptr, i64 %inc) {
+define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld1x3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2i32_post_reg_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1.2s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x3.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld1x3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x3.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld1x3
 }
 
-declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x3.v2i32.p0i32(i32*)
+declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x3.v2i32.p0(ptr)
 
 
-define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld1x3(i64* %A, i64** %ptr) {
+define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld1x3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2i64_post_imm_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.2d { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x3.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 6
-  store i64* %tmp, i64** %ptr
+  %ld1x3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x3.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 6
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld1x3
 }
 
-define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld1x3(i64* %A, i64** %ptr, i64 %inc) {
+define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld1x3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2i64_post_reg_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.2d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x3.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld1x3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x3.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld1x3
 }
 
-declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x3.v2i64.p0i64(i64*)
+declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x3.v2i64.p0(ptr)
 
 
-define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld1x3(i64* %A, i64** %ptr) {
+define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld1x3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v1i64_post_imm_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.1d { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x3.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 3
-  store i64* %tmp, i64** %ptr
+  %ld1x3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x3.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld1x3
 }
 
-define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld1x3(i64* %A, i64** %ptr, i64 %inc) {
+define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld1x3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v1i64_post_reg_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.1d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x3.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld1x3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x3.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld1x3
 }
 
-declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x3.v1i64.p0i64(i64*)
+declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x3.v1i64.p0(ptr)
 
 
-define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld1x3(float* %A, float** %ptr) {
+define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld1x3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4f32_post_imm_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.4s { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x3.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 12
-  store float* %tmp, float** %ptr
+  %ld1x3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x3.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 12
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float>, <4 x float> } %ld1x3
 }
 
-define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld1x3(float* %A, float** %ptr, i64 %inc) {
+define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld1x3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4f32_post_reg_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1.4s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x3.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld1x3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x3.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float>, <4 x float> } %ld1x3
 }
 
-declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x3.v4f32.p0f32(float*)
+declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x3.v4f32.p0(ptr)
 
 
-define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld1x3(float* %A, float** %ptr) {
+define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld1x3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2f32_post_imm_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.2s { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x3.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 6
-  store float* %tmp, float** %ptr
+  %ld1x3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x3.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 6
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float>, <2 x float> } %ld1x3
 }
 
-define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld1x3(float* %A, float** %ptr, i64 %inc) {
+define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld1x3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2f32_post_reg_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1.2s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x3.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld1x3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x3.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float>, <2 x float> } %ld1x3
 }
 
-declare { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x3.v2f32.p0f32(float*)
+declare { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x3.v2f32.p0(ptr)
 
 
-define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld1x3(double* %A, double** %ptr) {
+define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld1x3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2f64_post_imm_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.2d { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x3.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 6
-  store double* %tmp, double** %ptr
+  %ld1x3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x3.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 6
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double>, <2 x double> } %ld1x3
 }
 
-define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld1x3(double* %A, double** %ptr, i64 %inc) {
+define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld1x3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2f64_post_reg_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.2d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x3.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld1x3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x3.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double>, <2 x double> } %ld1x3
 }
 
-declare { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x3.v2f64.p0f64(double*)
+declare { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x3.v2f64.p0(ptr)
 
 
-define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld1x3(double* %A, double** %ptr) {
+define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld1x3(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v1f64_post_imm_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.1d { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x3.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 3
-  store double* %tmp, double** %ptr
+  %ld1x3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x3.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double>, <1 x double> } %ld1x3
 }
 
-define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld1x3(double* %A, double** %ptr, i64 %inc) {
+define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld1x3(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v1f64_post_reg_ld1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.1d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x3.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld1x3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x3.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double>, <1 x double> } %ld1x3
 }
 
-declare { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x3.v1f64.p0f64(double*)
+declare { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x3.v1f64.p0(ptr)
 
 
-define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld1x4(i8* %A, i8** %ptr) {
+define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld1x4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v16i8_post_imm_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.16b { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x4.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 64
-  store i8* %tmp, i8** %ptr
+  %ld1x4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x4.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 64
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld1x4
 }
 
-define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld1x4(i8* %A, i8** %ptr, i64 %inc) {
+define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld1x4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v16i8_post_reg_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.16b { v0, v1, v2, v3 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x4.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld1x4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x4.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld1x4
 }
 
-declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x4.v16i8.p0i8(i8*)
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x4.v16i8.p0(ptr)
 
 
-define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld1x4(i8* %A, i8** %ptr) {
+define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld1x4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v8i8_post_imm_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.8b { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x4.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 32
-  store i8* %tmp, i8** %ptr
+  %ld1x4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x4.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 32
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld1x4
 }
 
-define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld1x4(i8* %A, i8** %ptr, i64 %inc) {
+define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld1x4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v8i8_post_reg_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.8b { v0, v1, v2, v3 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x4.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld1x4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x4.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld1x4
 }
 
-declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x4.v8i8.p0i8(i8*)
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x4.v8i8.p0(ptr)
 
 
-define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld1x4(i16* %A, i16** %ptr) {
+define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld1x4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v8i16_post_imm_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.8h { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x4.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 32
-  store i16* %tmp, i16** %ptr
+  %ld1x4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x4.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 32
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld1x4
 }
 
-define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld1x4(i16* %A, i16** %ptr, i64 %inc) {
+define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld1x4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v8i16_post_reg_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld1.8h { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x4.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld1x4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x4.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld1x4
 }
 
-declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x4.v8i16.p0i16(i16*)
+declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x4.v8i16.p0(ptr)
 
 
-define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld1x4(i16* %A, i16** %ptr) {
+define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld1x4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4i16_post_imm_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.4h { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x4.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 16
-  store i16* %tmp, i16** %ptr
+  %ld1x4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x4.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 16
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld1x4
 }
 
-define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld1x4(i16* %A, i16** %ptr, i64 %inc) {
+define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld1x4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4i16_post_reg_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld1.4h { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x4.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld1x4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x4.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld1x4
 }
 
-declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x4.v4i16.p0i16(i16*)
+declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x4.v4i16.p0(ptr)
 
 
-define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld1x4(i32* %A, i32** %ptr) {
+define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld1x4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4i32_post_imm_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.4s { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x4.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 16
-  store i32* %tmp, i32** %ptr
+  %ld1x4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x4.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 16
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld1x4
 }
 
-define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld1x4(i32* %A, i32** %ptr, i64 %inc) {
+define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld1x4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4i32_post_reg_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1.4s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x4.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld1x4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x4.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld1x4
 }
 
-declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x4.v4i32.p0i32(i32*)
+declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x4.v4i32.p0(ptr)
 
 
-define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld1x4(i32* %A, i32** %ptr) {
+define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld1x4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2i32_post_imm_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.2s { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x4.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 8
-  store i32* %tmp, i32** %ptr
+  %ld1x4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x4.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 8
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld1x4
 }
 
-define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld1x4(i32* %A, i32** %ptr, i64 %inc) {
+define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld1x4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2i32_post_reg_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1.2s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x4.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld1x4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x4.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld1x4
 }
 
-declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x4.v2i32.p0i32(i32*)
+declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x4.v2i32.p0(ptr)
 
 
-define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld1x4(i64* %A, i64** %ptr) {
+define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld1x4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2i64_post_imm_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.2d { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x4.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 8
-  store i64* %tmp, i64** %ptr
+  %ld1x4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x4.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 8
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld1x4
 }
 
-define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld1x4(i64* %A, i64** %ptr, i64 %inc) {
+define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld1x4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2i64_post_reg_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.2d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x4.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld1x4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x4.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld1x4
 }
 
-declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x4.v2i64.p0i64(i64*)
+declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x4.v2i64.p0(ptr)
 
 
-define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld1x4(i64* %A, i64** %ptr) {
+define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld1x4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v1i64_post_imm_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.1d { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x4.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 4
-  store i64* %tmp, i64** %ptr
+  %ld1x4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x4.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld1x4
 }
 
-define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld1x4(i64* %A, i64** %ptr, i64 %inc) {
+define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld1x4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v1i64_post_reg_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.1d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x4.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld1x4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x4.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld1x4
 }
 
-declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x4.v1i64.p0i64(i64*)
+declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x4.v1i64.p0(ptr)
 
 
-define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld1x4(float* %A, float** %ptr) {
+define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld1x4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v4f32_post_imm_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.4s { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x4.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 16
-  store float* %tmp, float** %ptr
+  %ld1x4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x4.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 16
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld1x4
 }
 
-define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld1x4(float* %A, float** %ptr, i64 %inc) {
+define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld1x4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4f32_post_reg_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1.4s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x4.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld1x4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x4.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld1x4
 }
 
-declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x4.v4f32.p0f32(float*)
+declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x4.v4f32.p0(ptr)
 
 
-define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld1x4(float* %A, float** %ptr) {
+define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld1x4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2f32_post_imm_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.2s { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x4.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 8
-  store float* %tmp, float** %ptr
+  %ld1x4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x4.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 8
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld1x4
 }
 
-define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld1x4(float* %A, float** %ptr, i64 %inc) {
+define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld1x4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2f32_post_reg_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1.2s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x4.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld1x4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x4.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld1x4
 }
 
-declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x4.v2f32.p0f32(float*)
+declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x4.v2f32.p0(ptr)
 
 
-define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld1x4(double* %A, double** %ptr) {
+define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld1x4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v2f64_post_imm_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.2d { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x4.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 8
-  store double* %tmp, double** %ptr
+  %ld1x4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x4.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 8
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld1x4
 }
 
-define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld1x4(double* %A, double** %ptr, i64 %inc) {
+define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld1x4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2f64_post_reg_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.2d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x4.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld1x4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x4.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld1x4
 }
 
-declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x4.v2f64.p0f64(double*)
+declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x4.v2f64.p0(ptr)
 
 
-define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld1x4(double* %A, double** %ptr) {
+define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld1x4(ptr %A, ptr %ptr) {
 ; CHECK-LABEL: test_v1f64_post_imm_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.1d { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x4.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 4
-  store double* %tmp, double** %ptr
+  %ld1x4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x4.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld1x4
 }
 
-define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld1x4(double* %A, double** %ptr, i64 %inc) {
+define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld1x4(ptr %A, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v1f64_post_reg_ld1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.1d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld1x4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x4.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld1x4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x4.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld1x4
 }
 
-declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x4.v1f64.p0f64(double*)
+declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x4.v1f64.p0(ptr)
 
 
-define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld2r(i8* %A, i8** %ptr) nounwind {
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld2r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v16i8_post_imm_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2r.16b { v0, v1 }, [x0], #2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2r.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 2
-  store i8* %tmp, i8** %ptr
+  %ld2 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2r.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8> } %ld2
 }
 
-define { <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld2r(i8* %A, i8** %ptr, i64 %inc) nounwind {
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld2r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v16i8_post_reg_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2r.16b { v0, v1 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2r.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld2 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2r.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8> } %ld2
 }
 
-declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2r.v16i8.p0i8(i8*) nounwind readonly
+declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2r.v16i8.p0(ptr) nounwind readonly
 
 
-define { <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld2r(i8* %A, i8** %ptr) nounwind {
+define { <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld2r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v8i8_post_imm_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2r.8b { v0, v1 }, [x0], #2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2r.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 2
-  store i8* %tmp, i8** %ptr
+  %ld2 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2r.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8> } %ld2
 }
 
-define { <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld2r(i8* %A, i8** %ptr, i64 %inc) nounwind {
+define { <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld2r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i8_post_reg_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2r.8b { v0, v1 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2r.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld2 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2r.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8> } %ld2
 }
 
-declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2r.v8i8.p0i8(i8*) nounwind readonly
+declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2r.v8i8.p0(ptr) nounwind readonly
 
 
-define { <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld2r(i16* %A, i16** %ptr) nounwind {
+define { <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld2r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v8i16_post_imm_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2r.8h { v0, v1 }, [x0], #4
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2r.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 2
-  store i16* %tmp, i16** %ptr
+  %ld2 = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2r.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16> } %ld2
 }
 
-define { <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld2r(i16* %A, i16** %ptr, i64 %inc) nounwind {
+define { <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld2r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i16_post_reg_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld2r.8h { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2r.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld2 = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2r.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16> } %ld2
 }
 
-declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2r.v8i16.p0i16(i16*) nounwind readonly
+declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2r.v8i16.p0(ptr) nounwind readonly
 
 
-define { <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld2r(i16* %A, i16** %ptr) nounwind {
+define { <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld2r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v4i16_post_imm_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2r.4h { v0, v1 }, [x0], #4
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2r.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 2
-  store i16* %tmp, i16** %ptr
+  %ld2 = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2r.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16> } %ld2
 }
 
-define { <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld2r(i16* %A, i16** %ptr, i64 %inc) nounwind {
+define { <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld2r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i16_post_reg_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld2r.4h { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2r.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld2 = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2r.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16> } %ld2
 }
 
-declare { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2r.v4i16.p0i16(i16*) nounwind readonly
+declare { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2r.v4i16.p0(ptr) nounwind readonly
 
 
-define { <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld2r(i32* %A, i32** %ptr) nounwind {
+define { <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld2r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v4i32_post_imm_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2r.4s { v0, v1 }, [x0], #8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2r.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 2
-  store i32* %tmp, i32** %ptr
+  %ld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2r.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32> } %ld2
 }
 
-define { <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld2r(i32* %A, i32** %ptr, i64 %inc) nounwind {
+define { <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld2r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i32_post_reg_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld2r.4s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2r.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2r.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32> } %ld2
 }
 
-declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2r.v4i32.p0i32(i32*) nounwind readonly
+declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2r.v4i32.p0(ptr) nounwind readonly
 
-define { <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld2r(i32* %A, i32** %ptr) nounwind {
+define { <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld2r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v2i32_post_imm_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2r.2s { v0, v1 }, [x0], #8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2r.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 2
-  store i32* %tmp, i32** %ptr
+  %ld2 = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2r.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32> } %ld2
 }
 
-define { <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld2r(i32* %A, i32** %ptr, i64 %inc) nounwind {
+define { <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld2r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i32_post_reg_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld2r.2s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2r.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld2 = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2r.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32> } %ld2
 }
 
-declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2r.v2i32.p0i32(i32*) nounwind readonly
+declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2r.v2i32.p0(ptr) nounwind readonly
 
 
-define { <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld2r(i64* %A, i64** %ptr) nounwind {
+define { <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld2r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v2i64_post_imm_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2r.2d { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2r.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 2
-  store i64* %tmp, i64** %ptr
+  %ld2 = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2r.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64> } %ld2
 }
 
-define { <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld2r(i64* %A, i64** %ptr, i64 %inc) nounwind {
+define { <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld2r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i64_post_reg_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld2r.2d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2r.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld2 = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2r.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64> } %ld2
 }
 
-declare { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2r.v2i64.p0i64(i64*) nounwind readonly
+declare { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2r.v2i64.p0(ptr) nounwind readonly
 
-define { <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld2r(i64* %A, i64** %ptr) nounwind {
+define { <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld2r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v1i64_post_imm_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2r.1d { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2r.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 2
-  store i64* %tmp, i64** %ptr
+  %ld2 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2r.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64> } %ld2
 }
 
-define { <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld2r(i64* %A, i64** %ptr, i64 %inc) nounwind {
+define { <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld2r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1i64_post_reg_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld2r.1d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2r.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld2 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2r.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64> } %ld2
 }
 
-declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2r.v1i64.p0i64(i64*) nounwind readonly
+declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2r.v1i64.p0(ptr) nounwind readonly
 
 
-define { <4 x float>, <4 x float> } @test_v4f32_post_imm_ld2r(float* %A, float** %ptr) nounwind {
+define { <4 x float>, <4 x float> } @test_v4f32_post_imm_ld2r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v4f32_post_imm_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2r.4s { v0, v1 }, [x0], #8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2r.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 2
-  store float* %tmp, float** %ptr
+  %ld2 = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2r.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float> } %ld2
 }
 
-define { <4 x float>, <4 x float> } @test_v4f32_post_reg_ld2r(float* %A, float** %ptr, i64 %inc) nounwind {
+define { <4 x float>, <4 x float> } @test_v4f32_post_reg_ld2r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4f32_post_reg_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld2r.4s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2r.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld2 = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2r.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float> } %ld2
 }
 
-declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2r.v4f32.p0f32(float*) nounwind readonly
+declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2r.v4f32.p0(ptr) nounwind readonly
 
-define { <2 x float>, <2 x float> } @test_v2f32_post_imm_ld2r(float* %A, float** %ptr) nounwind {
+define { <2 x float>, <2 x float> } @test_v2f32_post_imm_ld2r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v2f32_post_imm_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2r.2s { v0, v1 }, [x0], #8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2r.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 2
-  store float* %tmp, float** %ptr
+  %ld2 = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2r.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float> } %ld2
 }
 
-define { <2 x float>, <2 x float> } @test_v2f32_post_reg_ld2r(float* %A, float** %ptr, i64 %inc) nounwind {
+define { <2 x float>, <2 x float> } @test_v2f32_post_reg_ld2r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f32_post_reg_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld2r.2s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2r.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld2 = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2r.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float> } %ld2
 }
 
-declare { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2r.v2f32.p0f32(float*) nounwind readonly
+declare { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2r.v2f32.p0(ptr) nounwind readonly
 
 
-define { <2 x double>, <2 x double> } @test_v2f64_post_imm_ld2r(double* %A, double** %ptr) nounwind {
+define { <2 x double>, <2 x double> } @test_v2f64_post_imm_ld2r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v2f64_post_imm_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2r.2d { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2r.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 2
-  store double* %tmp, double** %ptr
+  %ld2 = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2r.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double> } %ld2
 }
 
-define { <2 x double>, <2 x double> } @test_v2f64_post_reg_ld2r(double* %A, double** %ptr, i64 %inc) nounwind {
+define { <2 x double>, <2 x double> } @test_v2f64_post_reg_ld2r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f64_post_reg_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld2r.2d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2r.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld2 = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2r.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double> } %ld2
 }
 
-declare { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2r.v2f64.p0f64(double*) nounwind readonly
+declare { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2r.v2f64.p0(ptr) nounwind readonly
 
-define { <1 x double>, <1 x double> } @test_v1f64_post_imm_ld2r(double* %A, double** %ptr) nounwind {
+define { <1 x double>, <1 x double> } @test_v1f64_post_imm_ld2r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v1f64_post_imm_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld2r.1d { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2r.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 2
-  store double* %tmp, double** %ptr
+  %ld2 = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2r.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double> } %ld2
 }
 
-define { <1 x double>, <1 x double> } @test_v1f64_post_reg_ld2r(double* %A, double** %ptr, i64 %inc) nounwind {
+define { <1 x double>, <1 x double> } @test_v1f64_post_reg_ld2r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1f64_post_reg_ld2r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld2r.1d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2r.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld2 = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2r.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double> } %ld2
 }
 
-declare { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2r.v1f64.p0f64(double*) nounwind readonly
+declare { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2r.v1f64.p0(ptr) nounwind readonly
 
 
-define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld3r(i8* %A, i8** %ptr) nounwind {
+define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld3r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v16i8_post_imm_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3r.16b { v0, v1, v2 }, [x0], #3
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3r.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 3
-  store i8* %tmp, i8** %ptr
+  %ld3 = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3r.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
 }
 
-define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld3r(i8* %A, i8** %ptr, i64 %inc) nounwind {
+define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld3r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v16i8_post_reg_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3r.16b { v0, v1, v2 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3r.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld3 = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3r.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
 }
 
-declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3r.v16i8.p0i8(i8*) nounwind readonly
+declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3r.v16i8.p0(ptr) nounwind readonly
 
 
-define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld3r(i8* %A, i8** %ptr) nounwind {
+define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld3r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v8i8_post_imm_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3r.8b { v0, v1, v2 }, [x0], #3
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3r.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 3
-  store i8* %tmp, i8** %ptr
+  %ld3 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3r.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
 }
 
-define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld3r(i8* %A, i8** %ptr, i64 %inc) nounwind {
+define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld3r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i8_post_reg_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3r.8b { v0, v1, v2 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3r.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld3 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3r.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
 }
 
-declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3r.v8i8.p0i8(i8*) nounwind readonly
+declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3r.v8i8.p0(ptr) nounwind readonly
 
 
-define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld3r(i16* %A, i16** %ptr) nounwind {
+define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld3r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v8i16_post_imm_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3r.8h { v0, v1, v2 }, [x0], #6
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3r.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 3
-  store i16* %tmp, i16** %ptr
+  %ld3 = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3r.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
 }
 
-define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld3r(i16* %A, i16** %ptr, i64 %inc) nounwind {
+define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld3r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i16_post_reg_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld3r.8h { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3r.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld3 = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3r.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
 }
 
-declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3r.v8i16.p0i16(i16*) nounwind readonly
+declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3r.v8i16.p0(ptr) nounwind readonly
 
 
-define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld3r(i16* %A, i16** %ptr) nounwind {
+define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld3r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v4i16_post_imm_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3r.4h { v0, v1, v2 }, [x0], #6
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3r.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 3
-  store i16* %tmp, i16** %ptr
+  %ld3 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3r.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
 }
 
-define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld3r(i16* %A, i16** %ptr, i64 %inc) nounwind {
+define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld3r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i16_post_reg_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld3r.4h { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3r.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld3 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3r.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
 }
 
-declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3r.v4i16.p0i16(i16*) nounwind readonly
+declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3r.v4i16.p0(ptr) nounwind readonly
 
 
-define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld3r(i32* %A, i32** %ptr) nounwind {
+define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld3r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v4i32_post_imm_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3r.4s { v0, v1, v2 }, [x0], #12
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3r.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 3
-  store i32* %tmp, i32** %ptr
+  %ld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3r.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
 }
 
-define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld3r(i32* %A, i32** %ptr, i64 %inc) nounwind {
+define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld3r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i32_post_reg_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld3r.4s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3r.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3r.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
 }
 
-declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3r.v4i32.p0i32(i32*) nounwind readonly
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3r.v4i32.p0(ptr) nounwind readonly
 
-define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld3r(i32* %A, i32** %ptr) nounwind {
+define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld3r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v2i32_post_imm_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3r.2s { v0, v1, v2 }, [x0], #12
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3r.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 3
-  store i32* %tmp, i32** %ptr
+  %ld3 = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3r.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
 }
 
-define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld3r(i32* %A, i32** %ptr, i64 %inc) nounwind {
+define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld3r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i32_post_reg_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld3r.2s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3r.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld3 = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3r.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
 }
 
-declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3r.v2i32.p0i32(i32*) nounwind readonly
+declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3r.v2i32.p0(ptr) nounwind readonly
 
 
-define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld3r(i64* %A, i64** %ptr) nounwind {
+define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld3r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v2i64_post_imm_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3r.2d { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3r.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 3
-  store i64* %tmp, i64** %ptr
+  %ld3 = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3r.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
 }
 
-define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld3r(i64* %A, i64** %ptr, i64 %inc) nounwind {
+define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld3r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i64_post_reg_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld3r.2d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3r.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld3 = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3r.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
 }
 
-declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3r.v2i64.p0i64(i64*) nounwind readonly
+declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3r.v2i64.p0(ptr) nounwind readonly
 
-define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld3r(i64* %A, i64** %ptr) nounwind {
+define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld3r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v1i64_post_imm_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3r.1d { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3r.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 3
-  store i64* %tmp, i64** %ptr
+  %ld3 = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3r.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
 }
 
-define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld3r(i64* %A, i64** %ptr, i64 %inc) nounwind {
+define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld3r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1i64_post_reg_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld3r.1d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3r.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld3 = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3r.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
 }
 
-declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3r.v1i64.p0i64(i64*) nounwind readonly
+declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3r.v1i64.p0(ptr) nounwind readonly
 
 
-define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld3r(float* %A, float** %ptr) nounwind {
+define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld3r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v4f32_post_imm_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3r.4s { v0, v1, v2 }, [x0], #12
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3r.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 3
-  store float* %tmp, float** %ptr
+  %ld3 = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3r.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float>, <4 x float> } %ld3
 }
 
-define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld3r(float* %A, float** %ptr, i64 %inc) nounwind {
+define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld3r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4f32_post_reg_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld3r.4s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3r.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld3 = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3r.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float>, <4 x float> } %ld3
 }
 
-declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3r.v4f32.p0f32(float*) nounwind readonly
+declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3r.v4f32.p0(ptr) nounwind readonly
 
-define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld3r(float* %A, float** %ptr) nounwind {
+define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld3r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v2f32_post_imm_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3r.2s { v0, v1, v2 }, [x0], #12
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3r.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 3
-  store float* %tmp, float** %ptr
+  %ld3 = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3r.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float>, <2 x float> } %ld3
 }
 
-define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld3r(float* %A, float** %ptr, i64 %inc) nounwind {
+define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld3r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f32_post_reg_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld3r.2s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3r.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld3 = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3r.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float>, <2 x float> } %ld3
 }
 
-declare { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3r.v2f32.p0f32(float*) nounwind readonly
+declare { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3r.v2f32.p0(ptr) nounwind readonly
 
 
-define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld3r(double* %A, double** %ptr) nounwind {
+define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld3r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v2f64_post_imm_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3r.2d { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3r.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 3
-  store double* %tmp, double** %ptr
+  %ld3 = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3r.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double>, <2 x double> } %ld3
 }
 
-define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld3r(double* %A, double** %ptr, i64 %inc) nounwind {
+define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld3r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f64_post_reg_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld3r.2d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3r.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld3 = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3r.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double>, <2 x double> } %ld3
 }
 
-declare { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3r.v2f64.p0f64(double*) nounwind readonly
+declare { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3r.v2f64.p0(ptr) nounwind readonly
 
-define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld3r(double* %A, double** %ptr) nounwind {
+define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld3r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v1f64_post_imm_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld3r.1d { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3r.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 3
-  store double* %tmp, double** %ptr
+  %ld3 = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3r.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double>, <1 x double> } %ld3
 }
 
-define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld3r(double* %A, double** %ptr, i64 %inc) nounwind {
+define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld3r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1f64_post_reg_ld3r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld3r.1d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3r.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld3 = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3r.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double>, <1 x double> } %ld3
 }
 
-declare { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3r.v1f64.p0f64(double*) nounwind readonly
+declare { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3r.v1f64.p0(ptr) nounwind readonly
 
 
-define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld4r(i8* %A, i8** %ptr) nounwind {
+define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld4r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v16i8_post_imm_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4r.16b { v0, v1, v2, v3 }, [x0], #4
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4r.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 4
-  store i8* %tmp, i8** %ptr
+  %ld4 = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4r.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
 }
 
-define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld4r(i8* %A, i8** %ptr, i64 %inc) nounwind {
+define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld4r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v16i8_post_reg_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4r.16b { v0, v1, v2, v3 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4r.v16i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld4 = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4r.v16i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
 }
 
-declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4r.v16i8.p0i8(i8*) nounwind readonly
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4r.v16i8.p0(ptr) nounwind readonly
 
 
-define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld4r(i8* %A, i8** %ptr) nounwind {
+define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld4r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v8i8_post_imm_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4r.8b { v0, v1, v2, v3 }, [x0], #4
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4r.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 4
-  store i8* %tmp, i8** %ptr
+  %ld4 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4r.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
 }
 
-define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld4r(i8* %A, i8** %ptr, i64 %inc) nounwind {
+define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld4r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i8_post_reg_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4r.8b { v0, v1, v2, v3 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4r.v8i8.p0i8(i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld4 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4r.v8i8.p0(ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
 }
 
-declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4r.v8i8.p0i8(i8*) nounwind readonly
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4r.v8i8.p0(ptr) nounwind readonly
 
 
-define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld4r(i16* %A, i16** %ptr) nounwind {
+define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld4r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v8i16_post_imm_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4r.8h { v0, v1, v2, v3 }, [x0], #8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4r.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 4
-  store i16* %tmp, i16** %ptr
+  %ld4 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4r.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
 }
 
-define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld4r(i16* %A, i16** %ptr, i64 %inc) nounwind {
+define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld4r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i16_post_reg_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld4r.8h { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4r.v8i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld4 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4r.v8i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
 }
 
-declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4r.v8i16.p0i16(i16*) nounwind readonly
+declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4r.v8i16.p0(ptr) nounwind readonly
 
 
-define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld4r(i16* %A, i16** %ptr) nounwind {
+define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld4r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v4i16_post_imm_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4r.4h { v0, v1, v2, v3 }, [x0], #8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4r.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 4
-  store i16* %tmp, i16** %ptr
+  %ld4 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4r.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
 }
 
-define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld4r(i16* %A, i16** %ptr, i64 %inc) nounwind {
+define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld4r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i16_post_reg_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld4r.4h { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4r.v4i16.p0i16(i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld4 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4r.v4i16.p0(ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
 }
 
-declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4r.v4i16.p0i16(i16*) nounwind readonly
+declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4r.v4i16.p0(ptr) nounwind readonly
 
 
-define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld4r(i32* %A, i32** %ptr) nounwind {
+define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld4r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v4i32_post_imm_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4r.4s { v0, v1, v2, v3 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4r.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 4
-  store i32* %tmp, i32** %ptr
+  %ld4 = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4r.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
 }
 
-define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld4r(i32* %A, i32** %ptr, i64 %inc) nounwind {
+define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld4r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i32_post_reg_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld4r.4s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4r.v4i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld4 = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4r.v4i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
 }
 
-declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4r.v4i32.p0i32(i32*) nounwind readonly
+declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4r.v4i32.p0(ptr) nounwind readonly
 
-define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld4r(i32* %A, i32** %ptr) nounwind {
+define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld4r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v2i32_post_imm_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4r.2s { v0, v1, v2, v3 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4r.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 4
-  store i32* %tmp, i32** %ptr
+  %ld4 = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4r.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
 }
 
-define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld4r(i32* %A, i32** %ptr, i64 %inc) nounwind {
+define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld4r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i32_post_reg_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld4r.2s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4r.v2i32.p0i32(i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld4 = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4r.v2i32.p0(ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
 }
 
-declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4r.v2i32.p0i32(i32*) nounwind readonly
+declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4r.v2i32.p0(ptr) nounwind readonly
 
 
-define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld4r(i64* %A, i64** %ptr) nounwind {
+define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld4r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v2i64_post_imm_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4r.2d { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4r.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 4
-  store i64* %tmp, i64** %ptr
+  %ld4 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4r.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
 }
 
-define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld4r(i64* %A, i64** %ptr, i64 %inc) nounwind {
+define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld4r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i64_post_reg_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld4r.2d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4r.v2i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld4 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4r.v2i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
 }
 
-declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4r.v2i64.p0i64(i64*) nounwind readonly
+declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4r.v2i64.p0(ptr) nounwind readonly
 
-define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld4r(i64* %A, i64** %ptr) nounwind {
+define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld4r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v1i64_post_imm_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4r.1d { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4r.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 4
-  store i64* %tmp, i64** %ptr
+  %ld4 = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4r.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
 }
 
-define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld4r(i64* %A, i64** %ptr, i64 %inc) nounwind {
+define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld4r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1i64_post_reg_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld4r.1d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4r.v1i64.p0i64(i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld4 = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4r.v1i64.p0(ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
 }
 
-declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4r.v1i64.p0i64(i64*) nounwind readonly
+declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4r.v1i64.p0(ptr) nounwind readonly
 
 
-define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld4r(float* %A, float** %ptr) nounwind {
+define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld4r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v4f32_post_imm_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4r.4s { v0, v1, v2, v3 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4r.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 4
-  store float* %tmp, float** %ptr
+  %ld4 = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4r.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
 }
 
-define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld4r(float* %A, float** %ptr, i64 %inc) nounwind {
+define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld4r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4f32_post_reg_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld4r.4s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4r.v4f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld4 = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4r.v4f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
 }
 
-declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4r.v4f32.p0f32(float*) nounwind readonly
+declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4r.v4f32.p0(ptr) nounwind readonly
 
-define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld4r(float* %A, float** %ptr) nounwind {
+define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld4r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v2f32_post_imm_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4r.2s { v0, v1, v2, v3 }, [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4r.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i32 4
-  store float* %tmp, float** %ptr
+  %ld4 = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4r.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
 }
 
-define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld4r(float* %A, float** %ptr, i64 %inc) nounwind {
+define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld4r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f32_post_reg_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld4r.2s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4r.v2f32.p0f32(float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld4 = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4r.v2f32.p0(ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
 }
 
-declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4r.v2f32.p0f32(float*) nounwind readonly
+declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4r.v2f32.p0(ptr) nounwind readonly
 
 
-define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld4r(double* %A, double** %ptr) nounwind {
+define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld4r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v2f64_post_imm_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4r.2d { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4r.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 4
-  store double* %tmp, double** %ptr
+  %ld4 = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4r.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
 }
 
-define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld4r(double* %A, double** %ptr, i64 %inc) nounwind {
+define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld4r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f64_post_reg_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld4r.2d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4r.v2f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld4 = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4r.v2f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
 }
 
-declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4r.v2f64.p0f64(double*) nounwind readonly
+declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4r.v2f64.p0(ptr) nounwind readonly
 
-define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld4r(double* %A, double** %ptr) nounwind {
+define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld4r(ptr %A, ptr %ptr) nounwind {
 ; CHECK-LABEL: test_v1f64_post_imm_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld4r.1d { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4r.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i32 4
-  store double* %tmp, double** %ptr
+  %ld4 = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4r.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
 }
 
-define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld4r(double* %A, double** %ptr, i64 %inc) nounwind {
+define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld4r(ptr %A, ptr %ptr, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1f64_post_reg_ld4r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld4r.1d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4r.v1f64.p0f64(double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld4 = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4r.v1f64.p0(ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
 }
 
-declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4r.v1f64.p0f64(double*) nounwind readonly
+declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4r.v1f64.p0(ptr) nounwind readonly
 
 
-define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld2lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C) nounwind {
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld2lane(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C) nounwind {
 ; CHECK-LABEL: test_v16i8_post_imm_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
@@ -3838,13 +3838,13 @@ define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld2lane(i8* %A, i8** %ptr,
 ; CHECK-NEXT:    ld2.b { v0, v1 }[0], [x0], #2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 2
-  store i8* %tmp, i8** %ptr
+  %ld2 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2lane.v16i8.p0(<16 x i8> %B, <16 x i8> %C, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8> } %ld2
 }
 
-define { <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld2lane(i8* %A, i8** %ptr, i64 %inc, <16 x i8> %B, <16 x i8> %C) nounwind {
+define { <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld2lane(ptr %A, ptr %ptr, i64 %inc, <16 x i8> %B, <16 x i8> %C) nounwind {
 ; CHECK-LABEL: test_v16i8_post_reg_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
@@ -3852,16 +3852,16 @@ define { <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld2lane(i8* %A, i8** %ptr,
 ; CHECK-NEXT:    ld2.b { v0, v1 }[0], [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld2 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2lane.v16i8.p0(<16 x i8> %B, <16 x i8> %C, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8> } %ld2
 }
 
-declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2lane.v16i8.p0i8(<16 x i8>, <16 x i8>, i64, i8*) nounwind readonly
+declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2lane.v16i8.p0(<16 x i8>, <16 x i8>, i64, ptr) nounwind readonly
 
 
-define { <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld2lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C) nounwind {
+define { <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld2lane(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C) nounwind {
 ; CHECK-LABEL: test_v8i8_post_imm_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $q0_q1 def $q0_q1
@@ -3869,13 +3869,13 @@ define { <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld2lane(i8* %A, i8** %ptr, <8
 ; CHECK-NEXT:    ld2.b { v0, v1 }[0], [x0], #2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 2
-  store i8* %tmp, i8** %ptr
+  %ld2 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2lane.v8i8.p0(<8 x i8> %B, <8 x i8> %C, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8> } %ld2
 }
 
-define { <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld2lane(i8* %A, i8** %ptr, i64 %inc, <8 x i8> %B, <8 x i8> %C) nounwind {
+define { <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld2lane(ptr %A, ptr %ptr, i64 %inc, <8 x i8> %B, <8 x i8> %C) nounwind {
 ; CHECK-LABEL: test_v8i8_post_reg_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $q0_q1 def $q0_q1
@@ -3883,16 +3883,16 @@ define { <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld2lane(i8* %A, i8** %ptr, i64
 ; CHECK-NEXT:    ld2.b { v0, v1 }[0], [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld2 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2lane.v8i8.p0(<8 x i8> %B, <8 x i8> %C, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8> } %ld2
 }
 
-declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2lane.v8i8.p0i8(<8 x i8>, <8 x i8>, i64, i8*) nounwind readonly
+declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2lane.v8i8.p0(<8 x i8>, <8 x i8>, i64, ptr) nounwind readonly
 
 
-define { <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld2lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C) nounwind {
+define { <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld2lane(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C) nounwind {
 ; CHECK-LABEL: test_v8i16_post_imm_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
@@ -3900,13 +3900,13 @@ define { <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld2lane(i16* %A, i16** %ptr
 ; CHECK-NEXT:    ld2.h { v0, v1 }[0], [x0], #4
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 2
-  store i16* %tmp, i16** %ptr
+  %ld2 = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2lane.v8i16.p0(<8 x i16> %B, <8 x i16> %C, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16> } %ld2
 }
 
-define { <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld2lane(i16* %A, i16** %ptr, i64 %inc, <8 x i16> %B, <8 x i16> %C) nounwind {
+define { <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld2lane(ptr %A, ptr %ptr, i64 %inc, <8 x i16> %B, <8 x i16> %C) nounwind {
 ; CHECK-LABEL: test_v8i16_post_reg_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
@@ -3915,16 +3915,16 @@ define { <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld2lane(i16* %A, i16** %ptr
 ; CHECK-NEXT:    ld2.h { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld2 = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2lane.v8i16.p0(<8 x i16> %B, <8 x i16> %C, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16> } %ld2
 }
 
-declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2lane.v8i16.p0i16(<8 x i16>, <8 x i16>, i64, i16*) nounwind readonly
+declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2lane.v8i16.p0(<8 x i16>, <8 x i16>, i64, ptr) nounwind readonly
 
 
-define { <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld2lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C) nounwind {
+define { <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld2lane(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C) nounwind {
 ; CHECK-LABEL: test_v4i16_post_imm_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $q0_q1 def $q0_q1
@@ -3932,13 +3932,13 @@ define { <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld2lane(i16* %A, i16** %ptr
 ; CHECK-NEXT:    ld2.h { v0, v1 }[0], [x0], #4
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 2
-  store i16* %tmp, i16** %ptr
+  %ld2 = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2lane.v4i16.p0(<4 x i16> %B, <4 x i16> %C, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16> } %ld2
 }
 
-define { <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld2lane(i16* %A, i16** %ptr, i64 %inc, <4 x i16> %B, <4 x i16> %C) nounwind {
+define { <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld2lane(ptr %A, ptr %ptr, i64 %inc, <4 x i16> %B, <4 x i16> %C) nounwind {
 ; CHECK-LABEL: test_v4i16_post_reg_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
@@ -3947,16 +3947,16 @@ define { <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld2lane(i16* %A, i16** %ptr
 ; CHECK-NEXT:    ld2.h { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld2 = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2lane.v4i16.p0(<4 x i16> %B, <4 x i16> %C, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16> } %ld2
 }
 
-declare { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2lane.v4i16.p0i16(<4 x i16>, <4 x i16>, i64, i16*) nounwind readonly
+declare { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2lane.v4i16.p0(<4 x i16>, <4 x i16>, i64, ptr) nounwind readonly
 
 
-define { <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld2lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C) nounwind {
+define { <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld2lane(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C) nounwind {
 ; CHECK-LABEL: test_v4i32_post_imm_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
@@ -3964,13 +3964,13 @@ define { <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld2lane(i32* %A, i32** %ptr
 ; CHECK-NEXT:    ld2.s { v0, v1 }[0], [x0], #8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 2
-  store i32* %tmp, i32** %ptr
+  %ld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0(<4 x i32> %B, <4 x i32> %C, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32> } %ld2
 }
 
-define { <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld2lane(i32* %A, i32** %ptr, i64 %inc, <4 x i32> %B, <4 x i32> %C) nounwind {
+define { <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld2lane(ptr %A, ptr %ptr, i64 %inc, <4 x i32> %B, <4 x i32> %C) nounwind {
 ; CHECK-LABEL: test_v4i32_post_reg_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -3979,16 +3979,16 @@ define { <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld2lane(i32* %A, i32** %ptr
 ; CHECK-NEXT:    ld2.s { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0(<4 x i32> %B, <4 x i32> %C, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32> } %ld2
 }
 
-declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32>, <4 x i32>, i64, i32*) nounwind readonly
+declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0(<4 x i32>, <4 x i32>, i64, ptr) nounwind readonly
 
 
-define { <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld2lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C) nounwind {
+define { <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld2lane(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C) nounwind {
 ; CHECK-LABEL: test_v2i32_post_imm_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $q0_q1 def $q0_q1
@@ -3996,13 +3996,13 @@ define { <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld2lane(i32* %A, i32** %ptr
 ; CHECK-NEXT:    ld2.s { v0, v1 }[0], [x0], #8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 2
-  store i32* %tmp, i32** %ptr
+  %ld2 = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2lane.v2i32.p0(<2 x i32> %B, <2 x i32> %C, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32> } %ld2
 }
 
-define { <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld2lane(i32* %A, i32** %ptr, i64 %inc, <2 x i32> %B, <2 x i32> %C) nounwind {
+define { <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld2lane(ptr %A, ptr %ptr, i64 %inc, <2 x i32> %B, <2 x i32> %C) nounwind {
 ; CHECK-LABEL: test_v2i32_post_reg_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -4011,16 +4011,16 @@ define { <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld2lane(i32* %A, i32** %ptr
 ; CHECK-NEXT:    ld2.s { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld2 = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2lane.v2i32.p0(<2 x i32> %B, <2 x i32> %C, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32> } %ld2
 }
 
-declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2lane.v2i32.p0i32(<2 x i32>, <2 x i32>, i64, i32*) nounwind readonly
+declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2lane.v2i32.p0(<2 x i32>, <2 x i32>, i64, ptr) nounwind readonly
 
 
-define { <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld2lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C) nounwind {
+define { <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld2lane(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C) nounwind {
 ; CHECK-LABEL: test_v2i64_post_imm_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
@@ -4028,13 +4028,13 @@ define { <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld2lane(i64* %A, i64** %ptr
 ; CHECK-NEXT:    ld2.d { v0, v1 }[0], [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 2
-  store i64* %tmp, i64** %ptr
+  %ld2 = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2lane.v2i64.p0(<2 x i64> %B, <2 x i64> %C, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64> } %ld2
 }
 
-define { <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld2lane(i64* %A, i64** %ptr, i64 %inc, <2 x i64> %B, <2 x i64> %C) nounwind {
+define { <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld2lane(ptr %A, ptr %ptr, i64 %inc, <2 x i64> %B, <2 x i64> %C) nounwind {
 ; CHECK-LABEL: test_v2i64_post_reg_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -4043,16 +4043,16 @@ define { <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld2lane(i64* %A, i64** %ptr
 ; CHECK-NEXT:    ld2.d { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld2 = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2lane.v2i64.p0(<2 x i64> %B, <2 x i64> %C, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64> } %ld2
 }
 
-declare { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2lane.v2i64.p0i64(<2 x i64>, <2 x i64>, i64, i64*) nounwind readonly
+declare { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2lane.v2i64.p0(<2 x i64>, <2 x i64>, i64, ptr) nounwind readonly
 
 
-define { <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld2lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C) nounwind {
+define { <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld2lane(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C) nounwind {
 ; CHECK-LABEL: test_v1i64_post_imm_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $q0_q1 def $q0_q1
@@ -4060,13 +4060,13 @@ define { <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld2lane(i64* %A, i64** %ptr
 ; CHECK-NEXT:    ld2.d { v0, v1 }[0], [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 2
-  store i64* %tmp, i64** %ptr
+  %ld2 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0(<1 x i64> %B, <1 x i64> %C, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64> } %ld2
 }
 
-define { <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld2lane(i64* %A, i64** %ptr, i64 %inc, <1 x i64> %B, <1 x i64> %C) nounwind {
+define { <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld2lane(ptr %A, ptr %ptr, i64 %inc, <1 x i64> %B, <1 x i64> %C) nounwind {
 ; CHECK-LABEL: test_v1i64_post_reg_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -4075,16 +4075,16 @@ define { <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld2lane(i64* %A, i64** %ptr
 ; CHECK-NEXT:    ld2.d { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld2 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0(<1 x i64> %B, <1 x i64> %C, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64> } %ld2
 }
 
-declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0i64(<1 x i64>, <1 x i64>, i64, i64*) nounwind readonly
+declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0(<1 x i64>, <1 x i64>, i64, ptr) nounwind readonly
 
 
-define { <4 x float>, <4 x float> } @test_v4f32_post_imm_ld2lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C) nounwind {
+define { <4 x float>, <4 x float> } @test_v4f32_post_imm_ld2lane(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C) nounwind {
 ; CHECK-LABEL: test_v4f32_post_imm_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
@@ -4092,13 +4092,13 @@ define { <4 x float>, <4 x float> } @test_v4f32_post_imm_ld2lane(float* %A, floa
 ; CHECK-NEXT:    ld2.s { v0, v1 }[0], [x0], #8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i32 2
-  store float* %tmp, float** %ptr
+  %ld2 = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2lane.v4f32.p0(<4 x float> %B, <4 x float> %C, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float> } %ld2
 }
 
-define { <4 x float>, <4 x float> } @test_v4f32_post_reg_ld2lane(float* %A, float** %ptr, i64 %inc, <4 x float> %B, <4 x float> %C) nounwind {
+define { <4 x float>, <4 x float> } @test_v4f32_post_reg_ld2lane(ptr %A, ptr %ptr, i64 %inc, <4 x float> %B, <4 x float> %C) nounwind {
 ; CHECK-LABEL: test_v4f32_post_reg_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -4107,16 +4107,16 @@ define { <4 x float>, <4 x float> } @test_v4f32_post_reg_ld2lane(float* %A, floa
 ; CHECK-NEXT:    ld2.s { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld2 = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2lane.v4f32.p0(<4 x float> %B, <4 x float> %C, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float> } %ld2
 }
 
-declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2lane.v4f32.p0f32(<4 x float>, <4 x float>, i64, float*) nounwind readonly
+declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2lane.v4f32.p0(<4 x float>, <4 x float>, i64, ptr) nounwind readonly
 
 
-define { <2 x float>, <2 x float> } @test_v2f32_post_imm_ld2lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C) nounwind {
+define { <2 x float>, <2 x float> } @test_v2f32_post_imm_ld2lane(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C) nounwind {
 ; CHECK-LABEL: test_v2f32_post_imm_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $q0_q1 def $q0_q1
@@ -4124,13 +4124,13 @@ define { <2 x float>, <2 x float> } @test_v2f32_post_imm_ld2lane(float* %A, floa
 ; CHECK-NEXT:    ld2.s { v0, v1 }[0], [x0], #8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i32 2
-  store float* %tmp, float** %ptr
+  %ld2 = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2lane.v2f32.p0(<2 x float> %B, <2 x float> %C, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float> } %ld2
 }
 
-define { <2 x float>, <2 x float> } @test_v2f32_post_reg_ld2lane(float* %A, float** %ptr, i64 %inc, <2 x float> %B, <2 x float> %C) nounwind {
+define { <2 x float>, <2 x float> } @test_v2f32_post_reg_ld2lane(ptr %A, ptr %ptr, i64 %inc, <2 x float> %B, <2 x float> %C) nounwind {
 ; CHECK-LABEL: test_v2f32_post_reg_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -4139,16 +4139,16 @@ define { <2 x float>, <2 x float> } @test_v2f32_post_reg_ld2lane(float* %A, floa
 ; CHECK-NEXT:    ld2.s { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld2 = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2lane.v2f32.p0(<2 x float> %B, <2 x float> %C, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float> } %ld2
 }
 
-declare { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2lane.v2f32.p0f32(<2 x float>, <2 x float>, i64, float*) nounwind readonly
+declare { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2lane.v2f32.p0(<2 x float>, <2 x float>, i64, ptr) nounwind readonly
 
 
-define { <2 x double>, <2 x double> } @test_v2f64_post_imm_ld2lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C) nounwind {
+define { <2 x double>, <2 x double> } @test_v2f64_post_imm_ld2lane(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C) nounwind {
 ; CHECK-LABEL: test_v2f64_post_imm_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
@@ -4156,13 +4156,13 @@ define { <2 x double>, <2 x double> } @test_v2f64_post_imm_ld2lane(double* %A, d
 ; CHECK-NEXT:    ld2.d { v0, v1 }[0], [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i32 2
-  store double* %tmp, double** %ptr
+  %ld2 = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2lane.v2f64.p0(<2 x double> %B, <2 x double> %C, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double> } %ld2
 }
 
-define { <2 x double>, <2 x double> } @test_v2f64_post_reg_ld2lane(double* %A, double** %ptr, i64 %inc, <2 x double> %B, <2 x double> %C) nounwind {
+define { <2 x double>, <2 x double> } @test_v2f64_post_reg_ld2lane(ptr %A, ptr %ptr, i64 %inc, <2 x double> %B, <2 x double> %C) nounwind {
 ; CHECK-LABEL: test_v2f64_post_reg_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -4171,16 +4171,16 @@ define { <2 x double>, <2 x double> } @test_v2f64_post_reg_ld2lane(double* %A, d
 ; CHECK-NEXT:    ld2.d { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld2 = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2lane.v2f64.p0(<2 x double> %B, <2 x double> %C, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double> } %ld2
 }
 
-declare { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2lane.v2f64.p0f64(<2 x double>, <2 x double>, i64, double*) nounwind readonly
+declare { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2lane.v2f64.p0(<2 x double>, <2 x double>, i64, ptr) nounwind readonly
 
 
-define { <1 x double>, <1 x double> } @test_v1f64_post_imm_ld2lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C) nounwind {
+define { <1 x double>, <1 x double> } @test_v1f64_post_imm_ld2lane(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C) nounwind {
 ; CHECK-LABEL: test_v1f64_post_imm_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $q0_q1 def $q0_q1
@@ -4188,13 +4188,13 @@ define { <1 x double>, <1 x double> } @test_v1f64_post_imm_ld2lane(double* %A, d
 ; CHECK-NEXT:    ld2.d { v0, v1 }[0], [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i32 2
-  store double* %tmp, double** %ptr
+  %ld2 = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2lane.v1f64.p0(<1 x double> %B, <1 x double> %C, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 2
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double> } %ld2
 }
 
-define { <1 x double>, <1 x double> } @test_v1f64_post_reg_ld2lane(double* %A, double** %ptr, i64 %inc, <1 x double> %B, <1 x double> %C) nounwind {
+define { <1 x double>, <1 x double> } @test_v1f64_post_reg_ld2lane(ptr %A, ptr %ptr, i64 %inc, <1 x double> %B, <1 x double> %C) nounwind {
 ; CHECK-LABEL: test_v1f64_post_reg_ld2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -4203,16 +4203,16 @@ define { <1 x double>, <1 x double> } @test_v1f64_post_reg_ld2lane(double* %A, d
 ; CHECK-NEXT:    ld2.d { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld2 = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld2 = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2lane.v1f64.p0(<1 x double> %B, <1 x double> %C, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double> } %ld2
 }
 
-declare { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2lane.v1f64.p0f64(<1 x double>, <1 x double>, i64, double*) nounwind readonly
+declare { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2lane.v1f64.p0(<1 x double>, <1 x double>, i64, ptr) nounwind readonly
 
 
-define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld3lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D) nounwind {
+define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld3lane(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D) nounwind {
 ; CHECK-LABEL: test_v16i8_post_imm_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4221,13 +4221,13 @@ define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld3lane(i8* %A,
 ; CHECK-NEXT:    ld3.b { v0, v1, v2 }[0], [x0], #3
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 3
-  store i8* %tmp, i8** %ptr
+  %ld3 = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3lane.v16i8.p0(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
 }
 
-define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld3lane(i8* %A, i8** %ptr, i64 %inc, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D) nounwind {
+define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld3lane(ptr %A, ptr %ptr, i64 %inc, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D) nounwind {
 ; CHECK-LABEL: test_v16i8_post_reg_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4236,16 +4236,16 @@ define { <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld3lane(i8* %A,
 ; CHECK-NEXT:    ld3.b { v0, v1, v2 }[0], [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld3 = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3lane.v16i8.p0(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
 }
 
-declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i64, i8*) nounwind readonly
+declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3lane.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, i64, ptr) nounwind readonly
 
 
-define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld3lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D) nounwind {
+define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld3lane(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D) nounwind {
 ; CHECK-LABEL: test_v8i8_post_imm_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4254,13 +4254,13 @@ define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld3lane(i8* %A, i8**
 ; CHECK-NEXT:    ld3.b { v0, v1, v2 }[0], [x0], #3
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 3
-  store i8* %tmp, i8** %ptr
+  %ld3 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3lane.v8i8.p0(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
 }
 
-define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld3lane(i8* %A, i8** %ptr, i64 %inc, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D) nounwind {
+define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld3lane(ptr %A, ptr %ptr, i64 %inc, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D) nounwind {
 ; CHECK-LABEL: test_v8i8_post_reg_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4269,16 +4269,16 @@ define { <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld3lane(i8* %A, i8**
 ; CHECK-NEXT:    ld3.b { v0, v1, v2 }[0], [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld3 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3lane.v8i8.p0(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
 }
 
-declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3lane.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, i64, i8*) nounwind readonly
+declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3lane.v8i8.p0(<8 x i8>, <8 x i8>, <8 x i8>, i64, ptr) nounwind readonly
 
 
-define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld3lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D) nounwind {
+define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld3lane(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D) nounwind {
 ; CHECK-LABEL: test_v8i16_post_imm_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4287,13 +4287,13 @@ define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld3lane(i16* %A,
 ; CHECK-NEXT:    ld3.h { v0, v1, v2 }[0], [x0], #6
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 3
-  store i16* %tmp, i16** %ptr
+  %ld3 = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3lane.v8i16.p0(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
 }
 
-define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld3lane(i16* %A, i16** %ptr, i64 %inc, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D) nounwind {
+define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld3lane(ptr %A, ptr %ptr, i64 %inc, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D) nounwind {
 ; CHECK-LABEL: test_v8i16_post_reg_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4303,16 +4303,16 @@ define { <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld3lane(i16* %A,
 ; CHECK-NEXT:    ld3.h { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld3 = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3lane.v8i16.p0(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
 }
 
-declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, i64, i16*) nounwind readonly
+declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3lane.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, i64, ptr) nounwind readonly
 
 
-define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld3lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D) nounwind {
+define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld3lane(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D) nounwind {
 ; CHECK-LABEL: test_v4i16_post_imm_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4321,13 +4321,13 @@ define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld3lane(i16* %A,
 ; CHECK-NEXT:    ld3.h { v0, v1, v2 }[0], [x0], #6
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 3
-  store i16* %tmp, i16** %ptr
+  %ld3 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3lane.v4i16.p0(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
 }
 
-define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld3lane(i16* %A, i16** %ptr, i64 %inc, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D) nounwind {
+define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld3lane(ptr %A, ptr %ptr, i64 %inc, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D) nounwind {
 ; CHECK-LABEL: test_v4i16_post_reg_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4337,16 +4337,16 @@ define { <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld3lane(i16* %A,
 ; CHECK-NEXT:    ld3.h { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld3 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3lane.v4i16.p0(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
 }
 
-declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3lane.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, i64, i16*) nounwind readonly
+declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3lane.v4i16.p0(<4 x i16>, <4 x i16>, <4 x i16>, i64, ptr) nounwind readonly
 
 
-define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld3lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D) nounwind {
+define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld3lane(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D) nounwind {
 ; CHECK-LABEL: test_v4i32_post_imm_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4355,13 +4355,13 @@ define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld3lane(i32* %A,
 ; CHECK-NEXT:    ld3.s { v0, v1, v2 }[0], [x0], #12
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 3
-  store i32* %tmp, i32** %ptr
+  %ld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
 }
 
-define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld3lane(i32* %A, i32** %ptr, i64 %inc, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D) nounwind {
+define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld3lane(ptr %A, ptr %ptr, i64 %inc, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D) nounwind {
 ; CHECK-LABEL: test_v4i32_post_reg_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4371,16 +4371,16 @@ define { <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld3lane(i32* %A,
 ; CHECK-NEXT:    ld3.s { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
 }
 
-declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i64, i32*) nounwind readonly
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, i64, ptr) nounwind readonly
 
 
-define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld3lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D) nounwind {
+define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld3lane(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D) nounwind {
 ; CHECK-LABEL: test_v2i32_post_imm_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4389,13 +4389,13 @@ define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld3lane(i32* %A,
 ; CHECK-NEXT:    ld3.s { v0, v1, v2 }[0], [x0], #12
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 3
-  store i32* %tmp, i32** %ptr
+  %ld3 = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3lane.v2i32.p0(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
 }
 
-define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld3lane(i32* %A, i32** %ptr, i64 %inc, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D) nounwind {
+define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld3lane(ptr %A, ptr %ptr, i64 %inc, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D) nounwind {
 ; CHECK-LABEL: test_v2i32_post_reg_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4405,16 +4405,16 @@ define { <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld3lane(i32* %A,
 ; CHECK-NEXT:    ld3.s { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld3 = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3lane.v2i32.p0(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
 }
 
-declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3lane.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, i64, i32*) nounwind readonly
+declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3lane.v2i32.p0(<2 x i32>, <2 x i32>, <2 x i32>, i64, ptr) nounwind readonly
 
 
-define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld3lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D) nounwind {
+define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld3lane(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D) nounwind {
 ; CHECK-LABEL: test_v2i64_post_imm_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4423,13 +4423,13 @@ define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld3lane(i64* %A,
 ; CHECK-NEXT:    ld3.d { v0, v1, v2 }[0], [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 3
-  store i64* %tmp, i64** %ptr
+  %ld3 = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3lane.v2i64.p0(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
 }
 
-define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld3lane(i64* %A, i64** %ptr, i64 %inc, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D) nounwind {
+define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld3lane(ptr %A, ptr %ptr, i64 %inc, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D) nounwind {
 ; CHECK-LABEL: test_v2i64_post_reg_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4439,16 +4439,16 @@ define { <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld3lane(i64* %A,
 ; CHECK-NEXT:    ld3.d { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld3 = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3lane.v2i64.p0(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
 }
 
-declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64, i64*) nounwind readonly
+declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3lane.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, i64, ptr) nounwind readonly
 
 
-define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld3lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D) nounwind {
+define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld3lane(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D) nounwind {
 ; CHECK-LABEL: test_v1i64_post_imm_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4457,13 +4457,13 @@ define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld3lane(i64* %A,
 ; CHECK-NEXT:    ld3.d { v0, v1, v2 }[0], [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 3
-  store i64* %tmp, i64** %ptr
+  %ld3 = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3lane.v1i64.p0(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
 }
 
-define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld3lane(i64* %A, i64** %ptr, i64 %inc, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D) nounwind {
+define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld3lane(ptr %A, ptr %ptr, i64 %inc, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D) nounwind {
 ; CHECK-LABEL: test_v1i64_post_reg_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4473,16 +4473,16 @@ define { <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld3lane(i64* %A,
 ; CHECK-NEXT:    ld3.d { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld3 = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3lane.v1i64.p0(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
 }
 
-declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3lane.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, i64, i64*) nounwind readonly
+declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3lane.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>, i64, ptr) nounwind readonly
 
 
-define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld3lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D) nounwind {
+define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld3lane(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D) nounwind {
 ; CHECK-LABEL: test_v4f32_post_imm_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4491,13 +4491,13 @@ define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld3lane(fl
 ; CHECK-NEXT:    ld3.s { v0, v1, v2 }[0], [x0], #12
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i32 3
-  store float* %tmp, float** %ptr
+  %ld3 = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3lane.v4f32.p0(<4 x float> %B, <4 x float> %C, <4 x float> %D, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float>, <4 x float> } %ld3
 }
 
-define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld3lane(float* %A, float** %ptr, i64 %inc, <4 x float> %B, <4 x float> %C, <4 x float> %D) nounwind {
+define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld3lane(ptr %A, ptr %ptr, i64 %inc, <4 x float> %B, <4 x float> %C, <4 x float> %D) nounwind {
 ; CHECK-LABEL: test_v4f32_post_reg_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4507,16 +4507,16 @@ define { <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld3lane(fl
 ; CHECK-NEXT:    ld3.s { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld3 = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3lane.v4f32.p0(<4 x float> %B, <4 x float> %C, <4 x float> %D, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float>, <4 x float> } %ld3
 }
 
-declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3lane.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, i64, float*) nounwind readonly
+declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3lane.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, i64, ptr) nounwind readonly
 
 
-define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld3lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D) nounwind {
+define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld3lane(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D) nounwind {
 ; CHECK-LABEL: test_v2f32_post_imm_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4525,13 +4525,13 @@ define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld3lane(fl
 ; CHECK-NEXT:    ld3.s { v0, v1, v2 }[0], [x0], #12
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i32 3
-  store float* %tmp, float** %ptr
+  %ld3 = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3lane.v2f32.p0(<2 x float> %B, <2 x float> %C, <2 x float> %D, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float>, <2 x float> } %ld3
 }
 
-define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld3lane(float* %A, float** %ptr, i64 %inc, <2 x float> %B, <2 x float> %C, <2 x float> %D) nounwind {
+define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld3lane(ptr %A, ptr %ptr, i64 %inc, <2 x float> %B, <2 x float> %C, <2 x float> %D) nounwind {
 ; CHECK-LABEL: test_v2f32_post_reg_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4541,16 +4541,16 @@ define { <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld3lane(fl
 ; CHECK-NEXT:    ld3.s { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld3 = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3lane.v2f32.p0(<2 x float> %B, <2 x float> %C, <2 x float> %D, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float>, <2 x float> } %ld3
 }
 
-declare { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3lane.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, i64, float*) nounwind readonly
+declare { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3lane.v2f32.p0(<2 x float>, <2 x float>, <2 x float>, i64, ptr) nounwind readonly
 
 
-define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld3lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D) nounwind {
+define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld3lane(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D) nounwind {
 ; CHECK-LABEL: test_v2f64_post_imm_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4559,13 +4559,13 @@ define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld3lane
 ; CHECK-NEXT:    ld3.d { v0, v1, v2 }[0], [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i32 3
-  store double* %tmp, double** %ptr
+  %ld3 = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3lane.v2f64.p0(<2 x double> %B, <2 x double> %C, <2 x double> %D, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double>, <2 x double> } %ld3
 }
 
-define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld3lane(double* %A, double** %ptr, i64 %inc, <2 x double> %B, <2 x double> %C, <2 x double> %D) nounwind {
+define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld3lane(ptr %A, ptr %ptr, i64 %inc, <2 x double> %B, <2 x double> %C, <2 x double> %D) nounwind {
 ; CHECK-LABEL: test_v2f64_post_reg_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4575,16 +4575,16 @@ define { <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld3lane
 ; CHECK-NEXT:    ld3.d { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld3 = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3lane.v2f64.p0(<2 x double> %B, <2 x double> %C, <2 x double> %D, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double>, <2 x double> } %ld3
 }
 
-declare { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3lane.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, i64, double*) nounwind readonly
+declare { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3lane.v2f64.p0(<2 x double>, <2 x double>, <2 x double>, i64, ptr) nounwind readonly
 
 
-define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld3lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D) nounwind {
+define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld3lane(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D) nounwind {
 ; CHECK-LABEL: test_v1f64_post_imm_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4593,13 +4593,13 @@ define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld3lane
 ; CHECK-NEXT:    ld3.d { v0, v1, v2 }[0], [x0], #24
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i32 3
-  store double* %tmp, double** %ptr
+  %ld3 = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3lane.v1f64.p0(<1 x double> %B, <1 x double> %C, <1 x double> %D, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 3
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double>, <1 x double> } %ld3
 }
 
-define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld3lane(double* %A, double** %ptr, i64 %inc, <1 x double> %B, <1 x double> %C, <1 x double> %D) nounwind {
+define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld3lane(ptr %A, ptr %ptr, i64 %inc, <1 x double> %B, <1 x double> %C, <1 x double> %D) nounwind {
 ; CHECK-LABEL: test_v1f64_post_reg_ld3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -4609,16 +4609,16 @@ define { <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld3lane
 ; CHECK-NEXT:    ld3.d { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld3 = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld3 = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3lane.v1f64.p0(<1 x double> %B, <1 x double> %C, <1 x double> %D, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double>, <1 x double> } %ld3
 }
 
-declare { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3lane.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, i64, double*) nounwind readonly
+declare { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3lane.v1f64.p0(<1 x double>, <1 x double>, <1 x double>, i64, ptr) nounwind readonly
 
 
-define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld4lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E) nounwind {
+define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld4lane(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E) nounwind {
 ; CHECK-LABEL: test_v16i8_post_imm_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4628,13 +4628,13 @@ define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld4la
 ; CHECK-NEXT:    ld4.b { v0, v1, v2, v3 }[0], [x0], #4
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 4
-  store i8* %tmp, i8** %ptr
+  %ld4 = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4lane.v16i8.p0(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
 }
 
-define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld4lane(i8* %A, i8** %ptr, i64 %inc, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E) nounwind {
+define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld4lane(ptr %A, ptr %ptr, i64 %inc, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E) nounwind {
 ; CHECK-LABEL: test_v16i8_post_reg_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4644,16 +4644,16 @@ define { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @test_v16i8_post_reg_ld4la
 ; CHECK-NEXT:    ld4.b { v0, v1, v2, v3 }[0], [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld4 = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4lane.v16i8.p0(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
 }
 
-declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i64, i8*) nounwind readonly
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4lane.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i64, ptr) nounwind readonly
 
 
-define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld4lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E) nounwind {
+define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld4lane(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E) nounwind {
 ; CHECK-LABEL: test_v8i8_post_imm_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4663,13 +4663,13 @@ define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_imm_ld4lane(i8
 ; CHECK-NEXT:    ld4.b { v0, v1, v2, v3 }[0], [x0], #4
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 4
-  store i8* %tmp, i8** %ptr
+  %ld4 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4lane.v8i8.p0(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
 }
 
-define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld4lane(i8* %A, i8** %ptr, i64 %inc, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E) nounwind {
+define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld4lane(ptr %A, ptr %ptr, i64 %inc, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E) nounwind {
 ; CHECK-LABEL: test_v8i8_post_reg_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4679,16 +4679,16 @@ define { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @test_v8i8_post_reg_ld4lane(i8
 ; CHECK-NEXT:    ld4.b { v0, v1, v2, v3 }[0], [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  store i8* %tmp, i8** %ptr
+  %ld4 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4lane.v8i8.p0(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
 }
 
-declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4lane.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i64, i8*) nounwind readonly
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4lane.v8i8.p0(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i64, ptr) nounwind readonly
 
 
-define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld4lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E) nounwind {
+define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld4lane(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E) nounwind {
 ; CHECK-LABEL: test_v8i16_post_imm_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4698,13 +4698,13 @@ define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_imm_ld4la
 ; CHECK-NEXT:    ld4.h { v0, v1, v2, v3 }[0], [x0], #8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 4
-  store i16* %tmp, i16** %ptr
+  %ld4 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4lane.v8i16.p0(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
 }
 
-define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld4lane(i16* %A, i16** %ptr, i64 %inc, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E) nounwind {
+define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld4lane(ptr %A, ptr %ptr, i64 %inc, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E) nounwind {
 ; CHECK-LABEL: test_v8i16_post_reg_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4715,16 +4715,16 @@ define { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @test_v8i16_post_reg_ld4la
 ; CHECK-NEXT:    ld4.h { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld4 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4lane.v8i16.p0(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
 }
 
-declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i64, i16*) nounwind readonly
+declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4lane.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i64, ptr) nounwind readonly
 
 
-define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld4lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E) nounwind {
+define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld4lane(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E) nounwind {
 ; CHECK-LABEL: test_v4i16_post_imm_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4734,13 +4734,13 @@ define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_imm_ld4la
 ; CHECK-NEXT:    ld4.h { v0, v1, v2, v3 }[0], [x0], #8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 4
-  store i16* %tmp, i16** %ptr
+  %ld4 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4lane.v4i16.p0(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
 }
 
-define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld4lane(i16* %A, i16** %ptr, i64 %inc, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E) nounwind {
+define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld4lane(ptr %A, ptr %ptr, i64 %inc, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E) nounwind {
 ; CHECK-LABEL: test_v4i16_post_reg_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4751,16 +4751,16 @@ define { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @test_v4i16_post_reg_ld4la
 ; CHECK-NEXT:    ld4.h { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  store i16* %tmp, i16** %ptr
+  %ld4 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4lane.v4i16.p0(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
 }
 
-declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4lane.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i64, i16*) nounwind readonly
+declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4lane.v4i16.p0(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i64, ptr) nounwind readonly
 
 
-define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld4lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E) nounwind {
+define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld4lane(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E) nounwind {
 ; CHECK-LABEL: test_v4i32_post_imm_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4770,13 +4770,13 @@ define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_imm_ld4la
 ; CHECK-NEXT:    ld4.s { v0, v1, v2, v3 }[0], [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 4
-  store i32* %tmp, i32** %ptr
+  %ld4 = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
 }
 
-define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld4lane(i32* %A, i32** %ptr, i64 %inc, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E) nounwind {
+define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld4lane(ptr %A, ptr %ptr, i64 %inc, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E) nounwind {
 ; CHECK-LABEL: test_v4i32_post_reg_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4787,16 +4787,16 @@ define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_v4i32_post_reg_ld4la
 ; CHECK-NEXT:    ld4.s { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld4 = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
 }
 
-declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, i32*) nounwind readonly
+declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, ptr) nounwind readonly
 
 
-define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld4lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E) nounwind {
+define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld4lane(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E) nounwind {
 ; CHECK-LABEL: test_v2i32_post_imm_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4806,13 +4806,13 @@ define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_imm_ld4la
 ; CHECK-NEXT:    ld4.s { v0, v1, v2, v3 }[0], [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 4
-  store i32* %tmp, i32** %ptr
+  %ld4 = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4lane.v2i32.p0(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
 }
 
-define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld4lane(i32* %A, i32** %ptr, i64 %inc, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E) nounwind {
+define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld4lane(ptr %A, ptr %ptr, i64 %inc, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E) nounwind {
 ; CHECK-LABEL: test_v2i32_post_reg_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4823,16 +4823,16 @@ define { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @test_v2i32_post_reg_ld4la
 ; CHECK-NEXT:    ld4.s { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  store i32* %tmp, i32** %ptr
+  %ld4 = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4lane.v2i32.p0(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
 }
 
-declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4lane.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i64, i32*) nounwind readonly
+declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4lane.v2i32.p0(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i64, ptr) nounwind readonly
 
 
-define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld4lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E) nounwind {
+define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld4lane(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E) nounwind {
 ; CHECK-LABEL: test_v2i64_post_imm_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4842,13 +4842,13 @@ define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_imm_ld4la
 ; CHECK-NEXT:    ld4.d { v0, v1, v2, v3 }[0], [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 4
-  store i64* %tmp, i64** %ptr
+  %ld4 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4lane.v2i64.p0(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
 }
 
-define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld4lane(i64* %A, i64** %ptr, i64 %inc, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E) nounwind {
+define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld4lane(ptr %A, ptr %ptr, i64 %inc, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E) nounwind {
 ; CHECK-LABEL: test_v2i64_post_reg_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4859,16 +4859,16 @@ define { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @test_v2i64_post_reg_ld4la
 ; CHECK-NEXT:    ld4.d { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld4 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4lane.v2i64.p0(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
 }
 
-declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64, i64*) nounwind readonly
+declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4lane.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64, ptr) nounwind readonly
 
 
-define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld4lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E) nounwind {
+define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld4lane(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E) nounwind {
 ; CHECK-LABEL: test_v1i64_post_imm_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4878,13 +4878,13 @@ define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_imm_ld4la
 ; CHECK-NEXT:    ld4.d { v0, v1, v2, v3 }[0], [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i32 4
-  store i64* %tmp, i64** %ptr
+  %ld4 = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4lane.v1i64.p0(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
 }
 
-define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld4lane(i64* %A, i64** %ptr, i64 %inc, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E) nounwind {
+define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld4lane(ptr %A, ptr %ptr, i64 %inc, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E) nounwind {
 ; CHECK-LABEL: test_v1i64_post_reg_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4895,16 +4895,16 @@ define { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @test_v1i64_post_reg_ld4la
 ; CHECK-NEXT:    ld4.d { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  store i64* %tmp, i64** %ptr
+  %ld4 = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4lane.v1i64.p0(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
 }
 
-declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4lane.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64, i64*) nounwind readonly
+declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4lane.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64, ptr) nounwind readonly
 
 
-define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld4lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E) nounwind {
+define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_imm_ld4lane(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E) nounwind {
 ; CHECK-LABEL: test_v4f32_post_imm_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4914,13 +4914,13 @@ define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_i
 ; CHECK-NEXT:    ld4.s { v0, v1, v2, v3 }[0], [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i32 4
-  store float* %tmp, float** %ptr
+  %ld4 = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4lane.v4f32.p0(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
 }
 
-define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld4lane(float* %A, float** %ptr, i64 %inc, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E) nounwind {
+define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_reg_ld4lane(ptr %A, ptr %ptr, i64 %inc, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E) nounwind {
 ; CHECK-LABEL: test_v4f32_post_reg_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4931,16 +4931,16 @@ define { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @test_v4f32_post_r
 ; CHECK-NEXT:    ld4.s { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld4 = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4lane.v4f32.p0(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
 }
 
-declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4lane.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, <4 x float>, i64, float*) nounwind readonly
+declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4lane.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, <4 x float>, i64, ptr) nounwind readonly
 
 
-define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld4lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E) nounwind {
+define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_imm_ld4lane(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E) nounwind {
 ; CHECK-LABEL: test_v2f32_post_imm_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4950,13 +4950,13 @@ define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_i
 ; CHECK-NEXT:    ld4.s { v0, v1, v2, v3 }[0], [x0], #16
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i32 4
-  store float* %tmp, float** %ptr
+  %ld4 = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4lane.v2f32.p0(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
 }
 
-define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld4lane(float* %A, float** %ptr, i64 %inc, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E) nounwind {
+define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_reg_ld4lane(ptr %A, ptr %ptr, i64 %inc, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E) nounwind {
 ; CHECK-LABEL: test_v2f32_post_reg_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4967,16 +4967,16 @@ define { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @test_v2f32_post_r
 ; CHECK-NEXT:    ld4.s { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  store float* %tmp, float** %ptr
+  %ld4 = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4lane.v2f32.p0(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
 }
 
-declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4lane.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, <2 x float>, i64, float*) nounwind readonly
+declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4lane.v2f32.p0(<2 x float>, <2 x float>, <2 x float>, <2 x float>, i64, ptr) nounwind readonly
 
 
-define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld4lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E) nounwind {
+define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_imm_ld4lane(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E) nounwind {
 ; CHECK-LABEL: test_v2f64_post_imm_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -4986,13 +4986,13 @@ define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_po
 ; CHECK-NEXT:    ld4.d { v0, v1, v2, v3 }[0], [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i32 4
-  store double* %tmp, double** %ptr
+  %ld4 = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4lane.v2f64.p0(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
 }
 
-define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld4lane(double* %A, double** %ptr, i64 %inc, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E) nounwind {
+define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_post_reg_ld4lane(ptr %A, ptr %ptr, i64 %inc, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E) nounwind {
 ; CHECK-LABEL: test_v2f64_post_reg_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -5003,16 +5003,16 @@ define { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @test_v2f64_po
 ; CHECK-NEXT:    ld4.d { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld4 = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4lane.v2f64.p0(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
 }
 
-declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4lane.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, <2 x double>, i64, double*) nounwind readonly
+declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4lane.v2f64.p0(<2 x double>, <2 x double>, <2 x double>, <2 x double>, i64, ptr) nounwind readonly
 
 
-define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld4lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E) nounwind {
+define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_imm_ld4lane(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E) nounwind {
 ; CHECK-LABEL: test_v1f64_post_imm_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -5022,13 +5022,13 @@ define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_po
 ; CHECK-NEXT:    ld4.d { v0, v1, v2, v3 }[0], [x0], #32
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i32 4
-  store double* %tmp, double** %ptr
+  %ld4 = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4lane.v1f64.p0(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i32 4
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
 }
 
-define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld4lane(double* %A, double** %ptr, i64 %inc, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E) nounwind {
+define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_post_reg_ld4lane(ptr %A, ptr %ptr, i64 %inc, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E) nounwind {
 ; CHECK-LABEL: test_v1f64_post_reg_ld4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -5039,82 +5039,82 @@ define { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @test_v1f64_po
 ; CHECK-NEXT:    ld4.d { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %ld4 = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  store double* %tmp, double** %ptr
+  %ld4 = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4lane.v1f64.p0(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  store ptr %tmp, ptr %ptr
   ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
 }
 
-declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4lane.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, <1 x double>, i64, double*) nounwind readonly
+declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4lane.v1f64.p0(<1 x double>, <1 x double>, <1 x double>, <1 x double>, i64, ptr) nounwind readonly
 
 
-define i8* @test_v16i8_post_imm_st2(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C) nounwind {
+define ptr @test_v16i8_post_imm_st2(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C) nounwind {
 ; CHECK-LABEL: test_v16i8_post_imm_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.16b { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 32
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> %B, <16 x i8> %C, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 32
+  ret ptr %tmp
 }
 
-define i8* @test_v16i8_post_reg_st2(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, i64 %inc) nounwind {
+define ptr @test_v16i8_post_reg_st2(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v16i8_post_reg_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.16b { v0, v1 }, [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> %B, <16 x i8> %C, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2.v16i8.p0i8(<16 x i8>, <16 x i8>, i8*)
+declare void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8>, <16 x i8>, ptr)
 
 
-define i8* @test_v8i8_post_imm_st2(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C) nounwind {
+define ptr @test_v8i8_post_imm_st2(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C) nounwind {
 ; CHECK-LABEL: test_v8i8_post_imm_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st2.8b { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 16
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %B, <8 x i8> %C, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 16
+  ret ptr %tmp
 }
 
-define i8* @test_v8i8_post_reg_st2(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, i64 %inc) nounwind {
+define ptr @test_v8i8_post_reg_st2(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i8_post_reg_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st2.8b { v0, v1 }, [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %B, <8 x i8> %C, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*)
+declare void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8>, <8 x i8>, ptr)
 
 
-define i16* @test_v8i16_post_imm_st2(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C) nounwind {
+define ptr @test_v8i16_post_imm_st2(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C) nounwind {
 ; CHECK-LABEL: test_v8i16_post_imm_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.8h { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 16
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> %B, <8 x i16> %C, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 16
+  ret ptr %tmp
 }
 
-define i16* @test_v8i16_post_reg_st2(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, i64 %inc) nounwind {
+define ptr @test_v8i16_post_reg_st2(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i16_post_reg_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
@@ -5122,27 +5122,27 @@ define i16* @test_v8i16_post_reg_st2(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.8h { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> %B, <8 x i16> %C, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2.v8i16.p0i16(<8 x i16>, <8 x i16>, i16*)
+declare void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16>, <8 x i16>, ptr)
 
 
-define i16* @test_v4i16_post_imm_st2(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C) nounwind {
+define ptr @test_v4i16_post_imm_st2(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C) nounwind {
 ; CHECK-LABEL: test_v4i16_post_imm_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st2.4h { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 8
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> %B, <4 x i16> %C, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 8
+  ret ptr %tmp
 }
 
-define i16* @test_v4i16_post_reg_st2(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, i64 %inc) nounwind {
+define ptr @test_v4i16_post_reg_st2(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i16_post_reg_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
@@ -5150,27 +5150,27 @@ define i16* @test_v4i16_post_reg_st2(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st2.4h { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> %B, <4 x i16> %C, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2.v4i16.p0i16(<4 x i16>, <4 x i16>, i16*)
+declare void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16>, <4 x i16>, ptr)
 
 
-define i32* @test_v4i32_post_imm_st2(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C) nounwind {
+define ptr @test_v4i32_post_imm_st2(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C) nounwind {
 ; CHECK-LABEL: test_v4i32_post_imm_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.4s { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 8
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> %B, <4 x i32> %C, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 8
+  ret ptr %tmp
 }
 
-define i32* @test_v4i32_post_reg_st2(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, i64 %inc) nounwind {
+define ptr @test_v4i32_post_reg_st2(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i32_post_reg_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -5178,27 +5178,27 @@ define i32* @test_v4i32_post_reg_st2(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.4s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> %B, <4 x i32> %C, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2.v4i32.p0i32(<4 x i32>, <4 x i32>, i32*)
+declare void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32>, <4 x i32>, ptr)
 
 
-define i32* @test_v2i32_post_imm_st2(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C) nounwind {
+define ptr @test_v2i32_post_imm_st2(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C) nounwind {
 ; CHECK-LABEL: test_v2i32_post_imm_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st2.2s { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 4
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32> %B, <2 x i32> %C, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 4
+  ret ptr %tmp
 }
 
-define i32* @test_v2i32_post_reg_st2(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, i64 %inc) nounwind {
+define ptr @test_v2i32_post_reg_st2(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i32_post_reg_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -5206,27 +5206,27 @@ define i32* @test_v2i32_post_reg_st2(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st2.2s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32> %B, <2 x i32> %C, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2.v2i32.p0i32(<2 x i32>, <2 x i32>, i32*)
+declare void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32>, <2 x i32>, ptr)
 
 
-define i64* @test_v2i64_post_imm_st2(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C) nounwind {
+define ptr @test_v2i64_post_imm_st2(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C) nounwind {
 ; CHECK-LABEL: test_v2i64_post_imm_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.2d { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 4
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> %B, <2 x i64> %C, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 4
+  ret ptr %tmp
 }
 
-define i64* @test_v2i64_post_reg_st2(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, i64 %inc) nounwind {
+define ptr @test_v2i64_post_reg_st2(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i64_post_reg_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -5234,27 +5234,27 @@ define i64* @test_v2i64_post_reg_st2(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.2d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> %B, <2 x i64> %C, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2.v2i64.p0i64(<2 x i64>, <2 x i64>, i64*)
+declare void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64>, <2 x i64>, ptr)
 
 
-define i64* @test_v1i64_post_imm_st2(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C) nounwind {
+define ptr @test_v1i64_post_imm_st2(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C) nounwind {
 ; CHECK-LABEL: test_v1i64_post_imm_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st1.1d { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 2
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> %B, <1 x i64> %C, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 2
+  ret ptr %tmp
 }
 
-define i64* @test_v1i64_post_reg_st2(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, i64 %inc) nounwind {
+define ptr @test_v1i64_post_reg_st2(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1i64_post_reg_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -5262,27 +5262,27 @@ define i64* @test_v1i64_post_reg_st2(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st1.1d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> %B, <1 x i64> %C, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2.v1i64.p0i64(<1 x i64>, <1 x i64>, i64*)
+declare void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64>, <1 x i64>, ptr)
 
 
-define float* @test_v4f32_post_imm_st2(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C) nounwind {
+define ptr @test_v4f32_post_imm_st2(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C) nounwind {
 ; CHECK-LABEL: test_v4f32_post_imm_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.4s { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v4f32.p0f32(<4 x float> %B, <4 x float> %C, float* %A)
-  %tmp = getelementptr float, float* %A, i32 8
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st2.v4f32.p0(<4 x float> %B, <4 x float> %C, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 8
+  ret ptr %tmp
 }
 
-define float* @test_v4f32_post_reg_st2(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, i64 %inc) nounwind {
+define ptr @test_v4f32_post_reg_st2(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4f32_post_reg_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -5290,27 +5290,27 @@ define float* @test_v4f32_post_reg_st2(float* %A, float** %ptr, <4 x float> %B,
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.4s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v4f32.p0f32(<4 x float> %B, <4 x float> %C, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st2.v4f32.p0(<4 x float> %B, <4 x float> %C, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2.v4f32.p0f32(<4 x float>, <4 x float>, float*)
+declare void @llvm.aarch64.neon.st2.v4f32.p0(<4 x float>, <4 x float>, ptr)
 
 
-define float* @test_v2f32_post_imm_st2(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C) nounwind {
+define ptr @test_v2f32_post_imm_st2(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C) nounwind {
 ; CHECK-LABEL: test_v2f32_post_imm_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st2.2s { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v2f32.p0f32(<2 x float> %B, <2 x float> %C, float* %A)
-  %tmp = getelementptr float, float* %A, i32 4
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st2.v2f32.p0(<2 x float> %B, <2 x float> %C, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 4
+  ret ptr %tmp
 }
 
-define float* @test_v2f32_post_reg_st2(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, i64 %inc) nounwind {
+define ptr @test_v2f32_post_reg_st2(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f32_post_reg_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -5318,27 +5318,27 @@ define float* @test_v2f32_post_reg_st2(float* %A, float** %ptr, <2 x float> %B,
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st2.2s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v2f32.p0f32(<2 x float> %B, <2 x float> %C, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st2.v2f32.p0(<2 x float> %B, <2 x float> %C, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2.v2f32.p0f32(<2 x float>, <2 x float>, float*)
+declare void @llvm.aarch64.neon.st2.v2f32.p0(<2 x float>, <2 x float>, ptr)
 
 
-define double* @test_v2f64_post_imm_st2(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C) nounwind {
+define ptr @test_v2f64_post_imm_st2(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C) nounwind {
 ; CHECK-LABEL: test_v2f64_post_imm_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.2d { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v2f64.p0f64(<2 x double> %B, <2 x double> %C, double* %A)
-  %tmp = getelementptr double, double* %A, i64 4
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st2.v2f64.p0(<2 x double> %B, <2 x double> %C, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 4
+  ret ptr %tmp
 }
 
-define double* @test_v2f64_post_reg_st2(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, i64 %inc) nounwind {
+define ptr @test_v2f64_post_reg_st2(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f64_post_reg_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -5346,27 +5346,27 @@ define double* @test_v2f64_post_reg_st2(double* %A, double** %ptr, <2 x double>
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.2d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v2f64.p0f64(<2 x double> %B, <2 x double> %C, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st2.v2f64.p0(<2 x double> %B, <2 x double> %C, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2.v2f64.p0f64(<2 x double>, <2 x double>, double*)
+declare void @llvm.aarch64.neon.st2.v2f64.p0(<2 x double>, <2 x double>, ptr)
 
 
-define double* @test_v1f64_post_imm_st2(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C) nounwind {
+define ptr @test_v1f64_post_imm_st2(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C) nounwind {
 ; CHECK-LABEL: test_v1f64_post_imm_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st1.1d { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v1f64.p0f64(<1 x double> %B, <1 x double> %C, double* %A)
-  %tmp = getelementptr double, double* %A, i64 2
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st2.v1f64.p0(<1 x double> %B, <1 x double> %C, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 2
+  ret ptr %tmp
 }
 
-define double* @test_v1f64_post_reg_st2(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, i64 %inc) nounwind {
+define ptr @test_v1f64_post_reg_st2(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1f64_post_reg_st2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -5374,15 +5374,15 @@ define double* @test_v1f64_post_reg_st2(double* %A, double** %ptr, <1 x double>
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st1.1d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2.v1f64.p0f64(<1 x double> %B, <1 x double> %C, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st2.v1f64.p0(<1 x double> %B, <1 x double> %C, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2.v1f64.p0f64(<1 x double>, <1 x double>, double*)
+declare void @llvm.aarch64.neon.st2.v1f64.p0(<1 x double>, <1 x double>, ptr)
 
 
-define i8* @test_v16i8_post_imm_st3(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D) nounwind {
+define ptr @test_v16i8_post_imm_st3(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D) nounwind {
 ; CHECK-LABEL: test_v16i8_post_imm_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -5390,12 +5390,12 @@ define i8* @test_v16i8_post_imm_st3(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.16b { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 48
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 48
+  ret ptr %tmp
 }
 
-define i8* @test_v16i8_post_reg_st3(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 %inc) nounwind {
+define ptr @test_v16i8_post_reg_st3(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v16i8_post_reg_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -5403,15 +5403,15 @@ define i8* @test_v16i8_post_reg_st3(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.16b { v0, v1, v2 }, [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i8*)
+declare void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, ptr)
 
 
-define i8* @test_v8i8_post_imm_st3(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D) nounwind {
+define ptr @test_v8i8_post_imm_st3(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D) nounwind {
 ; CHECK-LABEL: test_v8i8_post_imm_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $d0_d1_d2 def $d0_d1_d2
@@ -5419,12 +5419,12 @@ define i8* @test_v8i8_post_imm_st3(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C,
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st3.8b { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 24
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 24
+  ret ptr %tmp
 }
 
-define i8* @test_v8i8_post_reg_st3(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 %inc) nounwind {
+define ptr @test_v8i8_post_reg_st3(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i8_post_reg_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $d0_d1_d2 def $d0_d1_d2
@@ -5432,15 +5432,15 @@ define i8* @test_v8i8_post_reg_st3(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C,
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st3.8b { v0, v1, v2 }, [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, i8*)
+declare void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8>, <8 x i8>, <8 x i8>, ptr)
 
 
-define i16* @test_v8i16_post_imm_st3(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D) nounwind {
+define ptr @test_v8i16_post_imm_st3(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D) nounwind {
 ; CHECK-LABEL: test_v8i16_post_imm_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -5448,12 +5448,12 @@ define i16* @test_v8i16_post_imm_st3(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.8h { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 24
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 24
+  ret ptr %tmp
 }
 
-define i16* @test_v8i16_post_reg_st3(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 %inc) nounwind {
+define ptr @test_v8i16_post_reg_st3(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i16_post_reg_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
@@ -5462,15 +5462,15 @@ define i16* @test_v8i16_post_reg_st3(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.8h { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, i16*)
+declare void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, ptr)
 
 
-define i16* @test_v4i16_post_imm_st3(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D) nounwind {
+define ptr @test_v4i16_post_imm_st3(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D) nounwind {
 ; CHECK-LABEL: test_v4i16_post_imm_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $d0_d1_d2 def $d0_d1_d2
@@ -5478,12 +5478,12 @@ define i16* @test_v4i16_post_imm_st3(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st3.4h { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 12
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 12
+  ret ptr %tmp
 }
 
-define i16* @test_v4i16_post_reg_st3(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 %inc) nounwind {
+define ptr @test_v4i16_post_reg_st3(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i16_post_reg_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
@@ -5492,15 +5492,15 @@ define i16* @test_v4i16_post_reg_st3(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st3.4h { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, i16*)
+declare void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16>, <4 x i16>, <4 x i16>, ptr)
 
 
-define i32* @test_v4i32_post_imm_st3(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D) nounwind {
+define ptr @test_v4i32_post_imm_st3(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D) nounwind {
 ; CHECK-LABEL: test_v4i32_post_imm_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -5508,12 +5508,12 @@ define i32* @test_v4i32_post_imm_st3(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.4s { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 12
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 12
+  ret ptr %tmp
 }
 
-define i32* @test_v4i32_post_reg_st3(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 %inc) nounwind {
+define ptr @test_v4i32_post_reg_st3(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i32_post_reg_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -5522,15 +5522,15 @@ define i32* @test_v4i32_post_reg_st3(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.4s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i32*)
+declare void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, ptr)
 
 
-define i32* @test_v2i32_post_imm_st3(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D) nounwind {
+define ptr @test_v2i32_post_imm_st3(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D) nounwind {
 ; CHECK-LABEL: test_v2i32_post_imm_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $d0_d1_d2 def $d0_d1_d2
@@ -5538,12 +5538,12 @@ define i32* @test_v2i32_post_imm_st3(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st3.2s { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 6
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 6
+  ret ptr %tmp
 }
 
-define i32* @test_v2i32_post_reg_st3(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 %inc) nounwind {
+define ptr @test_v2i32_post_reg_st3(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i32_post_reg_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -5552,15 +5552,15 @@ define i32* @test_v2i32_post_reg_st3(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st3.2s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, i32*)
+declare void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32>, <2 x i32>, <2 x i32>, ptr)
 
 
-define i64* @test_v2i64_post_imm_st3(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D) nounwind {
+define ptr @test_v2i64_post_imm_st3(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D) nounwind {
 ; CHECK-LABEL: test_v2i64_post_imm_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -5568,12 +5568,12 @@ define i64* @test_v2i64_post_imm_st3(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.2d { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 6
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 6
+  ret ptr %tmp
 }
 
-define i64* @test_v2i64_post_reg_st3(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 %inc) nounwind {
+define ptr @test_v2i64_post_reg_st3(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i64_post_reg_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -5582,15 +5582,15 @@ define i64* @test_v2i64_post_reg_st3(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.2d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64*)
+declare void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, ptr)
 
 
-define i64* @test_v1i64_post_imm_st3(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D) nounwind {
+define ptr @test_v1i64_post_imm_st3(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D) nounwind {
 ; CHECK-LABEL: test_v1i64_post_imm_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $d0_d1_d2 def $d0_d1_d2
@@ -5598,12 +5598,12 @@ define i64* @test_v1i64_post_imm_st3(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st1.1d { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 3
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 3
+  ret ptr %tmp
 }
 
-define i64* @test_v1i64_post_reg_st3(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 %inc) nounwind {
+define ptr @test_v1i64_post_reg_st3(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1i64_post_reg_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -5612,15 +5612,15 @@ define i64* @test_v1i64_post_reg_st3(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st1.1d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, i64*)
+declare void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>, ptr)
 
 
-define float* @test_v4f32_post_imm_st3(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D) nounwind {
+define ptr @test_v4f32_post_imm_st3(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D) nounwind {
 ; CHECK-LABEL: test_v4f32_post_imm_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -5628,12 +5628,12 @@ define float* @test_v4f32_post_imm_st3(float* %A, float** %ptr, <4 x float> %B,
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.4s { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, float* %A)
-  %tmp = getelementptr float, float* %A, i32 12
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st3.v4f32.p0(<4 x float> %B, <4 x float> %C, <4 x float> %D, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 12
+  ret ptr %tmp
 }
 
-define float* @test_v4f32_post_reg_st3(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, i64 %inc) nounwind {
+define ptr @test_v4f32_post_reg_st3(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4f32_post_reg_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -5642,15 +5642,15 @@ define float* @test_v4f32_post_reg_st3(float* %A, float** %ptr, <4 x float> %B,
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.4s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st3.v4f32.p0(<4 x float> %B, <4 x float> %C, <4 x float> %D, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, float*)
+declare void @llvm.aarch64.neon.st3.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, ptr)
 
 
-define float* @test_v2f32_post_imm_st3(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D) nounwind {
+define ptr @test_v2f32_post_imm_st3(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D) nounwind {
 ; CHECK-LABEL: test_v2f32_post_imm_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $d0_d1_d2 def $d0_d1_d2
@@ -5658,12 +5658,12 @@ define float* @test_v2f32_post_imm_st3(float* %A, float** %ptr, <2 x float> %B,
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st3.2s { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, float* %A)
-  %tmp = getelementptr float, float* %A, i32 6
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st3.v2f32.p0(<2 x float> %B, <2 x float> %C, <2 x float> %D, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 6
+  ret ptr %tmp
 }
 
-define float* @test_v2f32_post_reg_st3(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, i64 %inc) nounwind {
+define ptr @test_v2f32_post_reg_st3(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f32_post_reg_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -5672,15 +5672,15 @@ define float* @test_v2f32_post_reg_st3(float* %A, float** %ptr, <2 x float> %B,
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st3.2s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st3.v2f32.p0(<2 x float> %B, <2 x float> %C, <2 x float> %D, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, float*)
+declare void @llvm.aarch64.neon.st3.v2f32.p0(<2 x float>, <2 x float>, <2 x float>, ptr)
 
 
-define double* @test_v2f64_post_imm_st3(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D) nounwind {
+define ptr @test_v2f64_post_imm_st3(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D) nounwind {
 ; CHECK-LABEL: test_v2f64_post_imm_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -5688,12 +5688,12 @@ define double* @test_v2f64_post_imm_st3(double* %A, double** %ptr, <2 x double>
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.2d { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, double* %A)
-  %tmp = getelementptr double, double* %A, i64 6
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st3.v2f64.p0(<2 x double> %B, <2 x double> %C, <2 x double> %D, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 6
+  ret ptr %tmp
 }
 
-define double* @test_v2f64_post_reg_st3(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, i64 %inc) nounwind {
+define ptr @test_v2f64_post_reg_st3(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f64_post_reg_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -5702,15 +5702,15 @@ define double* @test_v2f64_post_reg_st3(double* %A, double** %ptr, <2 x double>
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.2d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st3.v2f64.p0(<2 x double> %B, <2 x double> %C, <2 x double> %D, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, double*)
+declare void @llvm.aarch64.neon.st3.v2f64.p0(<2 x double>, <2 x double>, <2 x double>, ptr)
 
 
-define double* @test_v1f64_post_imm_st3(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D) nounwind {
+define ptr @test_v1f64_post_imm_st3(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D) nounwind {
 ; CHECK-LABEL: test_v1f64_post_imm_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $d0_d1_d2 def $d0_d1_d2
@@ -5718,12 +5718,12 @@ define double* @test_v1f64_post_imm_st3(double* %A, double** %ptr, <1 x double>
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st1.1d { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, double* %A)
-  %tmp = getelementptr double, double* %A, i64 3
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st3.v1f64.p0(<1 x double> %B, <1 x double> %C, <1 x double> %D, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 3
+  ret ptr %tmp
 }
 
-define double* @test_v1f64_post_reg_st3(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, i64 %inc) nounwind {
+define ptr @test_v1f64_post_reg_st3(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1f64_post_reg_st3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -5732,15 +5732,15 @@ define double* @test_v1f64_post_reg_st3(double* %A, double** %ptr, <1 x double>
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st1.1d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st3.v1f64.p0(<1 x double> %B, <1 x double> %C, <1 x double> %D, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, double*)
+declare void @llvm.aarch64.neon.st3.v1f64.p0(<1 x double>, <1 x double>, <1 x double>, ptr)
 
 
-define i8* @test_v16i8_post_imm_st4(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E) nounwind {
+define ptr @test_v16i8_post_imm_st4(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E) nounwind {
 ; CHECK-LABEL: test_v16i8_post_imm_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -5749,12 +5749,12 @@ define i8* @test_v16i8_post_imm_st4(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.16b { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 64
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 64
+  ret ptr %tmp
 }
 
-define i8* @test_v16i8_post_reg_st4(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 %inc) nounwind {
+define ptr @test_v16i8_post_reg_st4(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v16i8_post_reg_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -5763,15 +5763,15 @@ define i8* @test_v16i8_post_reg_st4(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.16b { v0, v1, v2, v3 }, [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i8*)
+declare void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, ptr)
 
 
-define i8* @test_v8i8_post_imm_st4(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E) nounwind {
+define ptr @test_v8i8_post_imm_st4(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E) nounwind {
 ; CHECK-LABEL: test_v8i8_post_imm_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -5780,12 +5780,12 @@ define i8* @test_v8i8_post_imm_st4(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C,
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st4.8b { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 32
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 32
+  ret ptr %tmp
 }
 
-define i8* @test_v8i8_post_reg_st4(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 %inc) nounwind {
+define ptr @test_v8i8_post_reg_st4(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i8_post_reg_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -5794,15 +5794,15 @@ define i8* @test_v8i8_post_reg_st4(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C,
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st4.8b { v0, v1, v2, v3 }, [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i8*)
+declare void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, ptr)
 
 
-define i16* @test_v8i16_post_imm_st4(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E) nounwind {
+define ptr @test_v8i16_post_imm_st4(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E) nounwind {
 ; CHECK-LABEL: test_v8i16_post_imm_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -5811,12 +5811,12 @@ define i16* @test_v8i16_post_imm_st4(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.8h { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 32
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 32
+  ret ptr %tmp
 }
 
-define i16* @test_v8i16_post_reg_st4(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 %inc) nounwind {
+define ptr @test_v8i16_post_reg_st4(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i16_post_reg_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -5826,15 +5826,15 @@ define i16* @test_v8i16_post_reg_st4(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.8h { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i16*)
+declare void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, ptr)
 
 
-define i16* @test_v4i16_post_imm_st4(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E) nounwind {
+define ptr @test_v4i16_post_imm_st4(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E) nounwind {
 ; CHECK-LABEL: test_v4i16_post_imm_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -5843,12 +5843,12 @@ define i16* @test_v4i16_post_imm_st4(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st4.4h { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 16
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 16
+  ret ptr %tmp
 }
 
-define i16* @test_v4i16_post_reg_st4(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 %inc) nounwind {
+define ptr @test_v4i16_post_reg_st4(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i16_post_reg_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -5858,15 +5858,15 @@ define i16* @test_v4i16_post_reg_st4(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st4.4h { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>,<4 x i16>,  i16*)
+declare void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16>, <4 x i16>, <4 x i16>,<4 x i16>,  ptr)
 
 
-define i32* @test_v4i32_post_imm_st4(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E) nounwind {
+define ptr @test_v4i32_post_imm_st4(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E) nounwind {
 ; CHECK-LABEL: test_v4i32_post_imm_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -5875,12 +5875,12 @@ define i32* @test_v4i32_post_imm_st4(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.4s { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 16
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 16
+  ret ptr %tmp
 }
 
-define i32* @test_v4i32_post_reg_st4(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 %inc) nounwind {
+define ptr @test_v4i32_post_reg_st4(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i32_post_reg_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -5890,15 +5890,15 @@ define i32* @test_v4i32_post_reg_st4(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.4s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>,<4 x i32>,  i32*)
+declare void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>,<4 x i32>,  ptr)
 
 
-define i32* @test_v2i32_post_imm_st4(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E) nounwind {
+define ptr @test_v2i32_post_imm_st4(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E) nounwind {
 ; CHECK-LABEL: test_v2i32_post_imm_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -5907,12 +5907,12 @@ define i32* @test_v2i32_post_imm_st4(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st4.2s { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 8
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 8
+  ret ptr %tmp
 }
 
-define i32* @test_v2i32_post_reg_st4(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 %inc) nounwind {
+define ptr @test_v2i32_post_reg_st4(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i32_post_reg_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -5922,15 +5922,15 @@ define i32* @test_v2i32_post_reg_st4(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st4.2s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32*)
+declare void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, ptr)
 
 
-define i64* @test_v2i64_post_imm_st4(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E) nounwind {
+define ptr @test_v2i64_post_imm_st4(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E) nounwind {
 ; CHECK-LABEL: test_v2i64_post_imm_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -5939,12 +5939,12 @@ define i64* @test_v2i64_post_imm_st4(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.2d { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 8
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 8
+  ret ptr %tmp
 }
 
-define i64* @test_v2i64_post_reg_st4(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 %inc) nounwind {
+define ptr @test_v2i64_post_reg_st4(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i64_post_reg_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -5954,15 +5954,15 @@ define i64* @test_v2i64_post_reg_st4(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.2d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>,<2 x i64>,  i64*)
+declare void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>,<2 x i64>,  ptr)
 
 
-define i64* @test_v1i64_post_imm_st4(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E) nounwind {
+define ptr @test_v1i64_post_imm_st4(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E) nounwind {
 ; CHECK-LABEL: test_v1i64_post_imm_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -5971,12 +5971,12 @@ define i64* @test_v1i64_post_imm_st4(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st1.1d { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 4
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 4
+  ret ptr %tmp
 }
 
-define i64* @test_v1i64_post_reg_st4(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 %inc) nounwind {
+define ptr @test_v1i64_post_reg_st4(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1i64_post_reg_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -5986,15 +5986,15 @@ define i64* @test_v1i64_post_reg_st4(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st1.1d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>,<1 x i64>,  i64*)
+declare void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>,<1 x i64>,  ptr)
 
 
-define float* @test_v4f32_post_imm_st4(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E) nounwind {
+define ptr @test_v4f32_post_imm_st4(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E) nounwind {
 ; CHECK-LABEL: test_v4f32_post_imm_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -6003,12 +6003,12 @@ define float* @test_v4f32_post_imm_st4(float* %A, float** %ptr, <4 x float> %B,
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.4s { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, float* %A)
-  %tmp = getelementptr float, float* %A, i32 16
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st4.v4f32.p0(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 16
+  ret ptr %tmp
 }
 
-define float* @test_v4f32_post_reg_st4(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 %inc) nounwind {
+define ptr @test_v4f32_post_reg_st4(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4f32_post_reg_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -6018,15 +6018,15 @@ define float* @test_v4f32_post_reg_st4(float* %A, float** %ptr, <4 x float> %B,
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.4s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st4.v4f32.p0(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, <4 x float>, float*)
+declare void @llvm.aarch64.neon.st4.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, <4 x float>, ptr)
 
 
-define float* @test_v2f32_post_imm_st4(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E) nounwind {
+define ptr @test_v2f32_post_imm_st4(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E) nounwind {
 ; CHECK-LABEL: test_v2f32_post_imm_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -6035,12 +6035,12 @@ define float* @test_v2f32_post_imm_st4(float* %A, float** %ptr, <2 x float> %B,
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st4.2s { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, float* %A)
-  %tmp = getelementptr float, float* %A, i32 8
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st4.v2f32.p0(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 8
+  ret ptr %tmp
 }
 
-define float* @test_v2f32_post_reg_st4(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 %inc) nounwind {
+define ptr @test_v2f32_post_reg_st4(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f32_post_reg_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -6050,15 +6050,15 @@ define float* @test_v2f32_post_reg_st4(float* %A, float** %ptr, <2 x float> %B,
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st4.2s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st4.v2f32.p0(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, <2 x float>, float*)
+declare void @llvm.aarch64.neon.st4.v2f32.p0(<2 x float>, <2 x float>, <2 x float>, <2 x float>, ptr)
 
 
-define double* @test_v2f64_post_imm_st4(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E) nounwind {
+define ptr @test_v2f64_post_imm_st4(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E) nounwind {
 ; CHECK-LABEL: test_v2f64_post_imm_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -6067,12 +6067,12 @@ define double* @test_v2f64_post_imm_st4(double* %A, double** %ptr, <2 x double>
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.2d { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, double* %A)
-  %tmp = getelementptr double, double* %A, i64 8
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st4.v2f64.p0(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 8
+  ret ptr %tmp
 }
 
-define double* @test_v2f64_post_reg_st4(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 %inc) nounwind {
+define ptr @test_v2f64_post_reg_st4(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f64_post_reg_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -6082,15 +6082,15 @@ define double* @test_v2f64_post_reg_st4(double* %A, double** %ptr, <2 x double>
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.2d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st4.v2f64.p0(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>,<2 x double>,  double*)
+declare void @llvm.aarch64.neon.st4.v2f64.p0(<2 x double>, <2 x double>, <2 x double>,<2 x double>,  ptr)
 
 
-define double* @test_v1f64_post_imm_st4(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E) nounwind {
+define ptr @test_v1f64_post_imm_st4(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E) nounwind {
 ; CHECK-LABEL: test_v1f64_post_imm_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -6099,12 +6099,12 @@ define double* @test_v1f64_post_imm_st4(double* %A, double** %ptr, <1 x double>
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st1.1d { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, double* %A)
-  %tmp = getelementptr double, double* %A, i64 4
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st4.v1f64.p0(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 4
+  ret ptr %tmp
 }
 
-define double* @test_v1f64_post_reg_st4(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 %inc) nounwind {
+define ptr @test_v1f64_post_reg_st4(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1f64_post_reg_st4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -6114,81 +6114,81 @@ define double* @test_v1f64_post_reg_st4(double* %A, double** %ptr, <1 x double>
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st1.1d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st4.v1f64.p0(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, <1 x double>, double*)
+declare void @llvm.aarch64.neon.st4.v1f64.p0(<1 x double>, <1 x double>, <1 x double>, <1 x double>, ptr)
 
 
-define i8* @test_v16i8_post_imm_st1x2(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C) nounwind {
+define ptr @test_v16i8_post_imm_st1x2(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C) nounwind {
 ; CHECK-LABEL: test_v16i8_post_imm_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st1.16b { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 32
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st1x2.v16i8.p0(<16 x i8> %B, <16 x i8> %C, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 32
+  ret ptr %tmp
 }
 
-define i8* @test_v16i8_post_reg_st1x2(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, i64 %inc) nounwind {
+define ptr @test_v16i8_post_reg_st1x2(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v16i8_post_reg_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st1.16b { v0, v1 }, [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st1x2.v16i8.p0(<16 x i8> %B, <16 x i8> %C, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x2.v16i8.p0i8(<16 x i8>, <16 x i8>, i8*)
+declare void @llvm.aarch64.neon.st1x2.v16i8.p0(<16 x i8>, <16 x i8>, ptr)
 
 
-define i8* @test_v8i8_post_imm_st1x2(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C) nounwind {
+define ptr @test_v8i8_post_imm_st1x2(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C) nounwind {
 ; CHECK-LABEL: test_v8i8_post_imm_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st1.8b { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 16
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st1x2.v8i8.p0(<8 x i8> %B, <8 x i8> %C, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 16
+  ret ptr %tmp
 }
 
-define i8* @test_v8i8_post_reg_st1x2(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, i64 %inc) nounwind {
+define ptr @test_v8i8_post_reg_st1x2(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i8_post_reg_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st1.8b { v0, v1 }, [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st1x2.v8i8.p0(<8 x i8> %B, <8 x i8> %C, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*)
+declare void @llvm.aarch64.neon.st1x2.v8i8.p0(<8 x i8>, <8 x i8>, ptr)
 
 
-define i16* @test_v8i16_post_imm_st1x2(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C) nounwind {
+define ptr @test_v8i16_post_imm_st1x2(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C) nounwind {
 ; CHECK-LABEL: test_v8i16_post_imm_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st1.8h { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 16
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st1x2.v8i16.p0(<8 x i16> %B, <8 x i16> %C, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 16
+  ret ptr %tmp
 }
 
-define i16* @test_v8i16_post_reg_st1x2(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, i64 %inc) nounwind {
+define ptr @test_v8i16_post_reg_st1x2(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i16_post_reg_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
@@ -6196,27 +6196,27 @@ define i16* @test_v8i16_post_reg_st1x2(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st1.8h { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st1x2.v8i16.p0(<8 x i16> %B, <8 x i16> %C, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x2.v8i16.p0i16(<8 x i16>, <8 x i16>, i16*)
+declare void @llvm.aarch64.neon.st1x2.v8i16.p0(<8 x i16>, <8 x i16>, ptr)
 
 
-define i16* @test_v4i16_post_imm_st1x2(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C) nounwind {
+define ptr @test_v4i16_post_imm_st1x2(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C) nounwind {
 ; CHECK-LABEL: test_v4i16_post_imm_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st1.4h { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 8
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st1x2.v4i16.p0(<4 x i16> %B, <4 x i16> %C, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 8
+  ret ptr %tmp
 }
 
-define i16* @test_v4i16_post_reg_st1x2(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, i64 %inc) nounwind {
+define ptr @test_v4i16_post_reg_st1x2(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i16_post_reg_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
@@ -6224,27 +6224,27 @@ define i16* @test_v4i16_post_reg_st1x2(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st1.4h { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st1x2.v4i16.p0(<4 x i16> %B, <4 x i16> %C, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x2.v4i16.p0i16(<4 x i16>, <4 x i16>, i16*)
+declare void @llvm.aarch64.neon.st1x2.v4i16.p0(<4 x i16>, <4 x i16>, ptr)
 
 
-define i32* @test_v4i32_post_imm_st1x2(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C) nounwind {
+define ptr @test_v4i32_post_imm_st1x2(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C) nounwind {
 ; CHECK-LABEL: test_v4i32_post_imm_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st1.4s { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 8
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st1x2.v4i32.p0(<4 x i32> %B, <4 x i32> %C, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 8
+  ret ptr %tmp
 }
 
-define i32* @test_v4i32_post_reg_st1x2(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, i64 %inc) nounwind {
+define ptr @test_v4i32_post_reg_st1x2(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i32_post_reg_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -6252,27 +6252,27 @@ define i32* @test_v4i32_post_reg_st1x2(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st1.4s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st1x2.v4i32.p0(<4 x i32> %B, <4 x i32> %C, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x2.v4i32.p0i32(<4 x i32>, <4 x i32>, i32*)
+declare void @llvm.aarch64.neon.st1x2.v4i32.p0(<4 x i32>, <4 x i32>, ptr)
 
 
-define i32* @test_v2i32_post_imm_st1x2(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C) nounwind {
+define ptr @test_v2i32_post_imm_st1x2(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C) nounwind {
 ; CHECK-LABEL: test_v2i32_post_imm_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st1.2s { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 4
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st1x2.v2i32.p0(<2 x i32> %B, <2 x i32> %C, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 4
+  ret ptr %tmp
 }
 
-define i32* @test_v2i32_post_reg_st1x2(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, i64 %inc) nounwind {
+define ptr @test_v2i32_post_reg_st1x2(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i32_post_reg_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -6280,27 +6280,27 @@ define i32* @test_v2i32_post_reg_st1x2(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st1.2s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st1x2.v2i32.p0(<2 x i32> %B, <2 x i32> %C, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x2.v2i32.p0i32(<2 x i32>, <2 x i32>, i32*)
+declare void @llvm.aarch64.neon.st1x2.v2i32.p0(<2 x i32>, <2 x i32>, ptr)
 
 
-define i64* @test_v2i64_post_imm_st1x2(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C) nounwind {
+define ptr @test_v2i64_post_imm_st1x2(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C) nounwind {
 ; CHECK-LABEL: test_v2i64_post_imm_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st1.2d { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 4
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st1x2.v2i64.p0(<2 x i64> %B, <2 x i64> %C, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 4
+  ret ptr %tmp
 }
 
-define i64* @test_v2i64_post_reg_st1x2(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, i64 %inc) nounwind {
+define ptr @test_v2i64_post_reg_st1x2(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i64_post_reg_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -6308,27 +6308,27 @@ define i64* @test_v2i64_post_reg_st1x2(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st1.2d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st1x2.v2i64.p0(<2 x i64> %B, <2 x i64> %C, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x2.v2i64.p0i64(<2 x i64>, <2 x i64>, i64*)
+declare void @llvm.aarch64.neon.st1x2.v2i64.p0(<2 x i64>, <2 x i64>, ptr)
 
 
-define i64* @test_v1i64_post_imm_st1x2(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C) nounwind {
+define ptr @test_v1i64_post_imm_st1x2(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C) nounwind {
 ; CHECK-LABEL: test_v1i64_post_imm_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st1.1d { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 2
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st1x2.v1i64.p0(<1 x i64> %B, <1 x i64> %C, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 2
+  ret ptr %tmp
 }
 
-define i64* @test_v1i64_post_reg_st1x2(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, i64 %inc) nounwind {
+define ptr @test_v1i64_post_reg_st1x2(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1i64_post_reg_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -6336,27 +6336,27 @@ define i64* @test_v1i64_post_reg_st1x2(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st1.1d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st1x2.v1i64.p0(<1 x i64> %B, <1 x i64> %C, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x2.v1i64.p0i64(<1 x i64>, <1 x i64>, i64*)
+declare void @llvm.aarch64.neon.st1x2.v1i64.p0(<1 x i64>, <1 x i64>, ptr)
 
 
-define float* @test_v4f32_post_imm_st1x2(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C) nounwind {
+define ptr @test_v4f32_post_imm_st1x2(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C) nounwind {
 ; CHECK-LABEL: test_v4f32_post_imm_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st1.4s { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v4f32.p0f32(<4 x float> %B, <4 x float> %C, float* %A)
-  %tmp = getelementptr float, float* %A, i32 8
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st1x2.v4f32.p0(<4 x float> %B, <4 x float> %C, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 8
+  ret ptr %tmp
 }
 
-define float* @test_v4f32_post_reg_st1x2(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, i64 %inc) nounwind {
+define ptr @test_v4f32_post_reg_st1x2(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4f32_post_reg_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -6364,27 +6364,27 @@ define float* @test_v4f32_post_reg_st1x2(float* %A, float** %ptr, <4 x float> %B
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st1.4s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v4f32.p0f32(<4 x float> %B, <4 x float> %C, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st1x2.v4f32.p0(<4 x float> %B, <4 x float> %C, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x2.v4f32.p0f32(<4 x float>, <4 x float>, float*)
+declare void @llvm.aarch64.neon.st1x2.v4f32.p0(<4 x float>, <4 x float>, ptr)
 
 
-define float* @test_v2f32_post_imm_st1x2(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C) nounwind {
+define ptr @test_v2f32_post_imm_st1x2(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C) nounwind {
 ; CHECK-LABEL: test_v2f32_post_imm_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st1.2s { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v2f32.p0f32(<2 x float> %B, <2 x float> %C, float* %A)
-  %tmp = getelementptr float, float* %A, i32 4
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st1x2.v2f32.p0(<2 x float> %B, <2 x float> %C, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 4
+  ret ptr %tmp
 }
 
-define float* @test_v2f32_post_reg_st1x2(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, i64 %inc) nounwind {
+define ptr @test_v2f32_post_reg_st1x2(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f32_post_reg_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -6392,27 +6392,27 @@ define float* @test_v2f32_post_reg_st1x2(float* %A, float** %ptr, <2 x float> %B
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st1.2s { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v2f32.p0f32(<2 x float> %B, <2 x float> %C, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st1x2.v2f32.p0(<2 x float> %B, <2 x float> %C, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x2.v2f32.p0f32(<2 x float>, <2 x float>, float*)
+declare void @llvm.aarch64.neon.st1x2.v2f32.p0(<2 x float>, <2 x float>, ptr)
 
 
-define double* @test_v2f64_post_imm_st1x2(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C) nounwind {
+define ptr @test_v2f64_post_imm_st1x2(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C) nounwind {
 ; CHECK-LABEL: test_v2f64_post_imm_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st1.2d { v0, v1 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v2f64.p0f64(<2 x double> %B, <2 x double> %C, double* %A)
-  %tmp = getelementptr double, double* %A, i64 4
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st1x2.v2f64.p0(<2 x double> %B, <2 x double> %C, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 4
+  ret ptr %tmp
 }
 
-define double* @test_v2f64_post_reg_st1x2(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, i64 %inc) nounwind {
+define ptr @test_v2f64_post_reg_st1x2(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f64_post_reg_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -6420,27 +6420,27 @@ define double* @test_v2f64_post_reg_st1x2(double* %A, double** %ptr, <2 x double
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st1.2d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v2f64.p0f64(<2 x double> %B, <2 x double> %C, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st1x2.v2f64.p0(<2 x double> %B, <2 x double> %C, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x2.v2f64.p0f64(<2 x double>, <2 x double>, double*)
+declare void @llvm.aarch64.neon.st1x2.v2f64.p0(<2 x double>, <2 x double>, ptr)
 
 
-define double* @test_v1f64_post_imm_st1x2(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C) nounwind {
+define ptr @test_v1f64_post_imm_st1x2(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C) nounwind {
 ; CHECK-LABEL: test_v1f64_post_imm_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st1.1d { v0, v1 }, [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v1f64.p0f64(<1 x double> %B, <1 x double> %C, double* %A)
-  %tmp = getelementptr double, double* %A, i64 2
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st1x2.v1f64.p0(<1 x double> %B, <1 x double> %C, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 2
+  ret ptr %tmp
 }
 
-define double* @test_v1f64_post_reg_st1x2(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, i64 %inc) nounwind {
+define ptr @test_v1f64_post_reg_st1x2(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1f64_post_reg_st1x2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -6448,15 +6448,15 @@ define double* @test_v1f64_post_reg_st1x2(double* %A, double** %ptr, <1 x double
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1 def $d0_d1
 ; CHECK-NEXT:    st1.1d { v0, v1 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x2.v1f64.p0f64(<1 x double> %B, <1 x double> %C, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st1x2.v1f64.p0(<1 x double> %B, <1 x double> %C, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x2.v1f64.p0f64(<1 x double>, <1 x double>, double*)
+declare void @llvm.aarch64.neon.st1x2.v1f64.p0(<1 x double>, <1 x double>, ptr)
 
 
-define i8* @test_v16i8_post_imm_st1x3(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D) nounwind {
+define ptr @test_v16i8_post_imm_st1x3(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D) nounwind {
 ; CHECK-LABEL: test_v16i8_post_imm_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -6464,12 +6464,12 @@ define i8* @test_v16i8_post_imm_st1x3(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8>
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st1.16b { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 48
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st1x3.v16i8.p0(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 48
+  ret ptr %tmp
 }
 
-define i8* @test_v16i8_post_reg_st1x3(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 %inc) nounwind {
+define ptr @test_v16i8_post_reg_st1x3(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v16i8_post_reg_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -6477,15 +6477,15 @@ define i8* @test_v16i8_post_reg_st1x3(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8>
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st1.16b { v0, v1, v2 }, [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st1x3.v16i8.p0(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x3.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i8*)
+declare void @llvm.aarch64.neon.st1x3.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, ptr)
 
 
-define i8* @test_v8i8_post_imm_st1x3(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D) nounwind {
+define ptr @test_v8i8_post_imm_st1x3(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D) nounwind {
 ; CHECK-LABEL: test_v8i8_post_imm_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $d0_d1_d2 def $d0_d1_d2
@@ -6493,12 +6493,12 @@ define i8* @test_v8i8_post_imm_st1x3(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st1.8b { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 24
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st1x3.v8i8.p0(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 24
+  ret ptr %tmp
 }
 
-define i8* @test_v8i8_post_reg_st1x3(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 %inc) nounwind {
+define ptr @test_v8i8_post_reg_st1x3(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i8_post_reg_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $d0_d1_d2 def $d0_d1_d2
@@ -6506,15 +6506,15 @@ define i8* @test_v8i8_post_reg_st1x3(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st1.8b { v0, v1, v2 }, [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st1x3.v8i8.p0(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x3.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, i8*)
+declare void @llvm.aarch64.neon.st1x3.v8i8.p0(<8 x i8>, <8 x i8>, <8 x i8>, ptr)
 
 
-define i16* @test_v8i16_post_imm_st1x3(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D) nounwind {
+define ptr @test_v8i16_post_imm_st1x3(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D) nounwind {
 ; CHECK-LABEL: test_v8i16_post_imm_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -6522,12 +6522,12 @@ define i16* @test_v8i16_post_imm_st1x3(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st1.8h { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 24
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st1x3.v8i16.p0(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 24
+  ret ptr %tmp
 }
 
-define i16* @test_v8i16_post_reg_st1x3(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 %inc) nounwind {
+define ptr @test_v8i16_post_reg_st1x3(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i16_post_reg_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
@@ -6536,15 +6536,15 @@ define i16* @test_v8i16_post_reg_st1x3(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st1.8h { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st1x3.v8i16.p0(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x3.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, i16*)
+declare void @llvm.aarch64.neon.st1x3.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, ptr)
 
 
-define i16* @test_v4i16_post_imm_st1x3(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D) nounwind {
+define ptr @test_v4i16_post_imm_st1x3(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D) nounwind {
 ; CHECK-LABEL: test_v4i16_post_imm_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $d0_d1_d2 def $d0_d1_d2
@@ -6552,12 +6552,12 @@ define i16* @test_v4i16_post_imm_st1x3(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st1.4h { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 12
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st1x3.v4i16.p0(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 12
+  ret ptr %tmp
 }
 
-define i16* @test_v4i16_post_reg_st1x3(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 %inc) nounwind {
+define ptr @test_v4i16_post_reg_st1x3(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i16_post_reg_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
@@ -6566,15 +6566,15 @@ define i16* @test_v4i16_post_reg_st1x3(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st1.4h { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st1x3.v4i16.p0(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x3.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, i16*)
+declare void @llvm.aarch64.neon.st1x3.v4i16.p0(<4 x i16>, <4 x i16>, <4 x i16>, ptr)
 
 
-define i32* @test_v4i32_post_imm_st1x3(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D) nounwind {
+define ptr @test_v4i32_post_imm_st1x3(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D) nounwind {
 ; CHECK-LABEL: test_v4i32_post_imm_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -6582,12 +6582,12 @@ define i32* @test_v4i32_post_imm_st1x3(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st1.4s { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 12
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st1x3.v4i32.p0(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 12
+  ret ptr %tmp
 }
 
-define i32* @test_v4i32_post_reg_st1x3(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 %inc) nounwind {
+define ptr @test_v4i32_post_reg_st1x3(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i32_post_reg_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -6596,15 +6596,15 @@ define i32* @test_v4i32_post_reg_st1x3(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st1.4s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st1x3.v4i32.p0(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x3.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i32*)
+declare void @llvm.aarch64.neon.st1x3.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, ptr)
 
 
-define i32* @test_v2i32_post_imm_st1x3(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D) nounwind {
+define ptr @test_v2i32_post_imm_st1x3(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D) nounwind {
 ; CHECK-LABEL: test_v2i32_post_imm_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $d0_d1_d2 def $d0_d1_d2
@@ -6612,12 +6612,12 @@ define i32* @test_v2i32_post_imm_st1x3(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st1.2s { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 6
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st1x3.v2i32.p0(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 6
+  ret ptr %tmp
 }
 
-define i32* @test_v2i32_post_reg_st1x3(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 %inc) nounwind {
+define ptr @test_v2i32_post_reg_st1x3(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i32_post_reg_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -6626,15 +6626,15 @@ define i32* @test_v2i32_post_reg_st1x3(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st1.2s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st1x3.v2i32.p0(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x3.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, i32*)
+declare void @llvm.aarch64.neon.st1x3.v2i32.p0(<2 x i32>, <2 x i32>, <2 x i32>, ptr)
 
 
-define i64* @test_v2i64_post_imm_st1x3(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D) nounwind {
+define ptr @test_v2i64_post_imm_st1x3(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D) nounwind {
 ; CHECK-LABEL: test_v2i64_post_imm_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -6642,12 +6642,12 @@ define i64* @test_v2i64_post_imm_st1x3(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st1.2d { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 6
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st1x3.v2i64.p0(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 6
+  ret ptr %tmp
 }
 
-define i64* @test_v2i64_post_reg_st1x3(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 %inc) nounwind {
+define ptr @test_v2i64_post_reg_st1x3(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i64_post_reg_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -6656,15 +6656,15 @@ define i64* @test_v2i64_post_reg_st1x3(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st1.2d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st1x3.v2i64.p0(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x3.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64*)
+declare void @llvm.aarch64.neon.st1x3.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, ptr)
 
 
-define i64* @test_v1i64_post_imm_st1x3(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D) nounwind {
+define ptr @test_v1i64_post_imm_st1x3(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D) nounwind {
 ; CHECK-LABEL: test_v1i64_post_imm_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $d0_d1_d2 def $d0_d1_d2
@@ -6672,12 +6672,12 @@ define i64* @test_v1i64_post_imm_st1x3(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st1.1d { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 3
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st1x3.v1i64.p0(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 3
+  ret ptr %tmp
 }
 
-define i64* @test_v1i64_post_reg_st1x3(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 %inc) nounwind {
+define ptr @test_v1i64_post_reg_st1x3(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1i64_post_reg_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -6686,15 +6686,15 @@ define i64* @test_v1i64_post_reg_st1x3(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st1.1d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st1x3.v1i64.p0(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x3.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, i64*)
+declare void @llvm.aarch64.neon.st1x3.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>, ptr)
 
 
-define float* @test_v4f32_post_imm_st1x3(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D) nounwind {
+define ptr @test_v4f32_post_imm_st1x3(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D) nounwind {
 ; CHECK-LABEL: test_v4f32_post_imm_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -6702,12 +6702,12 @@ define float* @test_v4f32_post_imm_st1x3(float* %A, float** %ptr, <4 x float> %B
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st1.4s { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, float* %A)
-  %tmp = getelementptr float, float* %A, i32 12
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st1x3.v4f32.p0(<4 x float> %B, <4 x float> %C, <4 x float> %D, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 12
+  ret ptr %tmp
 }
 
-define float* @test_v4f32_post_reg_st1x3(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, i64 %inc) nounwind {
+define ptr @test_v4f32_post_reg_st1x3(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4f32_post_reg_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -6716,15 +6716,15 @@ define float* @test_v4f32_post_reg_st1x3(float* %A, float** %ptr, <4 x float> %B
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st1.4s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st1x3.v4f32.p0(<4 x float> %B, <4 x float> %C, <4 x float> %D, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x3.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, float*)
+declare void @llvm.aarch64.neon.st1x3.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, ptr)
 
 
-define float* @test_v2f32_post_imm_st1x3(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D) nounwind {
+define ptr @test_v2f32_post_imm_st1x3(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D) nounwind {
 ; CHECK-LABEL: test_v2f32_post_imm_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $d0_d1_d2 def $d0_d1_d2
@@ -6732,12 +6732,12 @@ define float* @test_v2f32_post_imm_st1x3(float* %A, float** %ptr, <2 x float> %B
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st1.2s { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, float* %A)
-  %tmp = getelementptr float, float* %A, i32 6
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st1x3.v2f32.p0(<2 x float> %B, <2 x float> %C, <2 x float> %D, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 6
+  ret ptr %tmp
 }
 
-define float* @test_v2f32_post_reg_st1x3(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, i64 %inc) nounwind {
+define ptr @test_v2f32_post_reg_st1x3(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f32_post_reg_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -6746,15 +6746,15 @@ define float* @test_v2f32_post_reg_st1x3(float* %A, float** %ptr, <2 x float> %B
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st1.2s { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st1x3.v2f32.p0(<2 x float> %B, <2 x float> %C, <2 x float> %D, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x3.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, float*)
+declare void @llvm.aarch64.neon.st1x3.v2f32.p0(<2 x float>, <2 x float>, <2 x float>, ptr)
 
 
-define double* @test_v2f64_post_imm_st1x3(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D) nounwind {
+define ptr @test_v2f64_post_imm_st1x3(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D) nounwind {
 ; CHECK-LABEL: test_v2f64_post_imm_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -6762,12 +6762,12 @@ define double* @test_v2f64_post_imm_st1x3(double* %A, double** %ptr, <2 x double
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st1.2d { v0, v1, v2 }, [x0], #48
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, double* %A)
-  %tmp = getelementptr double, double* %A, i64 6
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st1x3.v2f64.p0(<2 x double> %B, <2 x double> %C, <2 x double> %D, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 6
+  ret ptr %tmp
 }
 
-define double* @test_v2f64_post_reg_st1x3(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, i64 %inc) nounwind {
+define ptr @test_v2f64_post_reg_st1x3(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f64_post_reg_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -6776,15 +6776,15 @@ define double* @test_v2f64_post_reg_st1x3(double* %A, double** %ptr, <2 x double
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st1.2d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st1x3.v2f64.p0(<2 x double> %B, <2 x double> %C, <2 x double> %D, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x3.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, double*)
+declare void @llvm.aarch64.neon.st1x3.v2f64.p0(<2 x double>, <2 x double>, <2 x double>, ptr)
 
 
-define double* @test_v1f64_post_imm_st1x3(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D) nounwind {
+define ptr @test_v1f64_post_imm_st1x3(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D) nounwind {
 ; CHECK-LABEL: test_v1f64_post_imm_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $d0_d1_d2 def $d0_d1_d2
@@ -6792,12 +6792,12 @@ define double* @test_v1f64_post_imm_st1x3(double* %A, double** %ptr, <1 x double
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st1.1d { v0, v1, v2 }, [x0], #24
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, double* %A)
-  %tmp = getelementptr double, double* %A, i64 3
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st1x3.v1f64.p0(<1 x double> %B, <1 x double> %C, <1 x double> %D, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 3
+  ret ptr %tmp
 }
 
-define double* @test_v1f64_post_reg_st1x3(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, i64 %inc) nounwind {
+define ptr @test_v1f64_post_reg_st1x3(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1f64_post_reg_st1x3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -6806,15 +6806,15 @@ define double* @test_v1f64_post_reg_st1x3(double* %A, double** %ptr, <1 x double
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2 def $d0_d1_d2
 ; CHECK-NEXT:    st1.1d { v0, v1, v2 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x3.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st1x3.v1f64.p0(<1 x double> %B, <1 x double> %C, <1 x double> %D, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x3.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, double*)
+declare void @llvm.aarch64.neon.st1x3.v1f64.p0(<1 x double>, <1 x double>, <1 x double>, ptr)
 
 
-define i8* @test_v16i8_post_imm_st1x4(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E) nounwind {
+define ptr @test_v16i8_post_imm_st1x4(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E) nounwind {
 ; CHECK-LABEL: test_v16i8_post_imm_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -6823,12 +6823,12 @@ define i8* @test_v16i8_post_imm_st1x4(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8>
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st1.16b { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 64
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st1x4.v16i8.p0(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 64
+  ret ptr %tmp
 }
 
-define i8* @test_v16i8_post_reg_st1x4(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 %inc) nounwind {
+define ptr @test_v16i8_post_reg_st1x4(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v16i8_post_reg_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -6837,15 +6837,15 @@ define i8* @test_v16i8_post_reg_st1x4(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8>
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st1.16b { v0, v1, v2, v3 }, [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st1x4.v16i8.p0(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x4.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i8*)
+declare void @llvm.aarch64.neon.st1x4.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, ptr)
 
 
-define i8* @test_v8i8_post_imm_st1x4(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E) nounwind {
+define ptr @test_v8i8_post_imm_st1x4(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E) nounwind {
 ; CHECK-LABEL: test_v8i8_post_imm_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -6854,12 +6854,12 @@ define i8* @test_v8i8_post_imm_st1x4(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st1.8b { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 32
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st1x4.v8i8.p0(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 32
+  ret ptr %tmp
 }
 
-define i8* @test_v8i8_post_reg_st1x4(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 %inc) nounwind {
+define ptr @test_v8i8_post_reg_st1x4(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i8_post_reg_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -6868,15 +6868,15 @@ define i8* @test_v8i8_post_reg_st1x4(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st1.8b { v0, v1, v2, v3 }, [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st1x4.v8i8.p0(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x4.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i8*)
+declare void @llvm.aarch64.neon.st1x4.v8i8.p0(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, ptr)
 
 
-define i16* @test_v8i16_post_imm_st1x4(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E) nounwind {
+define ptr @test_v8i16_post_imm_st1x4(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E) nounwind {
 ; CHECK-LABEL: test_v8i16_post_imm_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -6885,12 +6885,12 @@ define i16* @test_v8i16_post_imm_st1x4(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st1.8h { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 32
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st1x4.v8i16.p0(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 32
+  ret ptr %tmp
 }
 
-define i16* @test_v8i16_post_reg_st1x4(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 %inc) nounwind {
+define ptr @test_v8i16_post_reg_st1x4(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i16_post_reg_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -6900,15 +6900,15 @@ define i16* @test_v8i16_post_reg_st1x4(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st1.8h { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st1x4.v8i16.p0(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x4.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i16*)
+declare void @llvm.aarch64.neon.st1x4.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, ptr)
 
 
-define i16* @test_v4i16_post_imm_st1x4(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E) nounwind {
+define ptr @test_v4i16_post_imm_st1x4(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E) nounwind {
 ; CHECK-LABEL: test_v4i16_post_imm_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -6917,12 +6917,12 @@ define i16* @test_v4i16_post_imm_st1x4(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st1.4h { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 16
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st1x4.v4i16.p0(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 16
+  ret ptr %tmp
 }
 
-define i16* @test_v4i16_post_reg_st1x4(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 %inc) nounwind {
+define ptr @test_v4i16_post_reg_st1x4(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i16_post_reg_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -6932,15 +6932,15 @@ define i16* @test_v4i16_post_reg_st1x4(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st1.4h { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st1x4.v4i16.p0(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x4.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>,<4 x i16>,  i16*)
+declare void @llvm.aarch64.neon.st1x4.v4i16.p0(<4 x i16>, <4 x i16>, <4 x i16>,<4 x i16>,  ptr)
 
 
-define i32* @test_v4i32_post_imm_st1x4(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E) nounwind {
+define ptr @test_v4i32_post_imm_st1x4(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E) nounwind {
 ; CHECK-LABEL: test_v4i32_post_imm_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -6949,12 +6949,12 @@ define i32* @test_v4i32_post_imm_st1x4(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st1.4s { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 16
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st1x4.v4i32.p0(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 16
+  ret ptr %tmp
 }
 
-define i32* @test_v4i32_post_reg_st1x4(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 %inc) nounwind {
+define ptr @test_v4i32_post_reg_st1x4(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i32_post_reg_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -6964,15 +6964,15 @@ define i32* @test_v4i32_post_reg_st1x4(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st1.4s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st1x4.v4i32.p0(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x4.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>,<4 x i32>,  i32*)
+declare void @llvm.aarch64.neon.st1x4.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>,<4 x i32>,  ptr)
 
 
-define i32* @test_v2i32_post_imm_st1x4(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E) nounwind {
+define ptr @test_v2i32_post_imm_st1x4(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E) nounwind {
 ; CHECK-LABEL: test_v2i32_post_imm_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -6981,12 +6981,12 @@ define i32* @test_v2i32_post_imm_st1x4(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st1.2s { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 8
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st1x4.v2i32.p0(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 8
+  ret ptr %tmp
 }
 
-define i32* @test_v2i32_post_reg_st1x4(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 %inc) nounwind {
+define ptr @test_v2i32_post_reg_st1x4(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i32_post_reg_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -6996,15 +6996,15 @@ define i32* @test_v2i32_post_reg_st1x4(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st1.2s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st1x4.v2i32.p0(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x4.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32*)
+declare void @llvm.aarch64.neon.st1x4.v2i32.p0(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, ptr)
 
 
-define i64* @test_v2i64_post_imm_st1x4(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E) nounwind {
+define ptr @test_v2i64_post_imm_st1x4(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E) nounwind {
 ; CHECK-LABEL: test_v2i64_post_imm_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -7013,12 +7013,12 @@ define i64* @test_v2i64_post_imm_st1x4(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st1.2d { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 8
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st1x4.v2i64.p0(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 8
+  ret ptr %tmp
 }
 
-define i64* @test_v2i64_post_reg_st1x4(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 %inc) nounwind {
+define ptr @test_v2i64_post_reg_st1x4(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i64_post_reg_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -7028,15 +7028,15 @@ define i64* @test_v2i64_post_reg_st1x4(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st1.2d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st1x4.v2i64.p0(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x4.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>,<2 x i64>,  i64*)
+declare void @llvm.aarch64.neon.st1x4.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>,<2 x i64>,  ptr)
 
 
-define i64* @test_v1i64_post_imm_st1x4(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E) nounwind {
+define ptr @test_v1i64_post_imm_st1x4(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E) nounwind {
 ; CHECK-LABEL: test_v1i64_post_imm_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -7045,12 +7045,12 @@ define i64* @test_v1i64_post_imm_st1x4(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st1.1d { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 4
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st1x4.v1i64.p0(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 4
+  ret ptr %tmp
 }
 
-define i64* @test_v1i64_post_reg_st1x4(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 %inc) nounwind {
+define ptr @test_v1i64_post_reg_st1x4(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1i64_post_reg_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -7060,15 +7060,15 @@ define i64* @test_v1i64_post_reg_st1x4(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st1.1d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st1x4.v1i64.p0(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x4.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>,<1 x i64>,  i64*)
+declare void @llvm.aarch64.neon.st1x4.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>,<1 x i64>,  ptr)
 
 
-define float* @test_v4f32_post_imm_st1x4(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E) nounwind {
+define ptr @test_v4f32_post_imm_st1x4(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E) nounwind {
 ; CHECK-LABEL: test_v4f32_post_imm_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -7077,12 +7077,12 @@ define float* @test_v4f32_post_imm_st1x4(float* %A, float** %ptr, <4 x float> %B
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st1.4s { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, float* %A)
-  %tmp = getelementptr float, float* %A, i32 16
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st1x4.v4f32.p0(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 16
+  ret ptr %tmp
 }
 
-define float* @test_v4f32_post_reg_st1x4(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 %inc) nounwind {
+define ptr @test_v4f32_post_reg_st1x4(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4f32_post_reg_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -7092,15 +7092,15 @@ define float* @test_v4f32_post_reg_st1x4(float* %A, float** %ptr, <4 x float> %B
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st1.4s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st1x4.v4f32.p0(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x4.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, <4 x float>, float*)
+declare void @llvm.aarch64.neon.st1x4.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, <4 x float>, ptr)
 
 
-define float* @test_v2f32_post_imm_st1x4(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E) nounwind {
+define ptr @test_v2f32_post_imm_st1x4(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E) nounwind {
 ; CHECK-LABEL: test_v2f32_post_imm_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -7109,12 +7109,12 @@ define float* @test_v2f32_post_imm_st1x4(float* %A, float** %ptr, <2 x float> %B
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st1.2s { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, float* %A)
-  %tmp = getelementptr float, float* %A, i32 8
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st1x4.v2f32.p0(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 8
+  ret ptr %tmp
 }
 
-define float* @test_v2f32_post_reg_st1x4(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 %inc) nounwind {
+define ptr @test_v2f32_post_reg_st1x4(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f32_post_reg_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -7124,15 +7124,15 @@ define float* @test_v2f32_post_reg_st1x4(float* %A, float** %ptr, <2 x float> %B
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st1.2s { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st1x4.v2f32.p0(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x4.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, <2 x float>, float*)
+declare void @llvm.aarch64.neon.st1x4.v2f32.p0(<2 x float>, <2 x float>, <2 x float>, <2 x float>, ptr)
 
 
-define double* @test_v2f64_post_imm_st1x4(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E) nounwind {
+define ptr @test_v2f64_post_imm_st1x4(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E) nounwind {
 ; CHECK-LABEL: test_v2f64_post_imm_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -7141,12 +7141,12 @@ define double* @test_v2f64_post_imm_st1x4(double* %A, double** %ptr, <2 x double
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st1.2d { v0, v1, v2, v3 }, [x0], #64
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, double* %A)
-  %tmp = getelementptr double, double* %A, i64 8
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st1x4.v2f64.p0(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 8
+  ret ptr %tmp
 }
 
-define double* @test_v2f64_post_reg_st1x4(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 %inc) nounwind {
+define ptr @test_v2f64_post_reg_st1x4(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f64_post_reg_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -7156,15 +7156,15 @@ define double* @test_v2f64_post_reg_st1x4(double* %A, double** %ptr, <2 x double
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st1.2d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st1x4.v2f64.p0(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x4.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>,<2 x double>,  double*)
+declare void @llvm.aarch64.neon.st1x4.v2f64.p0(<2 x double>, <2 x double>, <2 x double>,<2 x double>,  ptr)
 
 
-define double* @test_v1f64_post_imm_st1x4(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E) nounwind {
+define ptr @test_v1f64_post_imm_st1x4(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E) nounwind {
 ; CHECK-LABEL: test_v1f64_post_imm_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -7173,12 +7173,12 @@ define double* @test_v1f64_post_imm_st1x4(double* %A, double** %ptr, <1 x double
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st1.1d { v0, v1, v2, v3 }, [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, double* %A)
-  %tmp = getelementptr double, double* %A, i64 4
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st1x4.v1f64.p0(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 4
+  ret ptr %tmp
 }
 
-define double* @test_v1f64_post_reg_st1x4(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 %inc) nounwind {
+define ptr @test_v1f64_post_reg_st1x4(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1f64_post_reg_st1x4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
@@ -7188,80 +7188,80 @@ define double* @test_v1f64_post_reg_st1x4(double* %A, double** %ptr, <1 x double
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $d0_d1_d2_d3 def $d0_d1_d2_d3
 ; CHECK-NEXT:    st1.1d { v0, v1, v2, v3 }, [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st1x4.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st1x4.v1f64.p0(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st1x4.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, <1 x double>, double*)
+declare void @llvm.aarch64.neon.st1x4.v1f64.p0(<1 x double>, <1 x double>, <1 x double>, <1 x double>, ptr)
 
-define i8* @test_v16i8_post_imm_st2lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C) nounwind {
+define ptr @test_v16i8_post_imm_st2lane(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C) nounwind {
 ; CHECK-LABEL: test_v16i8_post_imm_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.b { v0, v1 }[0], [x0], #2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 2
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st2lane.v16i8.p0(<16 x i8> %B, <16 x i8> %C, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 2
+  ret ptr %tmp
 }
 
-define i8* @test_v16i8_post_reg_st2lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, i64 %inc) nounwind {
+define ptr @test_v16i8_post_reg_st2lane(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v16i8_post_reg_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.b { v0, v1 }[0], [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st2lane.v16i8.p0(<16 x i8> %B, <16 x i8> %C, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2lane.v16i8.p0i8(<16 x i8>, <16 x i8>, i64, i8*)
+declare void @llvm.aarch64.neon.st2lane.v16i8.p0(<16 x i8>, <16 x i8>, i64, ptr)
 
 
-define i8* @test_v8i8_post_imm_st2lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C) nounwind {
+define ptr @test_v8i8_post_imm_st2lane(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C) nounwind {
 ; CHECK-LABEL: test_v8i8_post_imm_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.b { v0, v1 }[0], [x0], #2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 2
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st2lane.v8i8.p0(<8 x i8> %B, <8 x i8> %C, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 2
+  ret ptr %tmp
 }
 
-define i8* @test_v8i8_post_reg_st2lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, i64 %inc) nounwind {
+define ptr @test_v8i8_post_reg_st2lane(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i8_post_reg_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.b { v0, v1 }[0], [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st2lane.v8i8.p0(<8 x i8> %B, <8 x i8> %C, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2lane.v8i8.p0i8(<8 x i8>, <8 x i8>, i64, i8*)
+declare void @llvm.aarch64.neon.st2lane.v8i8.p0(<8 x i8>, <8 x i8>, i64, ptr)
 
 
-define i16* @test_v8i16_post_imm_st2lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C) nounwind {
+define ptr @test_v8i16_post_imm_st2lane(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C) nounwind {
 ; CHECK-LABEL: test_v8i16_post_imm_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.h { v0, v1 }[0], [x0], #4
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 2
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st2lane.v8i16.p0(<8 x i16> %B, <8 x i16> %C, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 2
+  ret ptr %tmp
 }
 
-define i16* @test_v8i16_post_reg_st2lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, i64 %inc) nounwind {
+define ptr @test_v8i16_post_reg_st2lane(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i16_post_reg_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
@@ -7269,27 +7269,27 @@ define i16* @test_v8i16_post_reg_st2lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.h { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st2lane.v8i16.p0(<8 x i16> %B, <8 x i16> %C, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2lane.v8i16.p0i16(<8 x i16>, <8 x i16>, i64, i16*)
+declare void @llvm.aarch64.neon.st2lane.v8i16.p0(<8 x i16>, <8 x i16>, i64, ptr)
 
 
-define i16* @test_v4i16_post_imm_st2lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C) nounwind {
+define ptr @test_v4i16_post_imm_st2lane(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C) nounwind {
 ; CHECK-LABEL: test_v4i16_post_imm_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.h { v0, v1 }[0], [x0], #4
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 2
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st2lane.v4i16.p0(<4 x i16> %B, <4 x i16> %C, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 2
+  ret ptr %tmp
 }
 
-define i16* @test_v4i16_post_reg_st2lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, i64 %inc) nounwind {
+define ptr @test_v4i16_post_reg_st2lane(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i16_post_reg_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
@@ -7297,27 +7297,27 @@ define i16* @test_v4i16_post_reg_st2lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.h { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st2lane.v4i16.p0(<4 x i16> %B, <4 x i16> %C, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2lane.v4i16.p0i16(<4 x i16>, <4 x i16>, i64, i16*)
+declare void @llvm.aarch64.neon.st2lane.v4i16.p0(<4 x i16>, <4 x i16>, i64, ptr)
 
 
-define i32* @test_v4i32_post_imm_st2lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C) nounwind {
+define ptr @test_v4i32_post_imm_st2lane(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C) nounwind {
 ; CHECK-LABEL: test_v4i32_post_imm_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.s { v0, v1 }[0], [x0], #8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 2
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st2lane.v4i32.p0(<4 x i32> %B, <4 x i32> %C, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 2
+  ret ptr %tmp
 }
 
-define i32* @test_v4i32_post_reg_st2lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, i64 %inc) nounwind {
+define ptr @test_v4i32_post_reg_st2lane(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i32_post_reg_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -7325,27 +7325,27 @@ define i32* @test_v4i32_post_reg_st2lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.s { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st2lane.v4i32.p0(<4 x i32> %B, <4 x i32> %C, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2lane.v4i32.p0i32(<4 x i32>, <4 x i32>, i64, i32*)
+declare void @llvm.aarch64.neon.st2lane.v4i32.p0(<4 x i32>, <4 x i32>, i64, ptr)
 
 
-define i32* @test_v2i32_post_imm_st2lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C) nounwind {
+define ptr @test_v2i32_post_imm_st2lane(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C) nounwind {
 ; CHECK-LABEL: test_v2i32_post_imm_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.s { v0, v1 }[0], [x0], #8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 2
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st2lane.v2i32.p0(<2 x i32> %B, <2 x i32> %C, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 2
+  ret ptr %tmp
 }
 
-define i32* @test_v2i32_post_reg_st2lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, i64 %inc) nounwind {
+define ptr @test_v2i32_post_reg_st2lane(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i32_post_reg_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -7353,27 +7353,27 @@ define i32* @test_v2i32_post_reg_st2lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.s { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st2lane.v2i32.p0(<2 x i32> %B, <2 x i32> %C, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2lane.v2i32.p0i32(<2 x i32>, <2 x i32>, i64, i32*)
+declare void @llvm.aarch64.neon.st2lane.v2i32.p0(<2 x i32>, <2 x i32>, i64, ptr)
 
 
-define i64* @test_v2i64_post_imm_st2lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C) nounwind {
+define ptr @test_v2i64_post_imm_st2lane(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C) nounwind {
 ; CHECK-LABEL: test_v2i64_post_imm_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.d { v0, v1 }[0], [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 2
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st2lane.v2i64.p0(<2 x i64> %B, <2 x i64> %C, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 2
+  ret ptr %tmp
 }
 
-define i64* @test_v2i64_post_reg_st2lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, i64 %inc) nounwind {
+define ptr @test_v2i64_post_reg_st2lane(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i64_post_reg_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -7381,27 +7381,27 @@ define i64* @test_v2i64_post_reg_st2lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.d { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st2lane.v2i64.p0(<2 x i64> %B, <2 x i64> %C, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2lane.v2i64.p0i64(<2 x i64>, <2 x i64>, i64, i64*)
+declare void @llvm.aarch64.neon.st2lane.v2i64.p0(<2 x i64>, <2 x i64>, i64, ptr)
 
 
-define i64* @test_v1i64_post_imm_st2lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C) nounwind {
+define ptr @test_v1i64_post_imm_st2lane(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C) nounwind {
 ; CHECK-LABEL: test_v1i64_post_imm_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.d { v0, v1 }[0], [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 2
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st2lane.v1i64.p0(<1 x i64> %B, <1 x i64> %C, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 2
+  ret ptr %tmp
 }
 
-define i64* @test_v1i64_post_reg_st2lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, i64 %inc) nounwind {
+define ptr @test_v1i64_post_reg_st2lane(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1i64_post_reg_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -7409,27 +7409,27 @@ define i64* @test_v1i64_post_reg_st2lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.d { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st2lane.v1i64.p0(<1 x i64> %B, <1 x i64> %C, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2lane.v1i64.p0i64(<1 x i64>, <1 x i64>, i64, i64*)
+declare void @llvm.aarch64.neon.st2lane.v1i64.p0(<1 x i64>, <1 x i64>, i64, ptr)
 
 
-define float* @test_v4f32_post_imm_st2lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C) nounwind {
+define ptr @test_v4f32_post_imm_st2lane(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C) nounwind {
 ; CHECK-LABEL: test_v4f32_post_imm_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.s { v0, v1 }[0], [x0], #8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i32 2
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st2lane.v4f32.p0(<4 x float> %B, <4 x float> %C, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 2
+  ret ptr %tmp
 }
 
-define float* @test_v4f32_post_reg_st2lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, i64 %inc) nounwind {
+define ptr @test_v4f32_post_reg_st2lane(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4f32_post_reg_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -7437,27 +7437,27 @@ define float* @test_v4f32_post_reg_st2lane(float* %A, float** %ptr, <4 x float>
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.s { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st2lane.v4f32.p0(<4 x float> %B, <4 x float> %C, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2lane.v4f32.p0f32(<4 x float>, <4 x float>, i64, float*)
+declare void @llvm.aarch64.neon.st2lane.v4f32.p0(<4 x float>, <4 x float>, i64, ptr)
 
 
-define float* @test_v2f32_post_imm_st2lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C) nounwind {
+define ptr @test_v2f32_post_imm_st2lane(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C) nounwind {
 ; CHECK-LABEL: test_v2f32_post_imm_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.s { v0, v1 }[0], [x0], #8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i32 2
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st2lane.v2f32.p0(<2 x float> %B, <2 x float> %C, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 2
+  ret ptr %tmp
 }
 
-define float* @test_v2f32_post_reg_st2lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, i64 %inc) nounwind {
+define ptr @test_v2f32_post_reg_st2lane(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f32_post_reg_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -7465,27 +7465,27 @@ define float* @test_v2f32_post_reg_st2lane(float* %A, float** %ptr, <2 x float>
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.s { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st2lane.v2f32.p0(<2 x float> %B, <2 x float> %C, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2lane.v2f32.p0f32(<2 x float>, <2 x float>, i64, float*)
+declare void @llvm.aarch64.neon.st2lane.v2f32.p0(<2 x float>, <2 x float>, i64, ptr)
 
 
-define double* @test_v2f64_post_imm_st2lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C) nounwind {
+define ptr @test_v2f64_post_imm_st2lane(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C) nounwind {
 ; CHECK-LABEL: test_v2f64_post_imm_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.d { v0, v1 }[0], [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 2
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st2lane.v2f64.p0(<2 x double> %B, <2 x double> %C, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 2
+  ret ptr %tmp
 }
 
-define double* @test_v2f64_post_reg_st2lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, i64 %inc) nounwind {
+define ptr @test_v2f64_post_reg_st2lane(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f64_post_reg_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -7493,27 +7493,27 @@ define double* @test_v2f64_post_reg_st2lane(double* %A, double** %ptr, <2 x doub
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.d { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st2lane.v2f64.p0(<2 x double> %B, <2 x double> %C, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2lane.v2f64.p0f64(<2 x double>, <2 x double>, i64, double*)
+declare void @llvm.aarch64.neon.st2lane.v2f64.p0(<2 x double>, <2 x double>, i64, ptr)
 
 
-define double* @test_v1f64_post_imm_st2lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C) nounwind {
+define ptr @test_v1f64_post_imm_st2lane(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C) nounwind {
 ; CHECK-LABEL: test_v1f64_post_imm_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d1 killed $d1 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.d { v0, v1 }[0], [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 2
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st2lane.v1f64.p0(<1 x double> %B, <1 x double> %C, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 2
+  ret ptr %tmp
 }
 
-define double* @test_v1f64_post_reg_st2lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, i64 %inc) nounwind {
+define ptr @test_v1f64_post_reg_st2lane(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1f64_post_reg_st2lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -7521,15 +7521,15 @@ define double* @test_v1f64_post_reg_st2lane(double* %A, double** %ptr, <1 x doub
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1 def $q0_q1
 ; CHECK-NEXT:    st2.d { v0, v1 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st2lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st2lane.v1f64.p0(<1 x double> %B, <1 x double> %C, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st2lane.v1f64.p0f64(<1 x double>, <1 x double>, i64, double*)
+declare void @llvm.aarch64.neon.st2lane.v1f64.p0(<1 x double>, <1 x double>, i64, ptr)
 
 
-define i8* @test_v16i8_post_imm_st3lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D) nounwind {
+define ptr @test_v16i8_post_imm_st3lane(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D) nounwind {
 ; CHECK-LABEL: test_v16i8_post_imm_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -7537,12 +7537,12 @@ define i8* @test_v16i8_post_imm_st3lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.b { v0, v1, v2 }[0], [x0], #3
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 3
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st3lane.v16i8.p0(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 3
+  ret ptr %tmp
 }
 
-define i8* @test_v16i8_post_reg_st3lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 %inc) nounwind {
+define ptr @test_v16i8_post_reg_st3lane(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v16i8_post_reg_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -7550,15 +7550,15 @@ define i8* @test_v16i8_post_reg_st3lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.b { v0, v1, v2 }[0], [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st3lane.v16i8.p0(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i64, i8*)
+declare void @llvm.aarch64.neon.st3lane.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, i64, ptr)
 
 
-define i8* @test_v8i8_post_imm_st3lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D) nounwind {
+define ptr @test_v8i8_post_imm_st3lane(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D) nounwind {
 ; CHECK-LABEL: test_v8i8_post_imm_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -7566,12 +7566,12 @@ define i8* @test_v8i8_post_imm_st3lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8>
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.b { v0, v1, v2 }[0], [x0], #3
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 3
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st3lane.v8i8.p0(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 3
+  ret ptr %tmp
 }
 
-define i8* @test_v8i8_post_reg_st3lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 %inc) nounwind {
+define ptr @test_v8i8_post_reg_st3lane(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i8_post_reg_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -7579,15 +7579,15 @@ define i8* @test_v8i8_post_reg_st3lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8>
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.b { v0, v1, v2 }[0], [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st3lane.v8i8.p0(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3lane.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, i64, i8*)
+declare void @llvm.aarch64.neon.st3lane.v8i8.p0(<8 x i8>, <8 x i8>, <8 x i8>, i64, ptr)
 
 
-define i16* @test_v8i16_post_imm_st3lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D) nounwind {
+define ptr @test_v8i16_post_imm_st3lane(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D) nounwind {
 ; CHECK-LABEL: test_v8i16_post_imm_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -7595,12 +7595,12 @@ define i16* @test_v8i16_post_imm_st3lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.h { v0, v1, v2 }[0], [x0], #6
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 3
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st3lane.v8i16.p0(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 3
+  ret ptr %tmp
 }
 
-define i16* @test_v8i16_post_reg_st3lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 %inc) nounwind {
+define ptr @test_v8i16_post_reg_st3lane(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i16_post_reg_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
@@ -7609,15 +7609,15 @@ define i16* @test_v8i16_post_reg_st3lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.h { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st3lane.v8i16.p0(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, i64, i16*)
+declare void @llvm.aarch64.neon.st3lane.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, i64, ptr)
 
 
-define i16* @test_v4i16_post_imm_st3lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D) nounwind {
+define ptr @test_v4i16_post_imm_st3lane(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D) nounwind {
 ; CHECK-LABEL: test_v4i16_post_imm_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -7625,12 +7625,12 @@ define i16* @test_v4i16_post_imm_st3lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.h { v0, v1, v2 }[0], [x0], #6
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 3
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st3lane.v4i16.p0(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 3
+  ret ptr %tmp
 }
 
-define i16* @test_v4i16_post_reg_st3lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 %inc) nounwind {
+define ptr @test_v4i16_post_reg_st3lane(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i16_post_reg_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
@@ -7639,15 +7639,15 @@ define i16* @test_v4i16_post_reg_st3lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.h { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st3lane.v4i16.p0(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3lane.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, i64, i16*)
+declare void @llvm.aarch64.neon.st3lane.v4i16.p0(<4 x i16>, <4 x i16>, <4 x i16>, i64, ptr)
 
 
-define i32* @test_v4i32_post_imm_st3lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D) nounwind {
+define ptr @test_v4i32_post_imm_st3lane(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D) nounwind {
 ; CHECK-LABEL: test_v4i32_post_imm_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -7655,12 +7655,12 @@ define i32* @test_v4i32_post_imm_st3lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.s { v0, v1, v2 }[0], [x0], #12
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 3
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st3lane.v4i32.p0(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 3
+  ret ptr %tmp
 }
 
-define i32* @test_v4i32_post_reg_st3lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 %inc) nounwind {
+define ptr @test_v4i32_post_reg_st3lane(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i32_post_reg_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -7669,15 +7669,15 @@ define i32* @test_v4i32_post_reg_st3lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.s { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st3lane.v4i32.p0(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i64, i32*)
+declare void @llvm.aarch64.neon.st3lane.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, i64, ptr)
 
 
-define i32* @test_v2i32_post_imm_st3lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D) nounwind {
+define ptr @test_v2i32_post_imm_st3lane(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D) nounwind {
 ; CHECK-LABEL: test_v2i32_post_imm_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -7685,12 +7685,12 @@ define i32* @test_v2i32_post_imm_st3lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.s { v0, v1, v2 }[0], [x0], #12
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 3
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st3lane.v2i32.p0(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 3
+  ret ptr %tmp
 }
 
-define i32* @test_v2i32_post_reg_st3lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 %inc) nounwind {
+define ptr @test_v2i32_post_reg_st3lane(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i32_post_reg_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -7699,15 +7699,15 @@ define i32* @test_v2i32_post_reg_st3lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.s { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st3lane.v2i32.p0(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3lane.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, i64, i32*)
+declare void @llvm.aarch64.neon.st3lane.v2i32.p0(<2 x i32>, <2 x i32>, <2 x i32>, i64, ptr)
 
 
-define i64* @test_v2i64_post_imm_st3lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D) nounwind {
+define ptr @test_v2i64_post_imm_st3lane(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D) nounwind {
 ; CHECK-LABEL: test_v2i64_post_imm_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -7715,12 +7715,12 @@ define i64* @test_v2i64_post_imm_st3lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.d { v0, v1, v2 }[0], [x0], #24
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 3
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st3lane.v2i64.p0(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 3
+  ret ptr %tmp
 }
 
-define i64* @test_v2i64_post_reg_st3lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 %inc) nounwind {
+define ptr @test_v2i64_post_reg_st3lane(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i64_post_reg_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -7729,15 +7729,15 @@ define i64* @test_v2i64_post_reg_st3lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.d { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st3lane.v2i64.p0(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64, i64*)
+declare void @llvm.aarch64.neon.st3lane.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, i64, ptr)
 
 
-define i64* @test_v1i64_post_imm_st3lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D) nounwind {
+define ptr @test_v1i64_post_imm_st3lane(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D) nounwind {
 ; CHECK-LABEL: test_v1i64_post_imm_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -7745,12 +7745,12 @@ define i64* @test_v1i64_post_imm_st3lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.d { v0, v1, v2 }[0], [x0], #24
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 3
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st3lane.v1i64.p0(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 3
+  ret ptr %tmp
 }
 
-define i64* @test_v1i64_post_reg_st3lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 %inc) nounwind {
+define ptr @test_v1i64_post_reg_st3lane(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1i64_post_reg_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -7759,15 +7759,15 @@ define i64* @test_v1i64_post_reg_st3lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.d { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st3lane.v1i64.p0(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3lane.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, i64, i64*)
+declare void @llvm.aarch64.neon.st3lane.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>, i64, ptr)
 
 
-define float* @test_v4f32_post_imm_st3lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D) nounwind {
+define ptr @test_v4f32_post_imm_st3lane(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D) nounwind {
 ; CHECK-LABEL: test_v4f32_post_imm_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -7775,12 +7775,12 @@ define float* @test_v4f32_post_imm_st3lane(float* %A, float** %ptr, <4 x float>
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.s { v0, v1, v2 }[0], [x0], #12
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i32 3
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st3lane.v4f32.p0(<4 x float> %B, <4 x float> %C, <4 x float> %D, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 3
+  ret ptr %tmp
 }
 
-define float* @test_v4f32_post_reg_st3lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, i64 %inc) nounwind {
+define ptr @test_v4f32_post_reg_st3lane(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4f32_post_reg_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -7789,15 +7789,15 @@ define float* @test_v4f32_post_reg_st3lane(float* %A, float** %ptr, <4 x float>
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.s { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st3lane.v4f32.p0(<4 x float> %B, <4 x float> %C, <4 x float> %D, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3lane.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, i64, float*)
+declare void @llvm.aarch64.neon.st3lane.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, i64, ptr)
 
 
-define float* @test_v2f32_post_imm_st3lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D) nounwind {
+define ptr @test_v2f32_post_imm_st3lane(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D) nounwind {
 ; CHECK-LABEL: test_v2f32_post_imm_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -7805,12 +7805,12 @@ define float* @test_v2f32_post_imm_st3lane(float* %A, float** %ptr, <2 x float>
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.s { v0, v1, v2 }[0], [x0], #12
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i32 3
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st3lane.v2f32.p0(<2 x float> %B, <2 x float> %C, <2 x float> %D, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 3
+  ret ptr %tmp
 }
 
-define float* @test_v2f32_post_reg_st3lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, i64 %inc) nounwind {
+define ptr @test_v2f32_post_reg_st3lane(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f32_post_reg_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -7819,15 +7819,15 @@ define float* @test_v2f32_post_reg_st3lane(float* %A, float** %ptr, <2 x float>
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.s { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st3lane.v2f32.p0(<2 x float> %B, <2 x float> %C, <2 x float> %D, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3lane.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, i64, float*)
+declare void @llvm.aarch64.neon.st3lane.v2f32.p0(<2 x float>, <2 x float>, <2 x float>, i64, ptr)
 
 
-define double* @test_v2f64_post_imm_st3lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D) nounwind {
+define ptr @test_v2f64_post_imm_st3lane(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D) nounwind {
 ; CHECK-LABEL: test_v2f64_post_imm_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q2 killed $q2 killed $q0_q1_q2 def $q0_q1_q2
@@ -7835,12 +7835,12 @@ define double* @test_v2f64_post_imm_st3lane(double* %A, double** %ptr, <2 x doub
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.d { v0, v1, v2 }[0], [x0], #24
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 3
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st3lane.v2f64.p0(<2 x double> %B, <2 x double> %C, <2 x double> %D, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 3
+  ret ptr %tmp
 }
 
-define double* @test_v2f64_post_reg_st3lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, i64 %inc) nounwind {
+define ptr @test_v2f64_post_reg_st3lane(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f64_post_reg_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -7849,15 +7849,15 @@ define double* @test_v2f64_post_reg_st3lane(double* %A, double** %ptr, <2 x doub
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.d { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st3lane.v2f64.p0(<2 x double> %B, <2 x double> %C, <2 x double> %D, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3lane.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, i64, double*)
+declare void @llvm.aarch64.neon.st3lane.v2f64.p0(<2 x double>, <2 x double>, <2 x double>, i64, ptr)
 
 
-define double* @test_v1f64_post_imm_st3lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D) nounwind {
+define ptr @test_v1f64_post_imm_st3lane(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D) nounwind {
 ; CHECK-LABEL: test_v1f64_post_imm_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d2 killed $d2 killed $q0_q1_q2 def $q0_q1_q2
@@ -7865,12 +7865,12 @@ define double* @test_v1f64_post_imm_st3lane(double* %A, double** %ptr, <1 x doub
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.d { v0, v1, v2 }[0], [x0], #24
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 3
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st3lane.v1f64.p0(<1 x double> %B, <1 x double> %C, <1 x double> %D, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 3
+  ret ptr %tmp
 }
 
-define double* @test_v1f64_post_reg_st3lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, i64 %inc) nounwind {
+define ptr @test_v1f64_post_reg_st3lane(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1f64_post_reg_st3lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
@@ -7879,15 +7879,15 @@ define double* @test_v1f64_post_reg_st3lane(double* %A, double** %ptr, <1 x doub
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2 def $q0_q1_q2
 ; CHECK-NEXT:    st3.d { v0, v1, v2 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st3lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st3lane.v1f64.p0(<1 x double> %B, <1 x double> %C, <1 x double> %D, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st3lane.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, i64, double*)
+declare void @llvm.aarch64.neon.st3lane.v1f64.p0(<1 x double>, <1 x double>, <1 x double>, i64, ptr)
 
 
-define i8* @test_v16i8_post_imm_st4lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E) nounwind {
+define ptr @test_v16i8_post_imm_st4lane(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E) nounwind {
 ; CHECK-LABEL: test_v16i8_post_imm_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -7896,12 +7896,12 @@ define i8* @test_v16i8_post_imm_st4lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.b { v0, v1, v2, v3 }[0], [x0], #4
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 4
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st4lane.v16i8.p0(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 4
+  ret ptr %tmp
 }
 
-define i8* @test_v16i8_post_reg_st4lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 %inc) nounwind {
+define ptr @test_v16i8_post_reg_st4lane(ptr %A, ptr %ptr, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v16i8_post_reg_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -7910,15 +7910,15 @@ define i8* @test_v16i8_post_reg_st4lane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.b { v0, v1, v2, v3 }[0], [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st4lane.v16i8.p0(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i64, i8*)
+declare void @llvm.aarch64.neon.st4lane.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i64, ptr)
 
 
-define i8* @test_v8i8_post_imm_st4lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E) nounwind {
+define ptr @test_v8i8_post_imm_st4lane(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E) nounwind {
 ; CHECK-LABEL: test_v8i8_post_imm_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -7927,12 +7927,12 @@ define i8* @test_v8i8_post_imm_st4lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8>
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.b { v0, v1, v2, v3 }[0], [x0], #4
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i32 4
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st4lane.v8i8.p0(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i32 4
+  ret ptr %tmp
 }
 
-define i8* @test_v8i8_post_reg_st4lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 %inc) nounwind {
+define ptr @test_v8i8_post_reg_st4lane(ptr %A, ptr %ptr, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i8_post_reg_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -7941,15 +7941,15 @@ define i8* @test_v8i8_post_reg_st4lane(i8* %A, i8** %ptr, <8 x i8> %B, <8 x i8>
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.b { v0, v1, v2, v3 }[0], [x0], x2
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 0, i8* %A)
-  %tmp = getelementptr i8, i8* %A, i64 %inc
-  ret i8* %tmp
+  call void @llvm.aarch64.neon.st4lane.v8i8.p0(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 0, ptr %A)
+  %tmp = getelementptr i8, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4lane.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i64, i8*)
+declare void @llvm.aarch64.neon.st4lane.v8i8.p0(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i64, ptr)
 
 
-define i16* @test_v8i16_post_imm_st4lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E) nounwind {
+define ptr @test_v8i16_post_imm_st4lane(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E) nounwind {
 ; CHECK-LABEL: test_v8i16_post_imm_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -7958,12 +7958,12 @@ define i16* @test_v8i16_post_imm_st4lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.h { v0, v1, v2, v3 }[0], [x0], #8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 4
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st4lane.v8i16.p0(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 4
+  ret ptr %tmp
 }
 
-define i16* @test_v8i16_post_reg_st4lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 %inc) nounwind {
+define ptr @test_v8i16_post_reg_st4lane(ptr %A, ptr %ptr, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v8i16_post_reg_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -7973,15 +7973,15 @@ define i16* @test_v8i16_post_reg_st4lane(i16* %A, i16** %ptr, <8 x i16> %B, <8 x
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.h { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st4lane.v8i16.p0(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i64, i16*)
+declare void @llvm.aarch64.neon.st4lane.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i64, ptr)
 
 
-define i16* @test_v4i16_post_imm_st4lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E) nounwind {
+define ptr @test_v4i16_post_imm_st4lane(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E) nounwind {
 ; CHECK-LABEL: test_v4i16_post_imm_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -7990,12 +7990,12 @@ define i16* @test_v4i16_post_imm_st4lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.h { v0, v1, v2, v3 }[0], [x0], #8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i32 4
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st4lane.v4i16.p0(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i32 4
+  ret ptr %tmp
 }
 
-define i16* @test_v4i16_post_reg_st4lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 %inc) nounwind {
+define ptr @test_v4i16_post_reg_st4lane(ptr %A, ptr %ptr, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i16_post_reg_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -8005,15 +8005,15 @@ define i16* @test_v4i16_post_reg_st4lane(i16* %A, i16** %ptr, <4 x i16> %B, <4 x
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.h { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 0, i16* %A)
-  %tmp = getelementptr i16, i16* %A, i64 %inc
-  ret i16* %tmp
+  call void @llvm.aarch64.neon.st4lane.v4i16.p0(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 0, ptr %A)
+  %tmp = getelementptr i16, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4lane.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i64, i16*)
+declare void @llvm.aarch64.neon.st4lane.v4i16.p0(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i64, ptr)
 
 
-define i32* @test_v4i32_post_imm_st4lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E) nounwind {
+define ptr @test_v4i32_post_imm_st4lane(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E) nounwind {
 ; CHECK-LABEL: test_v4i32_post_imm_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -8022,12 +8022,12 @@ define i32* @test_v4i32_post_imm_st4lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.s { v0, v1, v2, v3 }[0], [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 4
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st4lane.v4i32.p0(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 4
+  ret ptr %tmp
 }
 
-define i32* @test_v4i32_post_reg_st4lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 %inc) nounwind {
+define ptr @test_v4i32_post_reg_st4lane(ptr %A, ptr %ptr, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4i32_post_reg_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -8037,15 +8037,15 @@ define i32* @test_v4i32_post_reg_st4lane(i32* %A, i32** %ptr, <4 x i32> %B, <4 x
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.s { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st4lane.v4i32.p0(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, i32*)
+declare void @llvm.aarch64.neon.st4lane.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, ptr)
 
 
-define i32* @test_v2i32_post_imm_st4lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E) nounwind {
+define ptr @test_v2i32_post_imm_st4lane(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E) nounwind {
 ; CHECK-LABEL: test_v2i32_post_imm_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -8054,12 +8054,12 @@ define i32* @test_v2i32_post_imm_st4lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.s { v0, v1, v2, v3 }[0], [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i32 4
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st4lane.v2i32.p0(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i32 4
+  ret ptr %tmp
 }
 
-define i32* @test_v2i32_post_reg_st4lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 %inc) nounwind {
+define ptr @test_v2i32_post_reg_st4lane(ptr %A, ptr %ptr, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i32_post_reg_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -8069,15 +8069,15 @@ define i32* @test_v2i32_post_reg_st4lane(i32* %A, i32** %ptr, <2 x i32> %B, <2 x
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.s { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 0, i32* %A)
-  %tmp = getelementptr i32, i32* %A, i64 %inc
-  ret i32* %tmp
+  call void @llvm.aarch64.neon.st4lane.v2i32.p0(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 0, ptr %A)
+  %tmp = getelementptr i32, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4lane.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i64, i32*)
+declare void @llvm.aarch64.neon.st4lane.v2i32.p0(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i64, ptr)
 
 
-define i64* @test_v2i64_post_imm_st4lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E) nounwind {
+define ptr @test_v2i64_post_imm_st4lane(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E) nounwind {
 ; CHECK-LABEL: test_v2i64_post_imm_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -8086,12 +8086,12 @@ define i64* @test_v2i64_post_imm_st4lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.d { v0, v1, v2, v3 }[0], [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 4
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st4lane.v2i64.p0(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 4
+  ret ptr %tmp
 }
 
-define i64* @test_v2i64_post_reg_st4lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 %inc) nounwind {
+define ptr @test_v2i64_post_reg_st4lane(ptr %A, ptr %ptr, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2i64_post_reg_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -8101,15 +8101,15 @@ define i64* @test_v2i64_post_reg_st4lane(i64* %A, i64** %ptr, <2 x i64> %B, <2 x
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.d { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st4lane.v2i64.p0(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64, i64*)
+declare void @llvm.aarch64.neon.st4lane.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64, ptr)
 
 
-define i64* @test_v1i64_post_imm_st4lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E) nounwind {
+define ptr @test_v1i64_post_imm_st4lane(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E) nounwind {
 ; CHECK-LABEL: test_v1i64_post_imm_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -8118,12 +8118,12 @@ define i64* @test_v1i64_post_imm_st4lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.d { v0, v1, v2, v3 }[0], [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 4
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st4lane.v1i64.p0(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 4
+  ret ptr %tmp
 }
 
-define i64* @test_v1i64_post_reg_st4lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 %inc) nounwind {
+define ptr @test_v1i64_post_reg_st4lane(ptr %A, ptr %ptr, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1i64_post_reg_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -8133,15 +8133,15 @@ define i64* @test_v1i64_post_reg_st4lane(i64* %A, i64** %ptr, <1 x i64> %B, <1 x
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.d { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 0, i64* %A)
-  %tmp = getelementptr i64, i64* %A, i64 %inc
-  ret i64* %tmp
+  call void @llvm.aarch64.neon.st4lane.v1i64.p0(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 0, ptr %A)
+  %tmp = getelementptr i64, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4lane.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64, i64*)
+declare void @llvm.aarch64.neon.st4lane.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64, ptr)
 
 
-define float* @test_v4f32_post_imm_st4lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E) nounwind {
+define ptr @test_v4f32_post_imm_st4lane(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E) nounwind {
 ; CHECK-LABEL: test_v4f32_post_imm_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -8150,12 +8150,12 @@ define float* @test_v4f32_post_imm_st4lane(float* %A, float** %ptr, <4 x float>
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.s { v0, v1, v2, v3 }[0], [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i32 4
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st4lane.v4f32.p0(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 4
+  ret ptr %tmp
 }
 
-define float* @test_v4f32_post_reg_st4lane(float* %A, float** %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 %inc) nounwind {
+define ptr @test_v4f32_post_reg_st4lane(ptr %A, ptr %ptr, <4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v4f32_post_reg_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -8165,15 +8165,15 @@ define float* @test_v4f32_post_reg_st4lane(float* %A, float** %ptr, <4 x float>
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.s { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st4lane.v4f32.p0(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4lane.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, <4 x float>, i64, float*)
+declare void @llvm.aarch64.neon.st4lane.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, <4 x float>, i64, ptr)
 
 
-define float* @test_v2f32_post_imm_st4lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E) nounwind {
+define ptr @test_v2f32_post_imm_st4lane(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E) nounwind {
 ; CHECK-LABEL: test_v2f32_post_imm_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -8182,12 +8182,12 @@ define float* @test_v2f32_post_imm_st4lane(float* %A, float** %ptr, <2 x float>
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.s { v0, v1, v2, v3 }[0], [x0], #16
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i32 4
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st4lane.v2f32.p0(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i32 4
+  ret ptr %tmp
 }
 
-define float* @test_v2f32_post_reg_st4lane(float* %A, float** %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 %inc) nounwind {
+define ptr @test_v2f32_post_reg_st4lane(ptr %A, ptr %ptr, <2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f32_post_reg_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -8197,15 +8197,15 @@ define float* @test_v2f32_post_reg_st4lane(float* %A, float** %ptr, <2 x float>
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.s { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 0, float* %A)
-  %tmp = getelementptr float, float* %A, i64 %inc
-  ret float* %tmp
+  call void @llvm.aarch64.neon.st4lane.v2f32.p0(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 0, ptr %A)
+  %tmp = getelementptr float, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4lane.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, <2 x float>, i64, float*)
+declare void @llvm.aarch64.neon.st4lane.v2f32.p0(<2 x float>, <2 x float>, <2 x float>, <2 x float>, i64, ptr)
 
 
-define double* @test_v2f64_post_imm_st4lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E) nounwind {
+define ptr @test_v2f64_post_imm_st4lane(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E) nounwind {
 ; CHECK-LABEL: test_v2f64_post_imm_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -8214,12 +8214,12 @@ define double* @test_v2f64_post_imm_st4lane(double* %A, double** %ptr, <2 x doub
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.d { v0, v1, v2, v3 }[0], [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 4
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st4lane.v2f64.p0(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 4
+  ret ptr %tmp
 }
 
-define double* @test_v2f64_post_reg_st4lane(double* %A, double** %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 %inc) nounwind {
+define ptr @test_v2f64_post_reg_st4lane(ptr %A, ptr %ptr, <2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v2f64_post_reg_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -8229,15 +8229,15 @@ define double* @test_v2f64_post_reg_st4lane(double* %A, double** %ptr, <2 x doub
 ; CHECK-NEXT:    ; kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.d { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st4lane.v2f64.p0(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4lane.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, <2 x double>, i64, double*)
+declare void @llvm.aarch64.neon.st4lane.v2f64.p0(<2 x double>, <2 x double>, <2 x double>, <2 x double>, i64, ptr)
 
 
-define double* @test_v1f64_post_imm_st4lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E) nounwind {
+define ptr @test_v1f64_post_imm_st4lane(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E) nounwind {
 ; CHECK-LABEL: test_v1f64_post_imm_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -8246,12 +8246,12 @@ define double* @test_v1f64_post_imm_st4lane(double* %A, double** %ptr, <1 x doub
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.d { v0, v1, v2, v3 }[0], [x0], #32
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 4
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st4lane.v1f64.p0(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 4
+  ret ptr %tmp
 }
 
-define double* @test_v1f64_post_reg_st4lane(double* %A, double** %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 %inc) nounwind {
+define ptr @test_v1f64_post_reg_st4lane(ptr %A, ptr %ptr, <1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 %inc) nounwind {
 ; CHECK-LABEL: test_v1f64_post_reg_st4lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d3 killed $d3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
@@ -8261,20 +8261,20 @@ define double* @test_v1f64_post_reg_st4lane(double* %A, double** %ptr, <1 x doub
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3
 ; CHECK-NEXT:    st4.d { v0, v1, v2, v3 }[0], [x0], x8
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.neon.st4lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 0, double* %A)
-  %tmp = getelementptr double, double* %A, i64 %inc
-  ret double* %tmp
+  call void @llvm.aarch64.neon.st4lane.v1f64.p0(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 0, ptr %A)
+  %tmp = getelementptr double, ptr %A, i64 %inc
+  ret ptr %tmp
 }
 
-declare void @llvm.aarch64.neon.st4lane.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, <1 x double>, i64, double*)
+declare void @llvm.aarch64.neon.st4lane.v1f64.p0(<1 x double>, <1 x double>, <1 x double>, <1 x double>, i64, ptr)
 
-define <16 x i8> @test_v16i8_post_imm_ld1r(i8* %bar, i8** %ptr) {
+define <16 x i8> @test_v16i8_post_imm_ld1r(ptr %bar, ptr %ptr) {
 ; CHECK-LABEL: test_v16i8_post_imm_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1r.16b { v0 }, [x0], #1
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i8, i8* %bar
+  %tmp1 = load i8, ptr %bar
   %tmp2 = insertelement <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
   %tmp3 = insertelement <16 x i8> %tmp2, i8 %tmp1, i32 1
   %tmp4 = insertelement <16 x i8> %tmp3, i8 %tmp1, i32 2
@@ -8291,18 +8291,18 @@ define <16 x i8> @test_v16i8_post_imm_ld1r(i8* %bar, i8** %ptr) {
   %tmp15 = insertelement <16 x i8> %tmp14, i8 %tmp1, i32 13
   %tmp16 = insertelement <16 x i8> %tmp15, i8 %tmp1, i32 14
   %tmp17 = insertelement <16 x i8> %tmp16, i8 %tmp1, i32 15
-  %tmp18 = getelementptr i8, i8* %bar, i64 1
-  store i8* %tmp18, i8** %ptr
+  %tmp18 = getelementptr i8, ptr %bar, i64 1
+  store ptr %tmp18, ptr %ptr
   ret <16 x i8> %tmp17
 }
 
-define <16 x i8> @test_v16i8_post_reg_ld1r(i8* %bar, i8** %ptr, i64 %inc) {
+define <16 x i8> @test_v16i8_post_reg_ld1r(ptr %bar, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v16i8_post_reg_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1r.16b { v0 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i8, i8* %bar
+  %tmp1 = load i8, ptr %bar
   %tmp2 = insertelement <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
   %tmp3 = insertelement <16 x i8> %tmp2, i8 %tmp1, i32 1
   %tmp4 = insertelement <16 x i8> %tmp3, i8 %tmp1, i32 2
@@ -8319,18 +8319,18 @@ define <16 x i8> @test_v16i8_post_reg_ld1r(i8* %bar, i8** %ptr, i64 %inc) {
   %tmp15 = insertelement <16 x i8> %tmp14, i8 %tmp1, i32 13
   %tmp16 = insertelement <16 x i8> %tmp15, i8 %tmp1, i32 14
   %tmp17 = insertelement <16 x i8> %tmp16, i8 %tmp1, i32 15
-  %tmp18 = getelementptr i8, i8* %bar, i64 %inc
-  store i8* %tmp18, i8** %ptr
+  %tmp18 = getelementptr i8, ptr %bar, i64 %inc
+  store ptr %tmp18, ptr %ptr
   ret <16 x i8> %tmp17
 }
 
-define <8 x i8> @test_v8i8_post_imm_ld1r(i8* %bar, i8** %ptr) {
+define <8 x i8> @test_v8i8_post_imm_ld1r(ptr %bar, ptr %ptr) {
 ; CHECK-LABEL: test_v8i8_post_imm_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1r.8b { v0 }, [x0], #1
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i8, i8* %bar
+  %tmp1 = load i8, ptr %bar
   %tmp2 = insertelement <8 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
   %tmp3 = insertelement <8 x i8> %tmp2, i8 %tmp1, i32 1
   %tmp4 = insertelement <8 x i8> %tmp3, i8 %tmp1, i32 2
@@ -8339,18 +8339,18 @@ define <8 x i8> @test_v8i8_post_imm_ld1r(i8* %bar, i8** %ptr) {
   %tmp7 = insertelement <8 x i8> %tmp6, i8 %tmp1, i32 5
   %tmp8 = insertelement <8 x i8> %tmp7, i8 %tmp1, i32 6
   %tmp9 = insertelement <8 x i8> %tmp8, i8 %tmp1, i32 7
-  %tmp10 = getelementptr i8, i8* %bar, i64 1
-  store i8* %tmp10, i8** %ptr
+  %tmp10 = getelementptr i8, ptr %bar, i64 1
+  store ptr %tmp10, ptr %ptr
   ret <8 x i8> %tmp9
 }
 
-define <8 x i8> @test_v8i8_post_reg_ld1r(i8* %bar, i8** %ptr, i64 %inc) {
+define <8 x i8> @test_v8i8_post_reg_ld1r(ptr %bar, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v8i8_post_reg_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1r.8b { v0 }, [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i8, i8* %bar
+  %tmp1 = load i8, ptr %bar
   %tmp2 = insertelement <8 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
   %tmp3 = insertelement <8 x i8> %tmp2, i8 %tmp1, i32 1
   %tmp4 = insertelement <8 x i8> %tmp3, i8 %tmp1, i32 2
@@ -8359,18 +8359,18 @@ define <8 x i8> @test_v8i8_post_reg_ld1r(i8* %bar, i8** %ptr, i64 %inc) {
   %tmp7 = insertelement <8 x i8> %tmp6, i8 %tmp1, i32 5
   %tmp8 = insertelement <8 x i8> %tmp7, i8 %tmp1, i32 6
   %tmp9 = insertelement <8 x i8> %tmp8, i8 %tmp1, i32 7
-  %tmp10 = getelementptr i8, i8* %bar, i64 %inc
-  store i8* %tmp10, i8** %ptr
+  %tmp10 = getelementptr i8, ptr %bar, i64 %inc
+  store ptr %tmp10, ptr %ptr
   ret <8 x i8> %tmp9
 }
 
-define <8 x i16> @test_v8i16_post_imm_ld1r(i16* %bar, i16** %ptr) {
+define <8 x i16> @test_v8i16_post_imm_ld1r(ptr %bar, ptr %ptr) {
 ; CHECK-LABEL: test_v8i16_post_imm_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1r.8h { v0 }, [x0], #2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i16, i16* %bar
+  %tmp1 = load i16, ptr %bar
   %tmp2 = insertelement <8 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
   %tmp3 = insertelement <8 x i16> %tmp2, i16 %tmp1, i32 1
   %tmp4 = insertelement <8 x i16> %tmp3, i16 %tmp1, i32 2
@@ -8379,19 +8379,19 @@ define <8 x i16> @test_v8i16_post_imm_ld1r(i16* %bar, i16** %ptr) {
   %tmp7 = insertelement <8 x i16> %tmp6, i16 %tmp1, i32 5
   %tmp8 = insertelement <8 x i16> %tmp7, i16 %tmp1, i32 6
   %tmp9 = insertelement <8 x i16> %tmp8, i16 %tmp1, i32 7
-  %tmp10 = getelementptr i16, i16* %bar, i64 1
-  store i16* %tmp10, i16** %ptr
+  %tmp10 = getelementptr i16, ptr %bar, i64 1
+  store ptr %tmp10, ptr %ptr
   ret <8 x i16> %tmp9
 }
 
-define <8 x i16> @test_v8i16_post_reg_ld1r(i16* %bar, i16** %ptr, i64 %inc) {
+define <8 x i16> @test_v8i16_post_reg_ld1r(ptr %bar, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v8i16_post_reg_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld1r.8h { v0 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i16, i16* %bar
+  %tmp1 = load i16, ptr %bar
   %tmp2 = insertelement <8 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
   %tmp3 = insertelement <8 x i16> %tmp2, i16 %tmp1, i32 1
   %tmp4 = insertelement <8 x i16> %tmp3, i16 %tmp1, i32 2
@@ -8400,253 +8400,253 @@ define <8 x i16> @test_v8i16_post_reg_ld1r(i16* %bar, i16** %ptr, i64 %inc) {
   %tmp7 = insertelement <8 x i16> %tmp6, i16 %tmp1, i32 5
   %tmp8 = insertelement <8 x i16> %tmp7, i16 %tmp1, i32 6
   %tmp9 = insertelement <8 x i16> %tmp8, i16 %tmp1, i32 7
-  %tmp10 = getelementptr i16, i16* %bar, i64 %inc
-  store i16* %tmp10, i16** %ptr
+  %tmp10 = getelementptr i16, ptr %bar, i64 %inc
+  store ptr %tmp10, ptr %ptr
   ret <8 x i16> %tmp9
 }
 
-define <4 x i16> @test_v4i16_post_imm_ld1r(i16* %bar, i16** %ptr) {
+define <4 x i16> @test_v4i16_post_imm_ld1r(ptr %bar, ptr %ptr) {
 ; CHECK-LABEL: test_v4i16_post_imm_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1r.4h { v0 }, [x0], #2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i16, i16* %bar
+  %tmp1 = load i16, ptr %bar
   %tmp2 = insertelement <4 x i16> <i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
   %tmp3 = insertelement <4 x i16> %tmp2, i16 %tmp1, i32 1
   %tmp4 = insertelement <4 x i16> %tmp3, i16 %tmp1, i32 2
   %tmp5 = insertelement <4 x i16> %tmp4, i16 %tmp1, i32 3
-  %tmp6 = getelementptr i16, i16* %bar, i64 1
-  store i16* %tmp6, i16** %ptr
+  %tmp6 = getelementptr i16, ptr %bar, i64 1
+  store ptr %tmp6, ptr %ptr
   ret <4 x i16> %tmp5
 }
 
-define <4 x i16> @test_v4i16_post_reg_ld1r(i16* %bar, i16** %ptr, i64 %inc) {
+define <4 x i16> @test_v4i16_post_reg_ld1r(ptr %bar, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4i16_post_reg_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld1r.4h { v0 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i16, i16* %bar
+  %tmp1 = load i16, ptr %bar
   %tmp2 = insertelement <4 x i16> <i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
   %tmp3 = insertelement <4 x i16> %tmp2, i16 %tmp1, i32 1
   %tmp4 = insertelement <4 x i16> %tmp3, i16 %tmp1, i32 2
   %tmp5 = insertelement <4 x i16> %tmp4, i16 %tmp1, i32 3
-  %tmp6 = getelementptr i16, i16* %bar, i64 %inc
-  store i16* %tmp6, i16** %ptr
+  %tmp6 = getelementptr i16, ptr %bar, i64 %inc
+  store ptr %tmp6, ptr %ptr
   ret <4 x i16> %tmp5
 }
 
-define <4 x i32> @test_v4i32_post_imm_ld1r(i32* %bar, i32** %ptr) {
+define <4 x i32> @test_v4i32_post_imm_ld1r(ptr %bar, ptr %ptr) {
 ; CHECK-LABEL: test_v4i32_post_imm_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1r.4s { v0 }, [x0], #4
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i32, i32* %bar
+  %tmp1 = load i32, ptr %bar
   %tmp2 = insertelement <4 x i32> <i32 undef, i32 undef, i32 undef, i32 undef>, i32 %tmp1, i32 0
   %tmp3 = insertelement <4 x i32> %tmp2, i32 %tmp1, i32 1
   %tmp4 = insertelement <4 x i32> %tmp3, i32 %tmp1, i32 2
   %tmp5 = insertelement <4 x i32> %tmp4, i32 %tmp1, i32 3
-  %tmp6 = getelementptr i32, i32* %bar, i64 1
-  store i32* %tmp6, i32** %ptr
+  %tmp6 = getelementptr i32, ptr %bar, i64 1
+  store ptr %tmp6, ptr %ptr
   ret <4 x i32> %tmp5
 }
 
-define <4 x i32> @test_v4i32_post_reg_ld1r(i32* %bar, i32** %ptr, i64 %inc) {
+define <4 x i32> @test_v4i32_post_reg_ld1r(ptr %bar, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4i32_post_reg_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1r.4s { v0 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i32, i32* %bar
+  %tmp1 = load i32, ptr %bar
   %tmp2 = insertelement <4 x i32> <i32 undef, i32 undef, i32 undef, i32 undef>, i32 %tmp1, i32 0
   %tmp3 = insertelement <4 x i32> %tmp2, i32 %tmp1, i32 1
   %tmp4 = insertelement <4 x i32> %tmp3, i32 %tmp1, i32 2
   %tmp5 = insertelement <4 x i32> %tmp4, i32 %tmp1, i32 3
-  %tmp6 = getelementptr i32, i32* %bar, i64 %inc
-  store i32* %tmp6, i32** %ptr
+  %tmp6 = getelementptr i32, ptr %bar, i64 %inc
+  store ptr %tmp6, ptr %ptr
   ret <4 x i32> %tmp5
 }
 
-define <2 x i32> @test_v2i32_post_imm_ld1r(i32* %bar, i32** %ptr) {
+define <2 x i32> @test_v2i32_post_imm_ld1r(ptr %bar, ptr %ptr) {
 ; CHECK-LABEL: test_v2i32_post_imm_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1r.2s { v0 }, [x0], #4
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i32, i32* %bar
+  %tmp1 = load i32, ptr %bar
   %tmp2 = insertelement <2 x i32> <i32 undef, i32 undef>, i32 %tmp1, i32 0
   %tmp3 = insertelement <2 x i32> %tmp2, i32 %tmp1, i32 1
-  %tmp4 = getelementptr i32, i32* %bar, i64 1
-  store i32* %tmp4, i32** %ptr
+  %tmp4 = getelementptr i32, ptr %bar, i64 1
+  store ptr %tmp4, ptr %ptr
   ret <2 x i32> %tmp3
 }
 
-define <2 x i32> @test_v2i32_post_reg_ld1r(i32* %bar, i32** %ptr, i64 %inc) {
+define <2 x i32> @test_v2i32_post_reg_ld1r(ptr %bar, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2i32_post_reg_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1r.2s { v0 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i32, i32* %bar
+  %tmp1 = load i32, ptr %bar
   %tmp2 = insertelement <2 x i32> <i32 undef, i32 undef>, i32 %tmp1, i32 0
   %tmp3 = insertelement <2 x i32> %tmp2, i32 %tmp1, i32 1
-  %tmp4 = getelementptr i32, i32* %bar, i64 %inc
-  store i32* %tmp4, i32** %ptr
+  %tmp4 = getelementptr i32, ptr %bar, i64 %inc
+  store ptr %tmp4, ptr %ptr
   ret <2 x i32> %tmp3
 }
 
-define <2 x i64> @test_v2i64_post_imm_ld1r(i64* %bar, i64** %ptr) {
+define <2 x i64> @test_v2i64_post_imm_ld1r(ptr %bar, ptr %ptr) {
 ; CHECK-LABEL: test_v2i64_post_imm_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1r.2d { v0 }, [x0], #8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i64, i64* %bar
+  %tmp1 = load i64, ptr %bar
   %tmp2 = insertelement <2 x i64> <i64 undef, i64 undef>, i64 %tmp1, i32 0
   %tmp3 = insertelement <2 x i64> %tmp2, i64 %tmp1, i32 1
-  %tmp4 = getelementptr i64, i64* %bar, i64 1
-  store i64* %tmp4, i64** %ptr
+  %tmp4 = getelementptr i64, ptr %bar, i64 1
+  store ptr %tmp4, ptr %ptr
   ret <2 x i64> %tmp3
 }
 
-define <2 x i64> @test_v2i64_post_reg_ld1r(i64* %bar, i64** %ptr, i64 %inc) {
+define <2 x i64> @test_v2i64_post_reg_ld1r(ptr %bar, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2i64_post_reg_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1r.2d { v0 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i64, i64* %bar
+  %tmp1 = load i64, ptr %bar
   %tmp2 = insertelement <2 x i64> <i64 undef, i64 undef>, i64 %tmp1, i32 0
   %tmp3 = insertelement <2 x i64> %tmp2, i64 %tmp1, i32 1
-  %tmp4 = getelementptr i64, i64* %bar, i64 %inc
-  store i64* %tmp4, i64** %ptr
+  %tmp4 = getelementptr i64, ptr %bar, i64 %inc
+  store ptr %tmp4, ptr %ptr
   ret <2 x i64> %tmp3
 }
 
-define <4 x float> @test_v4f32_post_imm_ld1r(float* %bar, float** %ptr) {
+define <4 x float> @test_v4f32_post_imm_ld1r(ptr %bar, ptr %ptr) {
 ; CHECK-LABEL: test_v4f32_post_imm_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1r.4s { v0 }, [x0], #4
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load float, float* %bar
+  %tmp1 = load float, ptr %bar
   %tmp2 = insertelement <4 x float> <float undef, float undef, float undef, float undef>, float %tmp1, i32 0
   %tmp3 = insertelement <4 x float> %tmp2, float %tmp1, i32 1
   %tmp4 = insertelement <4 x float> %tmp3, float %tmp1, i32 2
   %tmp5 = insertelement <4 x float> %tmp4, float %tmp1, i32 3
-  %tmp6 = getelementptr float, float* %bar, i64 1
-  store float* %tmp6, float** %ptr
+  %tmp6 = getelementptr float, ptr %bar, i64 1
+  store ptr %tmp6, ptr %ptr
   ret <4 x float> %tmp5
 }
 
-define <4 x float> @test_v4f32_post_reg_ld1r(float* %bar, float** %ptr, i64 %inc) {
+define <4 x float> @test_v4f32_post_reg_ld1r(ptr %bar, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v4f32_post_reg_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1r.4s { v0 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load float, float* %bar
+  %tmp1 = load float, ptr %bar
   %tmp2 = insertelement <4 x float> <float undef, float undef, float undef, float undef>, float %tmp1, i32 0
   %tmp3 = insertelement <4 x float> %tmp2, float %tmp1, i32 1
   %tmp4 = insertelement <4 x float> %tmp3, float %tmp1, i32 2
   %tmp5 = insertelement <4 x float> %tmp4, float %tmp1, i32 3
-  %tmp6 = getelementptr float, float* %bar, i64 %inc
-  store float* %tmp6, float** %ptr
+  %tmp6 = getelementptr float, ptr %bar, i64 %inc
+  store ptr %tmp6, ptr %ptr
   ret <4 x float> %tmp5
 }
 
-define <2 x float> @test_v2f32_post_imm_ld1r(float* %bar, float** %ptr) {
+define <2 x float> @test_v2f32_post_imm_ld1r(ptr %bar, ptr %ptr) {
 ; CHECK-LABEL: test_v2f32_post_imm_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1r.2s { v0 }, [x0], #4
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load float, float* %bar
+  %tmp1 = load float, ptr %bar
   %tmp2 = insertelement <2 x float> <float undef, float undef>, float %tmp1, i32 0
   %tmp3 = insertelement <2 x float> %tmp2, float %tmp1, i32 1
-  %tmp4 = getelementptr float, float* %bar, i64 1
-  store float* %tmp4, float** %ptr
+  %tmp4 = getelementptr float, ptr %bar, i64 1
+  store ptr %tmp4, ptr %ptr
   ret <2 x float> %tmp3
 }
 
-define <2 x float> @test_v2f32_post_reg_ld1r(float* %bar, float** %ptr, i64 %inc) {
+define <2 x float> @test_v2f32_post_reg_ld1r(ptr %bar, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2f32_post_reg_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1r.2s { v0 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load float, float* %bar
+  %tmp1 = load float, ptr %bar
   %tmp2 = insertelement <2 x float> <float undef, float undef>, float %tmp1, i32 0
   %tmp3 = insertelement <2 x float> %tmp2, float %tmp1, i32 1
-  %tmp4 = getelementptr float, float* %bar, i64 %inc
-  store float* %tmp4, float** %ptr
+  %tmp4 = getelementptr float, ptr %bar, i64 %inc
+  store ptr %tmp4, ptr %ptr
   ret <2 x float> %tmp3
 }
 
-define <2 x double> @test_v2f64_post_imm_ld1r(double* %bar, double** %ptr) {
+define <2 x double> @test_v2f64_post_imm_ld1r(ptr %bar, ptr %ptr) {
 ; CHECK-LABEL: test_v2f64_post_imm_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1r.2d { v0 }, [x0], #8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load double, double* %bar
+  %tmp1 = load double, ptr %bar
   %tmp2 = insertelement <2 x double> <double undef, double undef>, double %tmp1, i32 0
   %tmp3 = insertelement <2 x double> %tmp2, double %tmp1, i32 1
-  %tmp4 = getelementptr double, double* %bar, i64 1
-  store double* %tmp4, double** %ptr
+  %tmp4 = getelementptr double, ptr %bar, i64 1
+  store ptr %tmp4, ptr %ptr
   ret <2 x double> %tmp3
 }
 
-define <2 x double> @test_v2f64_post_reg_ld1r(double* %bar, double** %ptr, i64 %inc) {
+define <2 x double> @test_v2f64_post_reg_ld1r(ptr %bar, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_v2f64_post_reg_ld1r:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1r.2d { v0 }, [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load double, double* %bar
+  %tmp1 = load double, ptr %bar
   %tmp2 = insertelement <2 x double> <double undef, double undef>, double %tmp1, i32 0
   %tmp3 = insertelement <2 x double> %tmp2, double %tmp1, i32 1
-  %tmp4 = getelementptr double, double* %bar, i64 %inc
-  store double* %tmp4, double** %ptr
+  %tmp4 = getelementptr double, ptr %bar, i64 %inc
+  store ptr %tmp4, ptr %ptr
   ret <2 x double> %tmp3
 }
 
-define <16 x i8> @test_v16i8_post_imm_ld1lane(i8* %bar, i8** %ptr, <16 x i8> %A) {
+define <16 x i8> @test_v16i8_post_imm_ld1lane(ptr %bar, ptr %ptr, <16 x i8> %A) {
 ; CHECK-LABEL: test_v16i8_post_imm_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.b { v0 }[1], [x0], #1
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i8, i8* %bar
+  %tmp1 = load i8, ptr %bar
   %tmp2 = insertelement <16 x i8> %A, i8 %tmp1, i32 1
-  %tmp3 = getelementptr i8, i8* %bar, i64 1
-  store i8* %tmp3, i8** %ptr
+  %tmp3 = getelementptr i8, ptr %bar, i64 1
+  store ptr %tmp3, ptr %ptr
   ret <16 x i8> %tmp2
 }
 
-define <16 x i8> @test_v16i8_post_reg_ld1lane(i8* %bar, i8** %ptr, i64 %inc, <16 x i8> %A) {
+define <16 x i8> @test_v16i8_post_reg_ld1lane(ptr %bar, ptr %ptr, i64 %inc, <16 x i8> %A) {
 ; CHECK-LABEL: test_v16i8_post_reg_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.b { v0 }[1], [x0], x2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i8, i8* %bar
+  %tmp1 = load i8, ptr %bar
   %tmp2 = insertelement <16 x i8> %A, i8 %tmp1, i32 1
-  %tmp3 = getelementptr i8, i8* %bar, i64 %inc
-  store i8* %tmp3, i8** %ptr
+  %tmp3 = getelementptr i8, ptr %bar, i64 %inc
+  store ptr %tmp3, ptr %ptr
   ret <16 x i8> %tmp2
 }
 
-define <8 x i8> @test_v8i8_post_imm_ld1lane(i8* %bar, i8** %ptr, <8 x i8> %A) {
+define <8 x i8> @test_v8i8_post_imm_ld1lane(ptr %bar, ptr %ptr, <8 x i8> %A) {
 ; CHECK-LABEL: test_v8i8_post_imm_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
@@ -8654,14 +8654,14 @@ define <8 x i8> @test_v8i8_post_imm_ld1lane(i8* %bar, i8** %ptr, <8 x i8> %A) {
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i8, i8* %bar
+  %tmp1 = load i8, ptr %bar
   %tmp2 = insertelement <8 x i8> %A, i8 %tmp1, i32 1
-  %tmp3 = getelementptr i8, i8* %bar, i64 1
-  store i8* %tmp3, i8** %ptr
+  %tmp3 = getelementptr i8, ptr %bar, i64 1
+  store ptr %tmp3, ptr %ptr
   ret <8 x i8> %tmp2
 }
 
-define <8 x i8> @test_v8i8_post_reg_ld1lane(i8* %bar, i8** %ptr, i64 %inc, <8 x i8> %A) {
+define <8 x i8> @test_v8i8_post_reg_ld1lane(ptr %bar, ptr %ptr, i64 %inc, <8 x i8> %A) {
 ; CHECK-LABEL: test_v8i8_post_reg_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
@@ -8669,41 +8669,41 @@ define <8 x i8> @test_v8i8_post_reg_ld1lane(i8* %bar, i8** %ptr, i64 %inc, <8 x
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i8, i8* %bar
+  %tmp1 = load i8, ptr %bar
   %tmp2 = insertelement <8 x i8> %A, i8 %tmp1, i32 1
-  %tmp3 = getelementptr i8, i8* %bar, i64 %inc
-  store i8* %tmp3, i8** %ptr
+  %tmp3 = getelementptr i8, ptr %bar, i64 %inc
+  store ptr %tmp3, ptr %ptr
   ret <8 x i8> %tmp2
 }
 
-define <8 x i16> @test_v8i16_post_imm_ld1lane(i16* %bar, i16** %ptr, <8 x i16> %A) {
+define <8 x i16> @test_v8i16_post_imm_ld1lane(ptr %bar, ptr %ptr, <8 x i16> %A) {
 ; CHECK-LABEL: test_v8i16_post_imm_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.h { v0 }[1], [x0], #2
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i16, i16* %bar
+  %tmp1 = load i16, ptr %bar
   %tmp2 = insertelement <8 x i16> %A, i16 %tmp1, i32 1
-  %tmp3 = getelementptr i16, i16* %bar, i64 1
-  store i16* %tmp3, i16** %ptr
+  %tmp3 = getelementptr i16, ptr %bar, i64 1
+  store ptr %tmp3, ptr %ptr
   ret <8 x i16> %tmp2
 }
 
-define <8 x i16> @test_v8i16_post_reg_ld1lane(i16* %bar, i16** %ptr, i64 %inc, <8 x i16> %A) {
+define <8 x i16> @test_v8i16_post_reg_ld1lane(ptr %bar, ptr %ptr, i64 %inc, <8 x i16> %A) {
 ; CHECK-LABEL: test_v8i16_post_reg_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
 ; CHECK-NEXT:    ld1.h { v0 }[1], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i16, i16* %bar
+  %tmp1 = load i16, ptr %bar
   %tmp2 = insertelement <8 x i16> %A, i16 %tmp1, i32 1
-  %tmp3 = getelementptr i16, i16* %bar, i64 %inc
-  store i16* %tmp3, i16** %ptr
+  %tmp3 = getelementptr i16, ptr %bar, i64 %inc
+  store ptr %tmp3, ptr %ptr
   ret <8 x i16> %tmp2
 }
 
-define <4 x i16> @test_v4i16_post_imm_ld1lane(i16* %bar, i16** %ptr, <4 x i16> %A) {
+define <4 x i16> @test_v4i16_post_imm_ld1lane(ptr %bar, ptr %ptr, <4 x i16> %A) {
 ; CHECK-LABEL: test_v4i16_post_imm_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
@@ -8711,14 +8711,14 @@ define <4 x i16> @test_v4i16_post_imm_ld1lane(i16* %bar, i16** %ptr, <4 x i16> %
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i16, i16* %bar
+  %tmp1 = load i16, ptr %bar
   %tmp2 = insertelement <4 x i16> %A, i16 %tmp1, i32 1
-  %tmp3 = getelementptr i16, i16* %bar, i64 1
-  store i16* %tmp3, i16** %ptr
+  %tmp3 = getelementptr i16, ptr %bar, i64 1
+  store ptr %tmp3, ptr %ptr
   ret <4 x i16> %tmp2
 }
 
-define <4 x i16> @test_v4i16_post_reg_ld1lane(i16* %bar, i16** %ptr, i64 %inc, <4 x i16> %A) {
+define <4 x i16> @test_v4i16_post_reg_ld1lane(ptr %bar, ptr %ptr, i64 %inc, <4 x i16> %A) {
 ; CHECK-LABEL: test_v4i16_post_reg_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
@@ -8727,41 +8727,41 @@ define <4 x i16> @test_v4i16_post_reg_ld1lane(i16* %bar, i16** %ptr, i64 %inc, <
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i16, i16* %bar
+  %tmp1 = load i16, ptr %bar
   %tmp2 = insertelement <4 x i16> %A, i16 %tmp1, i32 1
-  %tmp3 = getelementptr i16, i16* %bar, i64 %inc
-  store i16* %tmp3, i16** %ptr
+  %tmp3 = getelementptr i16, ptr %bar, i64 %inc
+  store ptr %tmp3, ptr %ptr
   ret <4 x i16> %tmp2
 }
 
-define <4 x i32> @test_v4i32_post_imm_ld1lane(i32* %bar, i32** %ptr, <4 x i32> %A) {
+define <4 x i32> @test_v4i32_post_imm_ld1lane(ptr %bar, ptr %ptr, <4 x i32> %A) {
 ; CHECK-LABEL: test_v4i32_post_imm_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.s { v0 }[1], [x0], #4
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i32, i32* %bar
+  %tmp1 = load i32, ptr %bar
   %tmp2 = insertelement <4 x i32> %A, i32 %tmp1, i32 1
-  %tmp3 = getelementptr i32, i32* %bar, i64 1
-  store i32* %tmp3, i32** %ptr
+  %tmp3 = getelementptr i32, ptr %bar, i64 1
+  store ptr %tmp3, ptr %ptr
   ret <4 x i32> %tmp2
 }
 
-define <4 x i32> @test_v4i32_post_reg_ld1lane(i32* %bar, i32** %ptr, i64 %inc, <4 x i32> %A) {
+define <4 x i32> @test_v4i32_post_reg_ld1lane(ptr %bar, ptr %ptr, i64 %inc, <4 x i32> %A) {
 ; CHECK-LABEL: test_v4i32_post_reg_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1.s { v0 }[1], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i32, i32* %bar
+  %tmp1 = load i32, ptr %bar
   %tmp2 = insertelement <4 x i32> %A, i32 %tmp1, i32 1
-  %tmp3 = getelementptr i32, i32* %bar, i64 %inc
-  store i32* %tmp3, i32** %ptr
+  %tmp3 = getelementptr i32, ptr %bar, i64 %inc
+  store ptr %tmp3, ptr %ptr
   ret <4 x i32> %tmp2
 }
 
-define <2 x i32> @test_v2i32_post_imm_ld1lane(i32* %bar, i32** %ptr, <2 x i32> %A) {
+define <2 x i32> @test_v2i32_post_imm_ld1lane(ptr %bar, ptr %ptr, <2 x i32> %A) {
 ; CHECK-LABEL: test_v2i32_post_imm_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
@@ -8769,14 +8769,14 @@ define <2 x i32> @test_v2i32_post_imm_ld1lane(i32* %bar, i32** %ptr, <2 x i32> %
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i32, i32* %bar
+  %tmp1 = load i32, ptr %bar
   %tmp2 = insertelement <2 x i32> %A, i32 %tmp1, i32 1
-  %tmp3 = getelementptr i32, i32* %bar, i64 1
-  store i32* %tmp3, i32** %ptr
+  %tmp3 = getelementptr i32, ptr %bar, i64 1
+  store ptr %tmp3, ptr %ptr
   ret <2 x i32> %tmp2
 }
 
-define <2 x i32> @test_v2i32_post_reg_ld1lane(i32* %bar, i32** %ptr, i64 %inc, <2 x i32> %A) {
+define <2 x i32> @test_v2i32_post_reg_ld1lane(ptr %bar, ptr %ptr, i64 %inc, <2 x i32> %A) {
 ; CHECK-LABEL: test_v2i32_post_reg_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -8785,68 +8785,68 @@ define <2 x i32> @test_v2i32_post_reg_ld1lane(i32* %bar, i32** %ptr, i64 %inc, <
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i32, i32* %bar
+  %tmp1 = load i32, ptr %bar
   %tmp2 = insertelement <2 x i32> %A, i32 %tmp1, i32 1
-  %tmp3 = getelementptr i32, i32* %bar, i64 %inc
-  store i32* %tmp3, i32** %ptr
+  %tmp3 = getelementptr i32, ptr %bar, i64 %inc
+  store ptr %tmp3, ptr %ptr
   ret <2 x i32> %tmp2
 }
 
-define <2 x i64> @test_v2i64_post_imm_ld1lane(i64* %bar, i64** %ptr, <2 x i64> %A) {
+define <2 x i64> @test_v2i64_post_imm_ld1lane(ptr %bar, ptr %ptr, <2 x i64> %A) {
 ; CHECK-LABEL: test_v2i64_post_imm_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.d { v0 }[1], [x0], #8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i64, i64* %bar
+  %tmp1 = load i64, ptr %bar
   %tmp2 = insertelement <2 x i64> %A, i64 %tmp1, i32 1
-  %tmp3 = getelementptr i64, i64* %bar, i64 1
-  store i64* %tmp3, i64** %ptr
+  %tmp3 = getelementptr i64, ptr %bar, i64 1
+  store ptr %tmp3, ptr %ptr
   ret <2 x i64> %tmp2
 }
 
-define <2 x i64> @test_v2i64_post_reg_ld1lane(i64* %bar, i64** %ptr, i64 %inc, <2 x i64> %A) {
+define <2 x i64> @test_v2i64_post_reg_ld1lane(ptr %bar, ptr %ptr, i64 %inc, <2 x i64> %A) {
 ; CHECK-LABEL: test_v2i64_post_reg_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.d { v0 }[1], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i64, i64* %bar
+  %tmp1 = load i64, ptr %bar
   %tmp2 = insertelement <2 x i64> %A, i64 %tmp1, i32 1
-  %tmp3 = getelementptr i64, i64* %bar, i64 %inc
-  store i64* %tmp3, i64** %ptr
+  %tmp3 = getelementptr i64, ptr %bar, i64 %inc
+  store ptr %tmp3, ptr %ptr
   ret <2 x i64> %tmp2
 }
 
-define <4 x float> @test_v4f32_post_imm_ld1lane(float* %bar, float** %ptr, <4 x float> %A) {
+define <4 x float> @test_v4f32_post_imm_ld1lane(ptr %bar, ptr %ptr, <4 x float> %A) {
 ; CHECK-LABEL: test_v4f32_post_imm_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.s { v0 }[1], [x0], #4
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load float, float* %bar
+  %tmp1 = load float, ptr %bar
   %tmp2 = insertelement <4 x float> %A, float %tmp1, i32 1
-  %tmp3 = getelementptr float, float* %bar, i64 1
-  store float* %tmp3, float** %ptr
+  %tmp3 = getelementptr float, ptr %bar, i64 1
+  store ptr %tmp3, ptr %ptr
   ret <4 x float> %tmp2
 }
 
-define <4 x float> @test_v4f32_post_reg_ld1lane(float* %bar, float** %ptr, i64 %inc, <4 x float> %A) {
+define <4 x float> @test_v4f32_post_reg_ld1lane(ptr %bar, ptr %ptr, i64 %inc, <4 x float> %A) {
 ; CHECK-LABEL: test_v4f32_post_reg_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
 ; CHECK-NEXT:    ld1.s { v0 }[1], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load float, float* %bar
+  %tmp1 = load float, ptr %bar
   %tmp2 = insertelement <4 x float> %A, float %tmp1, i32 1
-  %tmp3 = getelementptr float, float* %bar, i64 %inc
-  store float* %tmp3, float** %ptr
+  %tmp3 = getelementptr float, ptr %bar, i64 %inc
+  store ptr %tmp3, ptr %ptr
   ret <4 x float> %tmp2
 }
 
-define <2 x float> @test_v2f32_post_imm_ld1lane(float* %bar, float** %ptr, <2 x float> %A) {
+define <2 x float> @test_v2f32_post_imm_ld1lane(ptr %bar, ptr %ptr, <2 x float> %A) {
 ; CHECK-LABEL: test_v2f32_post_imm_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
@@ -8854,14 +8854,14 @@ define <2 x float> @test_v2f32_post_imm_ld1lane(float* %bar, float** %ptr, <2 x
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load float, float* %bar
+  %tmp1 = load float, ptr %bar
   %tmp2 = insertelement <2 x float> %A, float %tmp1, i32 1
-  %tmp3 = getelementptr float, float* %bar, i64 1
-  store float* %tmp3, float** %ptr
+  %tmp3 = getelementptr float, ptr %bar, i64 1
+  store ptr %tmp3, ptr %ptr
   ret <2 x float> %tmp2
 }
 
-define <2 x float> @test_v2f32_post_reg_ld1lane(float* %bar, float** %ptr, i64 %inc, <2 x float> %A) {
+define <2 x float> @test_v2f32_post_reg_ld1lane(ptr %bar, ptr %ptr, i64 %inc, <2 x float> %A) {
 ; CHECK-LABEL: test_v2f32_post_reg_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #2
@@ -8870,42 +8870,42 @@ define <2 x float> @test_v2f32_post_reg_ld1lane(float* %bar, float** %ptr, i64 %
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load float, float* %bar
+  %tmp1 = load float, ptr %bar
   %tmp2 = insertelement <2 x float> %A, float %tmp1, i32 1
-  %tmp3 = getelementptr float, float* %bar, i64 %inc
-  store float* %tmp3, float** %ptr
+  %tmp3 = getelementptr float, ptr %bar, i64 %inc
+  store ptr %tmp3, ptr %ptr
   ret <2 x float> %tmp2
 }
 
-define <2 x double> @test_v2f64_post_imm_ld1lane(double* %bar, double** %ptr, <2 x double> %A) {
+define <2 x double> @test_v2f64_post_imm_ld1lane(ptr %bar, ptr %ptr, <2 x double> %A) {
 ; CHECK-LABEL: test_v2f64_post_imm_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.d { v0 }[1], [x0], #8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load double, double* %bar
+  %tmp1 = load double, ptr %bar
   %tmp2 = insertelement <2 x double> %A, double %tmp1, i32 1
-  %tmp3 = getelementptr double, double* %bar, i64 1
-  store double* %tmp3, double** %ptr
+  %tmp3 = getelementptr double, ptr %bar, i64 1
+  store ptr %tmp3, ptr %ptr
   ret <2 x double> %tmp2
 }
 
-define <2 x double> @test_v2f64_post_reg_ld1lane(double* %bar, double** %ptr, i64 %inc, <2 x double> %A) {
+define <2 x double> @test_v2f64_post_reg_ld1lane(ptr %bar, ptr %ptr, i64 %inc, <2 x double> %A) {
 ; CHECK-LABEL: test_v2f64_post_reg_ld1lane:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #3
 ; CHECK-NEXT:    ld1.d { v0 }[1], [x0], x8
 ; CHECK-NEXT:    str x0, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load double, double* %bar
+  %tmp1 = load double, ptr %bar
   %tmp2 = insertelement <2 x double> %A, double %tmp1, i32 1
-  %tmp3 = getelementptr double, double* %bar, i64 %inc
-  store double* %tmp3, double** %ptr
+  %tmp3 = getelementptr double, ptr %bar, i64 %inc
+  store ptr %tmp3, ptr %ptr
   ret <2 x double> %tmp2
 }
 
 ; Check for dependencies between the vector and the scalar load.
-define <4 x float> @test_v4f32_post_reg_ld1lane_dep_vec_on_load(float* %bar, float** %ptr, i64 %inc, <4 x float>* %dep_ptr_1, <4 x float>* %dep_ptr_2, <4 x float> %vec) {
+define <4 x float> @test_v4f32_post_reg_ld1lane_dep_vec_on_load(ptr %bar, ptr %ptr, i64 %inc, ptr %dep_ptr_1, ptr %dep_ptr_2, <4 x float> %vec) {
 ; CHECK-LABEL: test_v4f32_post_reg_ld1lane_dep_vec_on_load:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr s1, [x0]
@@ -8915,12 +8915,12 @@ define <4 x float> @test_v4f32_post_reg_ld1lane_dep_vec_on_load(float* %bar, flo
 ; CHECK-NEXT:    mov.s v0[1], v1[0]
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load float, float* %bar
-  store <4 x float> %vec, <4 x float>* %dep_ptr_1, align 16
-  %A = load <4 x float>, <4 x float>* %dep_ptr_2, align 16
+  %tmp1 = load float, ptr %bar
+  store <4 x float> %vec, ptr %dep_ptr_1, align 16
+  %A = load <4 x float>, ptr %dep_ptr_2, align 16
   %tmp2 = insertelement <4 x float> %A, float %tmp1, i32 1
-  %tmp3 = getelementptr float, float* %bar, i64 %inc
-  store float* %tmp3, float** %ptr
+  %tmp3 = getelementptr float, ptr %bar, i64 %inc
+  store ptr %tmp3, ptr %ptr
   ret <4 x float> %tmp2
 }
 
@@ -8931,7 +8931,7 @@ define <4 x float> @test_v4f32_post_reg_ld1lane_dep_vec_on_load(float* %bar, flo
 ; One way to trick that combine into running early is to force the vector ops
 ; legalizer to run.  We achieve that using the ctpop.
 ; PR23265
-define <4 x i16> @test_v4i16_post_reg_ld1lane_forced_narrow(i16* %bar, i16** %ptr, i64 %inc, <4 x i16> %A, <2 x i32>* %d) {
+define <4 x i16> @test_v4i16_post_reg_ld1lane_forced_narrow(ptr %bar, ptr %ptr, i64 %inc, <4 x i16> %A, ptr %d) {
 ; CHECK-LABEL: test_v4i16_post_reg_ld1lane_forced_narrow:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsl x8, x2, #1
@@ -8945,19 +8945,19 @@ define <4 x i16> @test_v4i16_post_reg_ld1lane_forced_narrow(i16* %bar, i16** %pt
 ; CHECK-NEXT:    uaddlp.2s v1, v1
 ; CHECK-NEXT:    str d1, [x3]
 ; CHECK-NEXT:    ret
-  %tmp1 = load i16, i16* %bar
+  %tmp1 = load i16, ptr %bar
   %tmp2 = insertelement <4 x i16> %A, i16 %tmp1, i32 1
-  %tmp3 = getelementptr i16, i16* %bar, i64 %inc
-  store i16* %tmp3, i16** %ptr
-  %dl =  load <2 x i32>,  <2 x i32>* %d
+  %tmp3 = getelementptr i16, ptr %bar, i64 %inc
+  store ptr %tmp3, ptr %ptr
+  %dl =  load <2 x i32>,  ptr %d
   %dr = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %dl)
-  store <2 x i32> %dr, <2 x i32>* %d
+  store <2 x i32> %dr, ptr %d
   ret <4 x i16> %tmp2
 }
 
 declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>)
 
-define void @test_ld1lane_build(i32* %ptr0, i32* %ptr1, i32* %ptr2, i32* %ptr3, <2 x i32>* %out) {
+define void @test_ld1lane_build(ptr %ptr0, ptr %ptr1, ptr %ptr2, ptr %ptr3, ptr %out) {
 ; CHECK-LABEL: test_ld1lane_build:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr s0, [x2]
@@ -8967,22 +8967,22 @@ define void @test_ld1lane_build(i32* %ptr0, i32* %ptr1, i32* %ptr2, i32* %ptr3,
 ; CHECK-NEXT:    sub.2s v0, v1, v0
 ; CHECK-NEXT:    str d0, [x4]
 ; CHECK-NEXT:    ret
-  %load0 = load i32, i32* %ptr0, align 4
-  %load1 = load i32, i32* %ptr1, align 4
+  %load0 = load i32, ptr %ptr0, align 4
+  %load1 = load i32, ptr %ptr1, align 4
   %vec0_0 = insertelement <2 x i32> undef, i32 %load0, i32 0
   %vec0_1 = insertelement <2 x i32> %vec0_0, i32 %load1, i32 1
 
-  %load2 = load i32, i32* %ptr2, align 4
-  %load3 = load i32, i32* %ptr3, align 4
+  %load2 = load i32, ptr %ptr2, align 4
+  %load3 = load i32, ptr %ptr3, align 4
   %vec1_0 = insertelement <2 x i32> undef, i32 %load2, i32 0
   %vec1_1 = insertelement <2 x i32> %vec1_0, i32 %load3, i32 1
 
   %sub = sub nsw <2 x i32> %vec0_1, %vec1_1
-  store <2 x i32> %sub, <2 x i32>* %out, align 16
+  store <2 x i32> %sub, ptr %out, align 16
   ret void
 }
 
-define void  @test_ld1lane_build_i16(i16* %a, i16* %b, i16* %c, i16* %d, <4 x i16> %e, <4 x i16>* %p) {
+define void  @test_ld1lane_build_i16(ptr %a, ptr %b, ptr %c, ptr %d, <4 x i16> %e, ptr %p) {
 ; CHECK-LABEL: test_ld1lane_build_i16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr h1, [x0]
@@ -8992,20 +8992,20 @@ define void  @test_ld1lane_build_i16(i16* %a, i16* %b, i16* %c, i16* %d, <4 x i1
 ; CHECK-NEXT:    sub.4h v0, v1, v0
 ; CHECK-NEXT:    str d0, [x4]
 ; CHECK-NEXT:    ret
-  %ld.a = load i16, i16* %a
-  %ld.b = load i16, i16* %b
-  %ld.c = load i16, i16* %c
-  %ld.d = load i16, i16* %d
+  %ld.a = load i16, ptr %a
+  %ld.b = load i16, ptr %b
+  %ld.c = load i16, ptr %c
+  %ld.d = load i16, ptr %d
   %v.a = insertelement <4 x i16> undef, i16 %ld.a, i64 0
   %v.b = insertelement <4 x i16> %v.a, i16 %ld.b, i64 1
   %v.c = insertelement <4 x i16> %v.b, i16 %ld.c, i64 2
   %v = insertelement <4 x i16> %v.c, i16 %ld.d, i64 3
   %sub = sub nsw <4 x i16> %v, %e
-  store <4 x i16> %sub, <4 x i16>* %p
+  store <4 x i16> %sub, ptr %p
   ret void
 }
 
-define void  @test_ld1lane_build_half(half* %a, half* %b, half* %c, half* %d, <4 x half> %e, <4 x half>* %p) {
+define void  @test_ld1lane_build_half(ptr %a, ptr %b, ptr %c, ptr %d, <4 x half> %e, ptr %p) {
 ; CHECK-LABEL: test_ld1lane_build_half:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr h1, [x0]
@@ -9018,20 +9018,20 @@ define void  @test_ld1lane_build_half(half* %a, half* %b, half* %c, half* %d, <4
 ; CHECK-NEXT:    fcvtn v0.4h, v0.4s
 ; CHECK-NEXT:    str d0, [x4]
 ; CHECK-NEXT:    ret
-  %ld.a = load half, half* %a
-  %ld.b = load half, half* %b
-  %ld.c = load half, half* %c
-  %ld.d = load half, half* %d
+  %ld.a = load half, ptr %a
+  %ld.b = load half, ptr %b
+  %ld.c = load half, ptr %c
+  %ld.d = load half, ptr %d
   %v.a = insertelement <4 x half> undef, half %ld.a, i64 0
   %v.b = insertelement <4 x half> %v.a, half %ld.b, i64 1
   %v.c = insertelement <4 x half> %v.b, half %ld.c, i64 2
   %v = insertelement <4 x half> %v.c, half %ld.d, i64 3
   %sub = fsub <4 x half> %v, %e
-  store <4 x half> %sub, <4 x half>* %p
+  store <4 x half> %sub, ptr %p
   ret void
 }
 
-define void  @test_ld1lane_build_i8(i8* %a, i8* %b, i8* %c, i8* %d, i8* %e, i8* %f, i8* %g, i8* %h, <8 x i8> %v, <8 x i8>* %p) {
+define void  @test_ld1lane_build_i8(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ptr %g, ptr %h, <8 x i8> %v, ptr %p) {
 ; CHECK-LABEL: test_ld1lane_build_i8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr b1, [x0]
@@ -9046,14 +9046,14 @@ define void  @test_ld1lane_build_i8(i8* %a, i8* %b, i8* %c, i8* %d, i8* %e, i8*
 ; CHECK-NEXT:    sub.8b v0, v1, v0
 ; CHECK-NEXT:    str d0, [x8]
 ; CHECK-NEXT:    ret
-  %ld.a = load i8, i8* %a
-  %ld.b = load i8, i8* %b
-  %ld.c = load i8, i8* %c
-  %ld.d = load i8, i8* %d
-  %ld.e = load i8, i8* %e
-  %ld.f = load i8, i8* %f
-  %ld.g = load i8, i8* %g
-  %ld.h = load i8, i8* %h
+  %ld.a = load i8, ptr %a
+  %ld.b = load i8, ptr %b
+  %ld.c = load i8, ptr %c
+  %ld.d = load i8, ptr %d
+  %ld.e = load i8, ptr %e
+  %ld.f = load i8, ptr %f
+  %ld.g = load i8, ptr %g
+  %ld.h = load i8, ptr %h
   %v.a = insertelement <8 x i8> undef, i8 %ld.a, i64 0
   %v.b = insertelement <8 x i8> %v.a,  i8 %ld.b, i64 1
   %v.c = insertelement <8 x i8> %v.b,  i8 %ld.c, i64 2
@@ -9063,11 +9063,11 @@ define void  @test_ld1lane_build_i8(i8* %a, i8* %b, i8* %c, i8* %d, i8* %e, i8*
   %v.g = insertelement <8 x i8> %v.f,  i8 %ld.g, i64 6
   %v1 = insertelement <8 x i8> %v.g,  i8 %ld.h, i64 7
   %sub = sub nsw <8 x i8> %v1, %v
-  store <8 x i8> %sub, <8 x i8>* %p
+  store <8 x i8> %sub, ptr %p
   ret void
 }
 
-define <4 x i32> @test_inc_cycle(<4 x i32> %vec, i32* %in) {
+define <4 x i32> @test_inc_cycle(<4 x i32> %vec, ptr %in) {
 ; CHECK-LABEL: test_inc_cycle:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ld1.s { v0 }[0], [x0]
@@ -9076,22 +9076,22 @@ define <4 x i32> @test_inc_cycle(<4 x i32> %vec, i32* %in) {
 ; CHECK-NEXT:    add x9, x0, x9, lsl #2
 ; CHECK-NEXT:    str x9, [x8, _var@PAGEOFF]
 ; CHECK-NEXT:    ret
-  %elt = load i32, i32* %in
+  %elt = load i32, ptr %in
   %newvec = insertelement <4 x i32> %vec, i32 %elt, i32 0
 
   ; %inc cannot be %elt directly because we check that the load is only
   ; used by the insert before trying to form post-inc.
   %inc.vec = bitcast <4 x i32> %newvec to <2 x i64>
   %inc = extractelement <2 x i64> %inc.vec, i32 0
-  %newaddr = getelementptr i32, i32* %in, i64 %inc
-  store i32* %newaddr, i32** @var
+  %newaddr = getelementptr i32, ptr %in, i64 %inc
+  store ptr %newaddr, ptr @var
 
   ret <4 x i32> %newvec
 }
 
-@var = global i32* null
+@var = global ptr null
 
-define i8 @load_single_extract_variable_index_i8(<16 x i8>* %A, i32 %idx) {
+define i8 @load_single_extract_variable_index_i8(ptr %A, i32 %idx) {
 ; CHECK-LABEL: load_single_extract_variable_index_i8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -9104,12 +9104,12 @@ define i8 @load_single_extract_variable_index_i8(<16 x i8>* %A, i32 %idx) {
 ; CHECK-NEXT:    ldrb w0, [x8]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
-  %lv = load <16 x i8>, <16 x i8>* %A
+  %lv = load <16 x i8>, ptr %A
   %e = extractelement <16 x i8> %lv, i32 %idx
   ret i8 %e
 }
 
-define i16 @load_single_extract_variable_index_i16(<8 x i16>* %A, i32 %idx) {
+define i16 @load_single_extract_variable_index_i16(ptr %A, i32 %idx) {
 ; CHECK-LABEL: load_single_extract_variable_index_i16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -9122,24 +9122,24 @@ define i16 @load_single_extract_variable_index_i16(<8 x i16>* %A, i32 %idx) {
 ; CHECK-NEXT:    ldrh w0, [x8]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
-  %lv = load <8 x i16>, <8 x i16>* %A
+  %lv = load <8 x i16>, ptr %A
   %e = extractelement <8 x i16> %lv, i32 %idx
   ret i16 %e
 }
 
-define i32 @load_single_extract_variable_index_i32(<4 x i32>* %A, i32 %idx) {
+define i32 @load_single_extract_variable_index_i32(ptr %A, i32 %idx) {
 ; CHECK-LABEL: load_single_extract_variable_index_i32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT:    and x8, x1, #0x3
 ; CHECK-NEXT:    ldr w0, [x0, x8, lsl #2]
 ; CHECK-NEXT:    ret
-  %lv = load <4 x i32>, <4 x i32>* %A
+  %lv = load <4 x i32>, ptr %A
   %e = extractelement <4 x i32> %lv, i32 %idx
   ret i32 %e
 }
 
-define i32 @load_single_extract_variable_index_v3i32_small_align(<3 x i32>* %A, i32 %idx) {
+define i32 @load_single_extract_variable_index_v3i32_small_align(ptr %A, i32 %idx) {
 ; CHECK-LABEL: load_single_extract_variable_index_v3i32_small_align:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w9, w1
@@ -9148,12 +9148,12 @@ define i32 @load_single_extract_variable_index_v3i32_small_align(<3 x i32>* %A,
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    ldr w0, [x0, x8, lsl #2]
 ; CHECK-NEXT:    ret
-  %lv = load <3 x i32>, <3 x i32>* %A, align 2
+  %lv = load <3 x i32>, ptr %A, align 2
   %e = extractelement <3 x i32> %lv, i32 %idx
   ret i32 %e
 }
 
-define i32 @load_single_extract_variable_index_v3i32_default_align(<3 x i32>* %A, i32 %idx) {
+define i32 @load_single_extract_variable_index_v3i32_default_align(ptr %A, i32 %idx) {
 ; CHECK-LABEL: load_single_extract_variable_index_v3i32_default_align:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w9, w1
@@ -9162,41 +9162,41 @@ define i32 @load_single_extract_variable_index_v3i32_default_align(<3 x i32>* %A
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    ldr w0, [x0, x8, lsl #2]
 ; CHECK-NEXT:    ret
-  %lv = load <3 x i32>, <3 x i32>* %A
+  %lv = load <3 x i32>, ptr %A
   %e = extractelement <3 x i32> %lv, i32 %idx
   ret i32 %e
 }
 
-define i32 @load_single_extract_valid_const_index_v3i32(<3 x i32>* %A, i32 %idx) {
+define i32 @load_single_extract_valid_const_index_v3i32(ptr %A, i32 %idx) {
 ; CHECK-LABEL: load_single_extract_valid_const_index_v3i32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr w0, [x0, #8]
 ; CHECK-NEXT:    ret
-  %lv = load <3 x i32>, <3 x i32>* %A
+  %lv = load <3 x i32>, ptr %A
   %e = extractelement <3 x i32> %lv, i32 2
   ret i32 %e
 }
 
-define i32 @load_single_extract_variable_index_masked_i32(<4 x i32>* %A, i32 %idx) {
+define i32 @load_single_extract_variable_index_masked_i32(ptr %A, i32 %idx) {
 ; CHECK-LABEL: load_single_extract_variable_index_masked_i32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    and w8, w1, #0x3
 ; CHECK-NEXT:    ldr w0, [x0, w8, uxtw #2]
 ; CHECK-NEXT:    ret
   %idx.x = and i32 %idx, 3
-  %lv = load <4 x i32>, <4 x i32>* %A
+  %lv = load <4 x i32>, ptr %A
   %e = extractelement <4 x i32> %lv, i32 %idx.x
   ret i32 %e
 }
 
-define i32 @load_single_extract_variable_index_masked2_i32(<4 x i32>* %A, i32 %idx) {
+define i32 @load_single_extract_variable_index_masked2_i32(ptr %A, i32 %idx) {
 ; CHECK-LABEL: load_single_extract_variable_index_masked2_i32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    and w8, w1, #0x1
 ; CHECK-NEXT:    ldr w0, [x0, w8, uxtw #2]
 ; CHECK-NEXT:    ret
   %idx.x = and i32 %idx, 1
-  %lv = load <4 x i32>, <4 x i32>* %A
+  %lv = load <4 x i32>, ptr %A
   %e = extractelement <4 x i32> %lv, i32 %idx.x
   ret i32 %e
 }
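For readers skimming the hunks above, the rewrite is purely mechanical: every typed pointer type collapses to the single opaque `ptr` type, while loads, GEPs and stores continue to spell out their value and element types. A minimal before/after sketch of the pattern (a hypothetical @example function, not taken from this diff):

  ; typed-pointer form (before the conversion)
  define float @example(float* %bar, float** %ptr) {
    %v   = load float, float* %bar
    %nxt = getelementptr float, float* %bar, i64 1
    store float* %nxt, float** %ptr
    ret float %v
  }

  ; opaque-pointer form (after the conversion): only the pointer types change
  define float @example(ptr %bar, ptr %ptr) {
    %v   = load float, ptr %bar
    %nxt = getelementptr float, ptr %bar, i64 1
    store ptr %nxt, ptr %ptr
    ret float %v
  }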

diff  --git a/llvm/test/CodeGen/AArch64/arm64-inline-asm.ll b/llvm/test/CodeGen/AArch64/arm64-inline-asm.ll
index 5bfc3241e2b8a..11faa0051d9e4 100644
--- a/llvm/test/CodeGen/AArch64/arm64-inline-asm.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-inline-asm.ll
@@ -70,7 +70,7 @@ entry:
 
 ; rdar://9553599
 
-define zeroext i8 @t6(i8* %src) nounwind {
+define zeroext i8 @t6(ptr %src) nounwind {
 ; CHECK-LABEL: t6:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    ; InlineAsm Start
@@ -79,11 +79,11 @@ define zeroext i8 @t6(i8* %src) nounwind {
 ; CHECK-NEXT:    and w0, w8, #0xff
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call i8 asm "ldtrb ${0:w}, [$1]", "=r,r"(i8* %src) nounwind
+  %0 = tail call i8 asm "ldtrb ${0:w}, [$1]", "=r,r"(ptr %src) nounwind
   ret i8 %0
 }
 
-define void @t7(i8* %f, i32 %g) nounwind {
+define void @t7(ptr %f, i32 %g) nounwind {
 ; CHECK-LABEL: t7:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -95,9 +95,9 @@ define void @t7(i8* %f, i32 %g) nounwind {
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
 entry:
-  %f.addr = alloca i8*, align 8
-  store i8* %f, i8** %f.addr, align 8
-  call void asm "str ${1:w}, $0", "=*Q,r"(i8** elementtype(i8*) %f.addr, i32 %g) nounwind
+  %f.addr = alloca ptr, align 8
+  store ptr %f, ptr %f.addr, align 8
+  call void asm "str ${1:w}, $0", "=*Q,r"(ptr elementtype(ptr) %f.addr, i32 %g) nounwind
   ret void
 }
 
@@ -202,7 +202,7 @@ define void @t9() nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %data = alloca <2 x double>, align 16
-  %0 = load <2 x double>, <2 x double>* %data, align 16
+  %0 = load <2 x double>, ptr %data, align 16
   call void asm sideeffect "mov.2d v4, $0\0A", "w,~{v4}"(<2 x double> %0) nounwind
   ret void
 }
@@ -242,14 +242,13 @@ define void @t10() nounwind {
 entry:
   %data = alloca <2 x float>, align 8
   %a = alloca [2 x float], align 4
-  %arraydecay = getelementptr inbounds [2 x float], [2 x float]* %a, i32 0, i32 0
-  %0 = load <2 x float>, <2 x float>* %data, align 8
-  call void asm sideeffect "ldr ${1:z}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
-  call void asm sideeffect "ldr ${1:q}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
-  call void asm sideeffect "ldr ${1:d}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
-  call void asm sideeffect "ldr ${1:s}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
-  call void asm sideeffect "ldr ${1:h}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
-  call void asm sideeffect "ldr ${1:b}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
+  %0 = load <2 x float>, ptr %data, align 8
+  call void asm sideeffect "ldr ${1:z}, [$0]\0A", "r,w"(ptr %a, <2 x float> %0) nounwind
+  call void asm sideeffect "ldr ${1:q}, [$0]\0A", "r,w"(ptr %a, <2 x float> %0) nounwind
+  call void asm sideeffect "ldr ${1:d}, [$0]\0A", "r,w"(ptr %a, <2 x float> %0) nounwind
+  call void asm sideeffect "ldr ${1:s}, [$0]\0A", "r,w"(ptr %a, <2 x float> %0) nounwind
+  call void asm sideeffect "ldr ${1:h}, [$0]\0A", "r,w"(ptr %a, <2 x float> %0) nounwind
+  call void asm sideeffect "ldr ${1:b}, [$0]\0A", "r,w"(ptr %a, <2 x float> %0) nounwind
   ret void
 }
 
@@ -271,9 +270,9 @@ define void @t11() nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = alloca i32, align 4
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   call void asm sideeffect "mov ${1:x}, ${0:x}\0A", "r,i"(i32 %0, i32 0) nounwind
-  %1 = load i32, i32* %a, align 4
+  %1 = load i32, ptr %a, align 4
   call void asm sideeffect "mov ${1:w}, ${0:w}\0A", "r,i"(i32 %1, i32 0) nounwind
   ret void
 }
@@ -290,7 +289,7 @@ define void @t12() nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %data = alloca <4 x float>, align 16
-  %0 = load <4 x float>, <4 x float>* %data, align 16
+  %0 = load <4 x float>, ptr %data, align 16
   call void asm sideeffect "mov.2d v4, $0\0A", "x,~{v4}"(<4 x float> %0) nounwind
   ret void
 }
@@ -365,7 +364,7 @@ entry:
 
 ; rdar://problem/14285178
 
-define void @test_zero_reg(i32* %addr) {
+define void @test_zero_reg(ptr %addr) {
 ; CHECK-LABEL: test_zero_reg:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #1
@@ -441,7 +440,7 @@ define void @test_constraint_w(i32 %a) {
   ret void
 }
 
-define void @test_inline_modifier_a(i8* %ptr) nounwind {
+define void @test_inline_modifier_a(ptr %ptr) nounwind {
 ; CHECK-LABEL: test_inline_modifier_a:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; InlineAsm Start
@@ -449,7 +448,7 @@ define void @test_inline_modifier_a(i8* %ptr) nounwind {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    ret
-  tail call void asm sideeffect "prfm pldl1keep, ${0:a}\0A", "r"(i8* %ptr)
+  tail call void asm sideeffect "prfm pldl1keep, ${0:a}\0A", "r"(ptr %ptr)
   ret void
 }
 
@@ -464,7 +463,7 @@ define void @test_zero_address() {
 ; CHECK-NEXT:    ; InlineAsm End
 ; CHECK-NEXT:    ret
 entry:
-  tail call i32 asm sideeffect "ldr $0, $1 \0A", "=r,*Q"(i32* elementtype(i32) null)
+  tail call i32 asm sideeffect "ldr $0, $1 \0A", "=r,*Q"(ptr elementtype(i32) null)
   ret void
 }
 
@@ -480,7 +479,7 @@ define void @test_no_hash_in_lane_specifier() {
   ret void
 }
 
-define void @test_vector_too_large_r_m(<9 x float>* nocapture readonly %0) {
+define void @test_vector_too_large_r_m(ptr nocapture readonly %0) {
 ; CHECK-LABEL: test_vector_too_large_r_m:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    sub sp, sp, #64
@@ -496,9 +495,9 @@ define void @test_vector_too_large_r_m(<9 x float>* nocapture readonly %0) {
 ; CHECK-NEXT:    ret
 entry:
   %m.addr = alloca <9 x float>, align 16
-  %m = load <9 x float>, <9 x float>* %0, align 16
-  store <9 x float> %m, <9 x float>* %m.addr, align 16
-  call void asm sideeffect "", "=*r|m,0,~{memory}"(<9 x float>* elementtype(<9 x float>) nonnull %m.addr, <9 x float> %m)
+  %m = load <9 x float>, ptr %0, align 16
+  store <9 x float> %m, ptr %m.addr, align 16
+  call void asm sideeffect "", "=*r|m,0,~{memory}"(ptr elementtype(<9 x float>) nonnull %m.addr, <9 x float> %m)
   ret void
 }
 
@@ -514,6 +513,6 @@ define void @test_o_output_constraint() {
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %b = alloca i8, align 1
-  call void asm "mov $0, 7", "=*o"(i8* elementtype(i8) %b)
+  call void asm "mov $0, 7", "=*o"(ptr elementtype(i8) %b)
   ret void
 }
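Two details in the inline-asm hunks above are worth calling out. Indirect asm operands keep their elementtype attribute, since with an opaque ptr argument that attribute is what still records the pointee type the memory constraint refers to; and in t10 the zero-index arraydecay GEP disappears entirely, because getelementptr [2 x float], ptr %a, i32 0, i32 0 is simply %a once pointers are untyped. A minimal sketch of the surviving elementtype pattern (hypothetical, not from this diff):

  %slot = alloca i32, align 4
  call void asm "str ${1:w}, $0", "=*Q,r"(ptr elementtype(i32) %slot, i32 7)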

diff  --git a/llvm/test/CodeGen/AArch64/arm64-instruction-mix-remarks.ll b/llvm/test/CodeGen/AArch64/arm64-instruction-mix-remarks.ll
index 0c2cf1778722e..dc4d12b2b21ca 100644
--- a/llvm/test/CodeGen/AArch64/arm64-instruction-mix-remarks.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-instruction-mix-remarks.ll
@@ -24,7 +24,7 @@
 ; YAML:       - INST_madd:   '2'
 ; YAML:       - INST_movz:   '1'
 ; YAML:       - INST_str:    '1'
-define i32 @foo(i32* %ptr, i32 %x, i64 %y) !dbg !3 {
+define i32 @foo(ptr %ptr, i32 %x, i64 %y) !dbg !3 {
 ; CHECK-LABEL: foo:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    ldr w9, [x0]
@@ -43,7 +43,7 @@ define i32 @foo(i32* %ptr, i32 %x, i64 %y) !dbg !3 {
 ; CHECK-NEXT:    ; kill: def $w0 killed $w0 killed $x0
 ; CHECK-NEXT:    ret
 entry:
-  %l = load i32, i32* %ptr, !dbg !4
+  %l = load i32, ptr %ptr, !dbg !4
   %add = add i32 %l, %x, !dbg !4
   %add.ext = zext i32 %add to i64, !dbg !4
   %add.64 = add i64 %add.ext, %y, !dbg !4
@@ -54,7 +54,7 @@ then:
   ret i32 %add, !dbg !5
 
 else:
-  store i32 10, i32* %ptr, !dbg !6
+  store i32 10, ptr %ptr, !dbg !6
   %res = mul i32 %add, %x, !dbg !6
   %res.2 = mul i32 %res, %x, !dbg !6
   ret i32 %res.2, !dbg !6

diff  --git a/llvm/test/CodeGen/AArch64/arm64-jumptable.ll b/llvm/test/CodeGen/AArch64/arm64-jumptable.ll
index fac3e5704d152..d4ac9e72b28ff 100644
--- a/llvm/test/CodeGen/AArch64/arm64-jumptable.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-jumptable.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=arm64-linux-gnu < %s | FileCheck %s --check-prefix=CHECK-LINUX
 ; <rdar://11417675>
 
-define void @sum(i32 %a, i32* %to, i32 %c) {
+define void @sum(i32 %a, ptr %to, i32 %c) {
 entry:
   switch i32 %a, label %exit [
     i32 1, label %bb1
@@ -19,7 +19,7 @@ bb4:
   br label %exit.sink.split
 exit.sink.split:
   %.sink = phi i32 [ 5, %bb4 ], [ %b, %bb1 ], [ 3, %bb3 ], [ %a, %entry ]
-  store i32 %.sink, i32* %to
+  store i32 %.sink, ptr %to
   br label %exit
 exit:
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/arm64-large-frame.ll b/llvm/test/CodeGen/AArch64/arm64-large-frame.ll
index 059827c5242a1..a5c0fe5ccb8b6 100644
--- a/llvm/test/CodeGen/AArch64/arm64-large-frame.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-large-frame.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -verify-machineinstrs -mtriple=arm64-none-linux-gnu -frame-pointer=non-leaf -disable-post-ra < %s | FileCheck %s
-declare void @use_addr(i8*)
+declare void @use_addr(ptr)
 
-@addr = global i8* null
+@addr = global ptr null
 
 define void @test_bigframe() {
 ; CHECK-LABEL: test_bigframe:
@@ -23,23 +23,23 @@ define void @test_bigframe() {
 ; CHECK: add [[TMP:x[0-9]+]], sp, #4095, lsl #12
 ; CHECK: add [[TMP1:x[0-9]+]], [[TMP]], #787, lsl #12
 ; CHECK: add {{x[0-9]+}}, [[TMP1]], #3344
-  store volatile i8* %var1, i8** @addr
+  store volatile ptr %var1, ptr @addr
 
-  %var1plus2 = getelementptr i8, i8* %var1, i32 2
-  store volatile i8* %var1plus2, i8** @addr
+  %var1plus2 = getelementptr i8, ptr %var1, i32 2
+  store volatile ptr %var1plus2, ptr @addr
 
 ; CHECK: add [[TMP:x[0-9]+]], sp, #4095, lsl #12
 ; CHECK: add [[TMP1:x[0-9]+]], [[TMP]], #787, lsl #12
 ; CHECK: add {{x[0-9]+}}, [[TMP1]], #3328
-  store volatile i8* %var2, i8** @addr
+  store volatile ptr %var2, ptr @addr
 
-  %var2plus2 = getelementptr i8, i8* %var2, i32 2
-  store volatile i8* %var2plus2, i8** @addr
+  %var2plus2 = getelementptr i8, ptr %var2, i32 2
+  store volatile ptr %var2plus2, ptr @addr
 
-  store volatile i8* %var3, i8** @addr
+  store volatile ptr %var3, ptr @addr
 
-  %var3plus2 = getelementptr i8, i8* %var3, i32 2
-  store volatile i8* %var3plus2, i8** @addr
+  %var3plus2 = getelementptr i8, ptr %var3, i32 2
+  store volatile ptr %var3plus2, ptr @addr
 
 ; CHECK: add sp, sp, #4095, lsl #12
 ; CHECK: add sp, sp, #4095, lsl #12
@@ -60,14 +60,14 @@ define void @test_mediumframe() {
 ; CHECK-NEXT: sub sp, sp, #1168
 ; CHECK-NEXT: .cfi_def_cfa_offset 2000032
 
-  store volatile i8* %var1, i8** @addr
+  store volatile ptr %var1, ptr @addr
 ; CHECK: add     [[VAR1ADDR:x[0-9]+]], sp, #244, lsl #12
 ; CHECK: add     [[VAR1ADDR]], [[VAR1ADDR]], #592
 
 ; CHECK: add [[VAR2ADDR:x[0-9]+]], sp, #244, lsl #12
 ; CHECK: add [[VAR2ADDR]], [[VAR2ADDR]], #576
 
-  store volatile i8* %var2, i8** @addr
+  store volatile ptr %var2, ptr @addr
 ; CHECK: add     sp, sp, #488, lsl #12
 ; CHECK: add     sp, sp, #1168
   ret void
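In the arm64-ld-from-st.ll diff that follows, the conversion also drops the pointer-to-pointer bitcasts (%0 = bitcast i64* %P to i32* and friends): with a single ptr type such casts carry no information, so the GEPs index directly off %P and the remaining unnamed values renumber (%1 becomes %0). A minimal sketch of the simplification (hypothetical, not from this diff):

  ; before: reinterpret the i64 slot as i32s via a bitcast
  %cast = bitcast i64* %P to i32*
  %hi   = getelementptr inbounds i32, i32* %cast, i64 1
  %v    = load i32, i32* %hi

  ; after: the bitcast is a no-op, so index the ptr directly
  %hi = getelementptr inbounds i32, ptr %P, i64 1
  %v  = load i32, ptr %hi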

diff  --git a/llvm/test/CodeGen/AArch64/arm64-ld-from-st.ll b/llvm/test/CodeGen/AArch64/arm64-ld-from-st.ll
index 5488c21fa298f..ac548385599f9 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ld-from-st.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ld-from-st.ll
@@ -2,665 +2,610 @@
 
 ; CHECK-LABEL: Str64Ldr64
 ; CHECK: mov x0, x1
-define i64 @Str64Ldr64(i64* nocapture %P, i64 %v, i64 %n) {
+define i64 @Str64Ldr64(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i64*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i64, i64* %0, i64 1
-  %1 = load i64, i64* %arrayidx1
-  ret i64 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i64, ptr %P, i64 1
+  %0 = load i64, ptr %arrayidx1
+  ret i64 %0
 }
 
 ; CHECK-LABEL: Str64Ldr32_0
 ; CHECK: mov w0, w1
-define i32 @Str64Ldr32_0(i64* nocapture %P, i64 %v, i64 %n) {
+define i32 @Str64Ldr32_0(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i32*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i32, i32* %0, i64 2
-  %1 = load i32, i32* %arrayidx1
-  ret i32 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i32, ptr %P, i64 2
+  %0 = load i32, ptr %arrayidx1
+  ret i32 %0
 }
 
 ; CHECK-LABEL: Str64Ldr32_1
 ; CHECK: lsr x0, x1, #32
-define i32 @Str64Ldr32_1(i64* nocapture %P, i64 %v, i64 %n) {
+define i32 @Str64Ldr32_1(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i32*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i32, i32* %0, i64 3
-  %1 = load i32, i32* %arrayidx1
-  ret i32 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i32, ptr %P, i64 3
+  %0 = load i32, ptr %arrayidx1
+  ret i32 %0
 }
 
 ; CHECK-LABEL: Str64Ldr16_0
 ; CHECK: mov w0, w1
-define i16 @Str64Ldr16_0(i64* nocapture %P, i64 %v, i64 %n) {
+define i16 @Str64Ldr16_0(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i16*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 4
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 4
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 ; CHECK-LABEL: Str64Ldr16_1
 ; CHECK: ubfx x0, x1, #16, #16
-define i16 @Str64Ldr16_1(i64* nocapture %P, i64 %v, i64 %n) {
+define i16 @Str64Ldr16_1(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i16*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 5
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 5
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 ; CHECK-LABEL: Str64Ldr16_2
 ; CHECK: ubfx x0, x1, #32, #16
-define i16 @Str64Ldr16_2(i64* nocapture %P, i64 %v, i64 %n) {
+define i16 @Str64Ldr16_2(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i16*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 6
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 6
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 ; CHECK-LABEL: Str64Ldr16_3
 ; CHECK: lsr x0, x1, #48
-define i16 @Str64Ldr16_3(i64* nocapture %P, i64 %v, i64 %n) {
+define i16 @Str64Ldr16_3(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i16*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 7
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 7
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 ; CHECK-LABEL: Str64Ldr8_0
 ; CHECK: mov w0, w1
-define i8 @Str64Ldr8_0(i64* nocapture %P, i64 %v, i64 %n) {
+define i8 @Str64Ldr8_0(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i8*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 8
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 8
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Str64Ldr8_1
 ; CHECK: ubfx x0, x1, #8, #8
-define i8 @Str64Ldr8_1(i64* nocapture %P, i64 %v, i64 %n) {
+define i8 @Str64Ldr8_1(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i8*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 9
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 9
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Str64Ldr8_2
 ; CHECK: ubfx x0, x1, #16, #8
-define i8 @Str64Ldr8_2(i64* nocapture %P, i64 %v, i64 %n) {
+define i8 @Str64Ldr8_2(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i8*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 10
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 10
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Str64Ldr8_3
 ; CHECK: ubfx x0, x1, #24, #8
-define i8 @Str64Ldr8_3(i64* nocapture %P, i64 %v, i64 %n) {
+define i8 @Str64Ldr8_3(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i8*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 11
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 11
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Str64Ldr8_4
 ; CHECK: ubfx x0, x1, #32, #8
-define i8 @Str64Ldr8_4(i64* nocapture %P, i64 %v, i64 %n) {
+define i8 @Str64Ldr8_4(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i8*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 12
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 12
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Str64Ldr8_5
 ; CHECK: ubfx x0, x1, #40, #8
-define i8 @Str64Ldr8_5(i64* nocapture %P, i64 %v, i64 %n) {
+define i8 @Str64Ldr8_5(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i8*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 13
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 13
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Str64Ldr8_6
 ; CHECK: ubfx x0, x1, #48, #8
-define i8 @Str64Ldr8_6(i64* nocapture %P, i64 %v, i64 %n) {
+define i8 @Str64Ldr8_6(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i8*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 14
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 14
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Str64Ldr8_7
 ; CHECK: lsr x0, x1, #56
-define i8 @Str64Ldr8_7(i64* nocapture %P, i64 %v, i64 %n) {
+define i8 @Str64Ldr8_7(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i8*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 15
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 15
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Str32Ldr32
 ; CHECK: mov w0, w1
-define i32 @Str32Ldr32(i32* nocapture %P, i32 %v, i64 %n) {
+define i32 @Str32Ldr32(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i32*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 %v, i32* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i32, i32* %0, i64 1
-  %1 = load i32, i32* %arrayidx1
-  ret i32 %1
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i32, ptr %P, i64 1
+  %0 = load i32, ptr %arrayidx1
+  ret i32 %0
 }
 
 ; CHECK-LABEL: Str32Ldr16_0
 ; CHECK: mov w0, w1
-define i16 @Str32Ldr16_0(i32* nocapture %P, i32 %v, i64 %n) {
+define i16 @Str32Ldr16_0(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i16*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 %v, i32* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 2
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 2
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 ; CHECK-LABEL: Str32Ldr16_1
 ; CHECK: lsr	w0, w1, #16
-define i16 @Str32Ldr16_1(i32* nocapture %P, i32 %v, i64 %n) {
+define i16 @Str32Ldr16_1(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i16*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 %v, i32* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 3
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 3
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 ; CHECK-LABEL: Str32Ldr8_0
 ; CHECK: mov w0, w1
-define i8 @Str32Ldr8_0(i32* nocapture %P, i32 %v, i64 %n) {
+define i8 @Str32Ldr8_0(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i8*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 %v, i32* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 4
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 4
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Str32Ldr8_1
 ; CHECK: ubfx w0, w1, #8, #8
-define i8 @Str32Ldr8_1(i32* nocapture %P, i32 %v, i64 %n) {
+define i8 @Str32Ldr8_1(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i8*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 %v, i32* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 5
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 5
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Str32Ldr8_2
 ; CHECK: ubfx w0, w1, #16, #8
-define i8 @Str32Ldr8_2(i32* nocapture %P, i32 %v, i64 %n) {
+define i8 @Str32Ldr8_2(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i8*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 %v, i32* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 6
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 6
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Str32Ldr8_3
 ; CHECK: lsr w0, w1, #24
-define i8 @Str32Ldr8_3(i32* nocapture %P, i32 %v, i64 %n) {
+define i8 @Str32Ldr8_3(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i8*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 %v, i32* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 7
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 7
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Str16Ldr16
 ; CHECK: mov w0, w1
-define i16 @Str16Ldr16(i16* nocapture %P, i16 %v, i64 %n) {
+define i16 @Str16Ldr16(ptr nocapture %P, i16 %v, i64 %n) {
 entry:
-  %0 = bitcast i16* %P to i16*
-  %arrayidx0 = getelementptr inbounds i16, i16* %P, i64 1
-  store i16 %v, i16* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 1
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx0 = getelementptr inbounds i16, ptr %P, i64 1
+  store i16 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 1
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 ; CHECK-LABEL: Str16Ldr8_0
 ; CHECK: mov w0, w1
-define i8 @Str16Ldr8_0(i16* nocapture %P, i16 %v, i64 %n) {
+define i8 @Str16Ldr8_0(ptr nocapture %P, i16 %v, i64 %n) {
 entry:
-  %0 = bitcast i16* %P to i8*
-  %arrayidx0 = getelementptr inbounds i16, i16* %P, i64 1
-  store i16 %v, i16* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 2
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i16, ptr %P, i64 1
+  store i16 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 2
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Str16Ldr8_1
 ; CHECK: ubfx w0, w1, #8, #8
-define i8 @Str16Ldr8_1(i16* nocapture %P, i16 %v, i64 %n) {
+define i8 @Str16Ldr8_1(ptr nocapture %P, i16 %v, i64 %n) {
 entry:
-  %0 = bitcast i16* %P to i8*
-  %arrayidx0 = getelementptr inbounds i16, i16* %P, i64 1
-  store i16 %v, i16* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 3
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i16, ptr %P, i64 1
+  store i16 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 3
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 
 ; CHECK-LABEL: Unscaled_Str64Ldr64
 ; CHECK: mov x0, x1
-define i64 @Unscaled_Str64Ldr64(i64* nocapture %P, i64 %v, i64 %n) {
+define i64 @Unscaled_Str64Ldr64(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i64*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 -1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i64, i64* %0, i64 -1
-  %1 = load i64, i64* %arrayidx1
-  ret i64 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 -1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i64, ptr %P, i64 -1
+  %0 = load i64, ptr %arrayidx1
+  ret i64 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str64Ldr32_0
 ; CHECK: mov w0, w1
-define i32 @Unscaled_Str64Ldr32_0(i64* nocapture %P, i64 %v, i64 %n) {
+define i32 @Unscaled_Str64Ldr32_0(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i32*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 -1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i32, i32* %0, i64 -2
-  %1 = load i32, i32* %arrayidx1
-  ret i32 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 -1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i32, ptr %P, i64 -2
+  %0 = load i32, ptr %arrayidx1
+  ret i32 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str64Ldr32_1
 ; CHECK: lsr x0, x1, #32
-define i32 @Unscaled_Str64Ldr32_1(i64* nocapture %P, i64 %v, i64 %n) {
+define i32 @Unscaled_Str64Ldr32_1(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i32*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 -1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i32, i32* %0, i64 -1
-  %1 = load i32, i32* %arrayidx1
-  ret i32 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 -1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i32, ptr %P, i64 -1
+  %0 = load i32, ptr %arrayidx1
+  ret i32 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str64Ldr16_0
 ; CHECK: mov w0, w1
-define i16 @Unscaled_Str64Ldr16_0(i64* nocapture %P, i64 %v, i64 %n) {
+define i16 @Unscaled_Str64Ldr16_0(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i16*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 -1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 -4
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 -1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 -4
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str64Ldr16_1
 ; CHECK: ubfx x0, x1, #16, #16
-define i16 @Unscaled_Str64Ldr16_1(i64* nocapture %P, i64 %v, i64 %n) {
+define i16 @Unscaled_Str64Ldr16_1(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i16*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 -1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 -3
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 -1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 -3
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str64Ldr16_2
 ; CHECK: ubfx x0, x1, #32, #16
-define i16 @Unscaled_Str64Ldr16_2(i64* nocapture %P, i64 %v, i64 %n) {
+define i16 @Unscaled_Str64Ldr16_2(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i16*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 -1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 -2
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 -1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 -2
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str64Ldr16_3
 ; CHECK: lsr x0, x1, #48
-define i16 @Unscaled_Str64Ldr16_3(i64* nocapture %P, i64 %v, i64 %n) {
+define i16 @Unscaled_Str64Ldr16_3(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i16*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 -1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 -1
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 -1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 -1
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str64Ldr8_0
 ; CHECK: mov w0, w1
-define i8 @Unscaled_Str64Ldr8_0(i64* nocapture %P, i64 %v, i64 %n) {
+define i8 @Unscaled_Str64Ldr8_0(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i8*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 -1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 -8
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 -1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 -8
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str64Ldr8_1
 ; CHECK: ubfx x0, x1, #8, #8
-define i8 @Unscaled_Str64Ldr8_1(i64* nocapture %P, i64 %v, i64 %n) {
+define i8 @Unscaled_Str64Ldr8_1(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i8*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 -1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 -7
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 -1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 -7
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str64Ldr8_2
 ; CHECK: ubfx x0, x1, #16, #8
-define i8 @Unscaled_Str64Ldr8_2(i64* nocapture %P, i64 %v, i64 %n) {
+define i8 @Unscaled_Str64Ldr8_2(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i8*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 -1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 -6
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 -1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 -6
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str64Ldr8_3
 ; CHECK: ubfx x0, x1, #24, #8
-define i8 @Unscaled_Str64Ldr8_3(i64* nocapture %P, i64 %v, i64 %n) {
+define i8 @Unscaled_Str64Ldr8_3(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i8*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 -1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 -5
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 -1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 -5
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str64Ldr8_4
 ; CHECK: ubfx x0, x1, #32, #8
-define i8 @Unscaled_Str64Ldr8_4(i64* nocapture %P, i64 %v, i64 %n) {
+define i8 @Unscaled_Str64Ldr8_4(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i8*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 -1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 -4
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 -1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 -4
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str64Ldr8_5
 ; CHECK: ubfx x0, x1, #40, #8
-define i8 @Unscaled_Str64Ldr8_5(i64* nocapture %P, i64 %v, i64 %n) {
+define i8 @Unscaled_Str64Ldr8_5(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i8*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 -1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 -3
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 -1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 -3
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str64Ldr8_6
 ; CHECK: ubfx x0, x1, #48, #8
-define i8 @Unscaled_Str64Ldr8_6(i64* nocapture %P, i64 %v, i64 %n) {
+define i8 @Unscaled_Str64Ldr8_6(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i8*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 -1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 -2
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 -1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 -2
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str64Ldr8_7
 ; CHECK: lsr x0, x1, #56
-define i8 @Unscaled_Str64Ldr8_7(i64* nocapture %P, i64 %v, i64 %n) {
+define i8 @Unscaled_Str64Ldr8_7(ptr nocapture %P, i64 %v, i64 %n) {
 entry:
-  %0 = bitcast i64* %P to i8*
-  %arrayidx0 = getelementptr inbounds i64, i64* %P, i64 -1
-  store i64 %v, i64* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 -1
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i64, ptr %P, i64 -1
+  store i64 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 -1
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str32Ldr32
 ; CHECK: mov w0, w1
-define i32 @Unscaled_Str32Ldr32(i32* nocapture %P, i32 %v, i64 %n) {
+define i32 @Unscaled_Str32Ldr32(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i32*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 -1
-  store i32 %v, i32* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i32, i32* %0, i64 -1
-  %1 = load i32, i32* %arrayidx1
-  ret i32 %1
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 -1
+  store i32 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i32, ptr %P, i64 -1
+  %0 = load i32, ptr %arrayidx1
+  ret i32 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str32Ldr16_0
 ; CHECK: mov w0, w1
-define i16 @Unscaled_Str32Ldr16_0(i32* nocapture %P, i32 %v, i64 %n) {
+define i16 @Unscaled_Str32Ldr16_0(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i16*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 -1
-  store i32 %v, i32* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 -2
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 -1
+  store i32 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 -2
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str32Ldr16_1
 ; CHECK: lsr	w0, w1, #16
-define i16 @Unscaled_Str32Ldr16_1(i32* nocapture %P, i32 %v, i64 %n) {
+define i16 @Unscaled_Str32Ldr16_1(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i16*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 -1
-  store i32 %v, i32* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 -1
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 -1
+  store i32 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 -1
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str32Ldr8_0
 ; CHECK: mov w0, w1
-define i8 @Unscaled_Str32Ldr8_0(i32* nocapture %P, i32 %v, i64 %n) {
+define i8 @Unscaled_Str32Ldr8_0(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i8*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 -1
-  store i32 %v, i32* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 -4
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 -1
+  store i32 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 -4
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str32Ldr8_1
 ; CHECK: ubfx w0, w1, #8, #8
-define i8 @Unscaled_Str32Ldr8_1(i32* nocapture %P, i32 %v, i64 %n) {
+define i8 @Unscaled_Str32Ldr8_1(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i8*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 -1
-  store i32 %v, i32* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 -3
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 -1
+  store i32 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 -3
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str32Ldr8_2
 ; CHECK: ubfx w0, w1, #16, #8
-define i8 @Unscaled_Str32Ldr8_2(i32* nocapture %P, i32 %v, i64 %n) {
+define i8 @Unscaled_Str32Ldr8_2(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i8*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 -1
-  store i32 %v, i32* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 -2
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 -1
+  store i32 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 -2
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str32Ldr8_3
 ; CHECK: lsr w0, w1, #24
-define i8 @Unscaled_Str32Ldr8_3(i32* nocapture %P, i32 %v, i64 %n) {
+define i8 @Unscaled_Str32Ldr8_3(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i8*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 -1
-  store i32 %v, i32* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 -1
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 -1
+  store i32 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 -1
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str16Ldr16
 ; CHECK: mov w0, w1
-define i16 @Unscaled_Str16Ldr16(i16* nocapture %P, i16 %v, i64 %n) {
+define i16 @Unscaled_Str16Ldr16(ptr nocapture %P, i16 %v, i64 %n) {
 entry:
-  %0 = bitcast i16* %P to i16*
-  %arrayidx0 = getelementptr inbounds i16, i16* %P, i64 -1
-  store i16 %v, i16* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 -1
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx0 = getelementptr inbounds i16, ptr %P, i64 -1
+  store i16 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 -1
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str16Ldr8_0
 ; CHECK: mov w0, w1
-define i8 @Unscaled_Str16Ldr8_0(i16* nocapture %P, i16 %v, i64 %n) {
+define i8 @Unscaled_Str16Ldr8_0(ptr nocapture %P, i16 %v, i64 %n) {
 entry:
-  %0 = bitcast i16* %P to i8*
-  %arrayidx0 = getelementptr inbounds i16, i16* %P, i64 -1
-  store i16 %v, i16* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 -2
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i16, ptr %P, i64 -1
+  store i16 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 -2
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: Unscaled_Str16Ldr8_1
 ; CHECK: ubfx w0, w1, #8, #8
-define i8 @Unscaled_Str16Ldr8_1(i16* nocapture %P, i16 %v, i64 %n) {
+define i8 @Unscaled_Str16Ldr8_1(ptr nocapture %P, i16 %v, i64 %n) {
 entry:
-  %0 = bitcast i16* %P to i8*
-  %arrayidx0 = getelementptr inbounds i16, i16* %P, i64 -1
-  store i16 %v, i16* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i8, i8* %0, i64 -1
-  %1 = load i8, i8* %arrayidx1
-  ret i8 %1
+  %arrayidx0 = getelementptr inbounds i16, ptr %P, i64 -1
+  store i16 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i8, ptr %P, i64 -1
+  %0 = load i8, ptr %arrayidx1
+  ret i8 %0
 }
 
 ; CHECK-LABEL: StrVolatileLdr
 ; CHECK: ldrh
-define i16 @StrVolatileLdr(i32* nocapture %P, i32 %v, i64 %n) {
+define i16 @StrVolatileLdr(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i16*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 %v, i32* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 2
-  %1 = load volatile i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 2
+  %0 = load volatile i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 ; CHECK-LABEL: StrNotInRangeLdr
 ; CHECK: ldrh
-define i16 @StrNotInRangeLdr(i32* nocapture %P, i32 %v, i64 %n) {
+define i16 @StrNotInRangeLdr(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i16*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 %v, i32* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 1
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 1
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 ; CHECK-LABEL: Unscaled_StrNotInRangeLdr
 ; CHECK: ldurh
-define i16 @Unscaled_StrNotInRangeLdr(i32* nocapture %P, i32 %v, i64 %n) {
+define i16 @Unscaled_StrNotInRangeLdr(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i16*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 -1
-  store i32 %v, i32* %arrayidx0
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 -3
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 -1
+  store i32 %v, ptr %arrayidx0
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 -3
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 ; CHECK-LABEL: StrCallLdr
 ; CHECK: ldrh
-define i16 @StrCallLdr(i32* nocapture %P, i32 %v, i64 %n) {
+define i16 @StrCallLdr(ptr nocapture %P, i32 %v, i64 %n) {
 entry:
-  %0 = bitcast i32* %P to i16*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 %v, i32* %arrayidx0
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 %v, ptr %arrayidx0
   %c = call i1 @test_dummy()
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 1
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 1
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }
 
 declare i1 @test_dummy()
 
 ; CHECK-LABEL: StrStrLdr
 ; CHECK: ldrh
-define i16 @StrStrLdr(i32 %v, i32* %P, i32* %P2, i32 %n) {
-entry:
-  %0 = bitcast i32* %P to i16*
-  %arrayidx0 = getelementptr inbounds i32, i32* %P, i64 1
-  store i32 %v, i32* %arrayidx0
-  store i32 %n, i32* %P2
-  %arrayidx1 = getelementptr inbounds i16, i16* %0, i64 2
-  %1 = load i16, i16* %arrayidx1
-  ret i16 %1
+define i16 @StrStrLdr(i32 %v, ptr %P, ptr %P2, i32 %n) {
+entry:
+  %arrayidx0 = getelementptr inbounds i32, ptr %P, i64 1
+  store i32 %v, ptr %arrayidx0
+  store i32 %n, ptr %P2
+  %arrayidx1 = getelementptr inbounds i16, ptr %P, i64 2
+  %0 = load i16, ptr %arrayidx1
+  ret i16 %0
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-ld1.ll b/llvm/test/CodeGen/AArch64/arm64-ld1.ll
index 6b119932cb737..487a2fc155c4d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ld1.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ld1.ll
@@ -4,452 +4,452 @@
 %struct.__neon_int8x8x3_t = type { <8 x i8>,  <8 x i8>,  <8 x i8> }
 %struct.__neon_int8x8x4_t = type { <8 x i8>,  <8 x i8>, <8 x i8>,  <8 x i8> }
 
-define %struct.__neon_int8x8x2_t @ld2_8b(i8* %A) nounwind {
+define %struct.__neon_int8x8x2_t @ld2_8b(ptr %A) nounwind {
 ; CHECK-LABEL: ld2_8b
 ; Make sure we are loading into the results defined by the ABI (i.e., v0, v1)
 ; and from the argument of the function also defined by ABI (i.e., x0)
 ; CHECK: ld2.8b { v0, v1 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2.v8i8.p0i8(i8* %A)
+	%tmp2 = call %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2.v8i8.p0(ptr %A)
 	ret %struct.__neon_int8x8x2_t  %tmp2
 }
 
-define %struct.__neon_int8x8x3_t @ld3_8b(i8* %A) nounwind {
+define %struct.__neon_int8x8x3_t @ld3_8b(ptr %A) nounwind {
 ; CHECK-LABEL: ld3_8b
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3.8b { v0, v1, v2 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3.v8i8.p0i8(i8* %A)
+	%tmp2 = call %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3.v8i8.p0(ptr %A)
 	ret %struct.__neon_int8x8x3_t  %tmp2
 }
 
-define %struct.__neon_int8x8x4_t @ld4_8b(i8* %A) nounwind {
+define %struct.__neon_int8x8x4_t @ld4_8b(ptr %A) nounwind {
 ; CHECK-LABEL: ld4_8b
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4.8b { v0, v1, v2, v3 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4.v8i8.p0i8(i8* %A)
+	%tmp2 = call %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4.v8i8.p0(ptr %A)
 	ret %struct.__neon_int8x8x4_t  %tmp2
 }
 
-declare %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2.v8i8.p0i8(i8*) nounwind readonly
-declare %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3.v8i8.p0i8(i8*) nounwind readonly
-declare %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4.v8i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2.v8i8.p0(ptr) nounwind readonly
+declare %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3.v8i8.p0(ptr) nounwind readonly
+declare %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4.v8i8.p0(ptr) nounwind readonly
 
 %struct.__neon_int8x16x2_t = type { <16 x i8>,  <16 x i8> }
 %struct.__neon_int8x16x3_t = type { <16 x i8>,  <16 x i8>,  <16 x i8> }
 %struct.__neon_int8x16x4_t = type { <16 x i8>,  <16 x i8>, <16 x i8>,  <16 x i8> }
 
-define %struct.__neon_int8x16x2_t @ld2_16b(i8* %A) nounwind {
+define %struct.__neon_int8x16x2_t @ld2_16b(ptr %A) nounwind {
 ; CHECK-LABEL: ld2_16b
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2.16b { v0, v1 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp2 = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A)
+  %tmp2 = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2.v16i8.p0(ptr %A)
   ret %struct.__neon_int8x16x2_t  %tmp2
 }
 
-define %struct.__neon_int8x16x3_t @ld3_16b(i8* %A) nounwind {
+define %struct.__neon_int8x16x3_t @ld3_16b(ptr %A) nounwind {
 ; CHECK-LABEL: ld3_16b
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3.16b { v0, v1, v2 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
+  %tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3.v16i8.p0(ptr %A)
   ret %struct.__neon_int8x16x3_t  %tmp2
 }
 
-define %struct.__neon_int8x16x4_t @ld4_16b(i8* %A) nounwind {
+define %struct.__neon_int8x16x4_t @ld4_16b(ptr %A) nounwind {
 ; CHECK-LABEL: ld4_16b
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4.16b { v0, v1, v2, v3 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp2 = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4.v16i8.p0i8(i8* %A)
+  %tmp2 = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4.v16i8.p0(ptr %A)
   ret %struct.__neon_int8x16x4_t  %tmp2
 }
 
-declare %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2.v16i8.p0i8(i8*) nounwind readonly
-declare %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3.v16i8.p0i8(i8*) nounwind readonly
-declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4.v16i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2.v16i8.p0(ptr) nounwind readonly
+declare %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3.v16i8.p0(ptr) nounwind readonly
+declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4.v16i8.p0(ptr) nounwind readonly
 
 %struct.__neon_int16x4x2_t = type { <4 x i16>,  <4 x i16> }
 %struct.__neon_int16x4x3_t = type { <4 x i16>,  <4 x i16>,  <4 x i16> }
 %struct.__neon_int16x4x4_t = type { <4 x i16>,  <4 x i16>, <4 x i16>,  <4 x i16> }
 
-define %struct.__neon_int16x4x2_t @ld2_4h(i16* %A) nounwind {
+define %struct.__neon_int16x4x2_t @ld2_4h(ptr %A) nounwind {
 ; CHECK-LABEL: ld2_4h
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2.4h { v0, v1 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2.v4i16.p0i16(i16* %A)
+	%tmp2 = call %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2.v4i16.p0(ptr %A)
 	ret %struct.__neon_int16x4x2_t  %tmp2
 }
 
-define %struct.__neon_int16x4x3_t @ld3_4h(i16* %A) nounwind {
+define %struct.__neon_int16x4x3_t @ld3_4h(ptr %A) nounwind {
 ; CHECK-LABEL: ld3_4h
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3.4h { v0, v1, v2 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %A)
+	%tmp2 = call %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3.v4i16.p0(ptr %A)
 	ret %struct.__neon_int16x4x3_t  %tmp2
 }
 
-define %struct.__neon_int16x4x4_t @ld4_4h(i16* %A) nounwind {
+define %struct.__neon_int16x4x4_t @ld4_4h(ptr %A) nounwind {
 ; CHECK-LABEL: ld4_4h
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4.4h { v0, v1, v2, v3 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4.v4i16.p0i16(i16* %A)
+	%tmp2 = call %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4.v4i16.p0(ptr %A)
 	ret %struct.__neon_int16x4x4_t  %tmp2
 }
 
-declare %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2.v4i16.p0i16(i16*) nounwind readonly
-declare %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3.v4i16.p0i16(i16*) nounwind readonly
-declare %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4.v4i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2.v4i16.p0(ptr) nounwind readonly
+declare %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3.v4i16.p0(ptr) nounwind readonly
+declare %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4.v4i16.p0(ptr) nounwind readonly
 
 %struct.__neon_int16x8x2_t = type { <8 x i16>,  <8 x i16> }
 %struct.__neon_int16x8x3_t = type { <8 x i16>,  <8 x i16>,  <8 x i16> }
 %struct.__neon_int16x8x4_t = type { <8 x i16>,  <8 x i16>, <8 x i16>,  <8 x i16> }
 
-define %struct.__neon_int16x8x2_t @ld2_8h(i16* %A) nounwind {
+define %struct.__neon_int16x8x2_t @ld2_8h(ptr %A) nounwind {
 ; CHECK-LABEL: ld2_8h
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2.8h { v0, v1 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp2 = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2.v8i16.p0i16(i16* %A)
+  %tmp2 = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2.v8i16.p0(ptr %A)
   ret %struct.__neon_int16x8x2_t  %tmp2
 }
 
-define %struct.__neon_int16x8x3_t @ld3_8h(i16* %A) nounwind {
+define %struct.__neon_int16x8x3_t @ld3_8h(ptr %A) nounwind {
 ; CHECK-LABEL: ld3_8h
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3.8h { v0, v1, v2 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp2 = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3.v8i16.p0i16(i16* %A)
+  %tmp2 = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3.v8i16.p0(ptr %A)
   ret %struct.__neon_int16x8x3_t %tmp2
 }
 
-define %struct.__neon_int16x8x4_t @ld4_8h(i16* %A) nounwind {
+define %struct.__neon_int16x8x4_t @ld4_8h(ptr %A) nounwind {
 ; CHECK-LABEL: ld4_8h
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4.8h { v0, v1, v2, v3 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp2 = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4.v8i16.p0i16(i16* %A)
+  %tmp2 = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4.v8i16.p0(ptr %A)
   ret %struct.__neon_int16x8x4_t  %tmp2
 }
 
-declare %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2.v8i16.p0i16(i16*) nounwind readonly
-declare %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3.v8i16.p0i16(i16*) nounwind readonly
-declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4.v8i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2.v8i16.p0(ptr) nounwind readonly
+declare %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3.v8i16.p0(ptr) nounwind readonly
+declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4.v8i16.p0(ptr) nounwind readonly
 
 %struct.__neon_int32x2x2_t = type { <2 x i32>,  <2 x i32> }
 %struct.__neon_int32x2x3_t = type { <2 x i32>,  <2 x i32>,  <2 x i32> }
 %struct.__neon_int32x2x4_t = type { <2 x i32>,  <2 x i32>, <2 x i32>,  <2 x i32> }
 
-define %struct.__neon_int32x2x2_t @ld2_2s(i32* %A) nounwind {
+define %struct.__neon_int32x2x2_t @ld2_2s(ptr %A) nounwind {
 ; CHECK-LABEL: ld2_2s
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2.2s { v0, v1 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld2.v2i32.p0i32(i32* %A)
+	%tmp2 = call %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld2.v2i32.p0(ptr %A)
 	ret %struct.__neon_int32x2x2_t  %tmp2
 }
 
-define %struct.__neon_int32x2x3_t @ld3_2s(i32* %A) nounwind {
+define %struct.__neon_int32x2x3_t @ld3_2s(ptr %A) nounwind {
 ; CHECK-LABEL: ld3_2s
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3.2s { v0, v1, v2 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld3.v2i32.p0i32(i32* %A)
+	%tmp2 = call %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld3.v2i32.p0(ptr %A)
 	ret %struct.__neon_int32x2x3_t  %tmp2
 }
 
-define %struct.__neon_int32x2x4_t @ld4_2s(i32* %A) nounwind {
+define %struct.__neon_int32x2x4_t @ld4_2s(ptr %A) nounwind {
 ; CHECK-LABEL: ld4_2s
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4.2s { v0, v1, v2, v3 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4.v2i32.p0i32(i32* %A)
+	%tmp2 = call %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4.v2i32.p0(ptr %A)
 	ret %struct.__neon_int32x2x4_t  %tmp2
 }
 
-declare %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld2.v2i32.p0i32(i32*) nounwind readonly
-declare %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld3.v2i32.p0i32(i32*) nounwind readonly
-declare %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4.v2i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld2.v2i32.p0(ptr) nounwind readonly
+declare %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld3.v2i32.p0(ptr) nounwind readonly
+declare %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4.v2i32.p0(ptr) nounwind readonly
 
 %struct.__neon_int32x4x2_t = type { <4 x i32>,  <4 x i32> }
 %struct.__neon_int32x4x3_t = type { <4 x i32>,  <4 x i32>,  <4 x i32> }
 %struct.__neon_int32x4x4_t = type { <4 x i32>,  <4 x i32>, <4 x i32>,  <4 x i32> }
 
-define %struct.__neon_int32x4x2_t @ld2_4s(i32* %A) nounwind {
+define %struct.__neon_int32x4x2_t @ld2_4s(ptr %A) nounwind {
 ; CHECK-LABEL: ld2_4s
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2.4s { v0, v1 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2.v4i32.p0i32(i32* %A)
+	%tmp2 = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2.v4i32.p0(ptr %A)
 	ret %struct.__neon_int32x4x2_t  %tmp2
 }
 
-define %struct.__neon_int32x4x3_t @ld3_4s(i32* %A) nounwind {
+define %struct.__neon_int32x4x3_t @ld3_4s(ptr %A) nounwind {
 ; CHECK-LABEL: ld3_4s
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3.4s { v0, v1, v2 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %A)
+	%tmp2 = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3.v4i32.p0(ptr %A)
 	ret %struct.__neon_int32x4x3_t  %tmp2
 }
 
-define %struct.__neon_int32x4x4_t @ld4_4s(i32* %A) nounwind {
+define %struct.__neon_int32x4x4_t @ld4_4s(ptr %A) nounwind {
 ; CHECK-LABEL: ld4_4s
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4.4s { v0, v1, v2, v3 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4.v4i32.p0i32(i32* %A)
+	%tmp2 = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4.v4i32.p0(ptr %A)
 	ret %struct.__neon_int32x4x4_t  %tmp2
 }
 
-declare %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2.v4i32.p0i32(i32*) nounwind readonly
-declare %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3.v4i32.p0i32(i32*) nounwind readonly
-declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4.v4i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2.v4i32.p0(ptr) nounwind readonly
+declare %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3.v4i32.p0(ptr) nounwind readonly
+declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4.v4i32.p0(ptr) nounwind readonly
 
 %struct.__neon_int64x2x2_t = type { <2 x i64>,  <2 x i64> }
 %struct.__neon_int64x2x3_t = type { <2 x i64>,  <2 x i64>,  <2 x i64> }
 %struct.__neon_int64x2x4_t = type { <2 x i64>,  <2 x i64>, <2 x i64>,  <2 x i64> }
 
-define %struct.__neon_int64x2x2_t @ld2_2d(i64* %A) nounwind {
+define %struct.__neon_int64x2x2_t @ld2_2d(ptr %A) nounwind {
 ; CHECK-LABEL: ld2_2d
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2.2d { v0, v1 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2.v2i64.p0i64(i64* %A)
+	%tmp2 = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2.v2i64.p0(ptr %A)
 	ret %struct.__neon_int64x2x2_t  %tmp2
 }
 
-define %struct.__neon_int64x2x3_t @ld3_2d(i64* %A) nounwind {
+define %struct.__neon_int64x2x3_t @ld3_2d(ptr %A) nounwind {
 ; CHECK-LABEL: ld3_2d
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3.2d { v0, v1, v2 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3.v2i64.p0i64(i64* %A)
+	%tmp2 = call %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3.v2i64.p0(ptr %A)
 	ret %struct.__neon_int64x2x3_t  %tmp2
 }
 
-define %struct.__neon_int64x2x4_t @ld4_2d(i64* %A) nounwind {
+define %struct.__neon_int64x2x4_t @ld4_2d(ptr %A) nounwind {
 ; CHECK-LABEL: ld4_2d
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4.2d { v0, v1, v2, v3 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4.v2i64.p0i64(i64* %A)
+	%tmp2 = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4.v2i64.p0(ptr %A)
 	ret %struct.__neon_int64x2x4_t  %tmp2
 }
 
-declare %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2.v2i64.p0i64(i64*) nounwind readonly
-declare %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3.v2i64.p0i64(i64*) nounwind readonly
-declare %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4.v2i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2.v2i64.p0(ptr) nounwind readonly
+declare %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3.v2i64.p0(ptr) nounwind readonly
+declare %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4.v2i64.p0(ptr) nounwind readonly
 
 %struct.__neon_int64x1x2_t = type { <1 x i64>,  <1 x i64> }
 %struct.__neon_int64x1x3_t = type { <1 x i64>,  <1 x i64>, <1 x i64> }
 %struct.__neon_int64x1x4_t = type { <1 x i64>,  <1 x i64>, <1 x i64>, <1 x i64> }
 
 
-define %struct.__neon_int64x1x2_t @ld2_1di64(i64* %A) nounwind {
+define %struct.__neon_int64x1x2_t @ld2_1di64(ptr %A) nounwind {
 ; CHECK-LABEL: ld2_1di64
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1.1d { v0, v1 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld2.v1i64.p0i64(i64* %A)
+	%tmp2 = call %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld2.v1i64.p0(ptr %A)
 	ret %struct.__neon_int64x1x2_t  %tmp2
 }
 
-define %struct.__neon_int64x1x3_t @ld3_1di64(i64* %A) nounwind {
+define %struct.__neon_int64x1x3_t @ld3_1di64(ptr %A) nounwind {
 ; CHECK-LABEL: ld3_1di64
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1.1d { v0, v1, v2 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld3.v1i64.p0i64(i64* %A)
+	%tmp2 = call %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld3.v1i64.p0(ptr %A)
 	ret %struct.__neon_int64x1x3_t  %tmp2
 }
 
-define %struct.__neon_int64x1x4_t @ld4_1di64(i64* %A) nounwind {
+define %struct.__neon_int64x1x4_t @ld4_1di64(ptr %A) nounwind {
 ; CHECK-LABEL: ld4_1di64
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1.1d { v0, v1, v2, v3 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4.v1i64.p0i64(i64* %A)
+	%tmp2 = call %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4.v1i64.p0(ptr %A)
 	ret %struct.__neon_int64x1x4_t  %tmp2
 }
 
 
-declare %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld2.v1i64.p0i64(i64*) nounwind readonly
-declare %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld3.v1i64.p0i64(i64*) nounwind readonly
-declare %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4.v1i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld2.v1i64.p0(ptr) nounwind readonly
+declare %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld3.v1i64.p0(ptr) nounwind readonly
+declare %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4.v1i64.p0(ptr) nounwind readonly
 
 %struct.__neon_float64x1x2_t = type { <1 x double>,  <1 x double> }
 %struct.__neon_float64x1x3_t = type { <1 x double>,  <1 x double>, <1 x double> }
 %struct.__neon_float64x1x4_t = type { <1 x double>,  <1 x double>, <1 x double>, <1 x double> }
 
 
-define %struct.__neon_float64x1x2_t @ld2_1df64(double* %A) nounwind {
+define %struct.__neon_float64x1x2_t @ld2_1df64(ptr %A) nounwind {
 ; CHECK-LABEL: ld2_1df64
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1.1d { v0, v1 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_float64x1x2_t @llvm.aarch64.neon.ld2.v1f64.p0f64(double* %A)
+	%tmp2 = call %struct.__neon_float64x1x2_t @llvm.aarch64.neon.ld2.v1f64.p0(ptr %A)
 	ret %struct.__neon_float64x1x2_t  %tmp2
 }
 
-define %struct.__neon_float64x1x3_t @ld3_1df64(double* %A) nounwind {
+define %struct.__neon_float64x1x3_t @ld3_1df64(ptr %A) nounwind {
 ; CHECK-LABEL: ld3_1df64
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1.1d { v0, v1, v2 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_float64x1x3_t @llvm.aarch64.neon.ld3.v1f64.p0f64(double* %A)
+	%tmp2 = call %struct.__neon_float64x1x3_t @llvm.aarch64.neon.ld3.v1f64.p0(ptr %A)
 	ret %struct.__neon_float64x1x3_t  %tmp2
 }
 
-define %struct.__neon_float64x1x4_t @ld4_1df64(double* %A) nounwind {
+define %struct.__neon_float64x1x4_t @ld4_1df64(ptr %A) nounwind {
 ; CHECK-LABEL: ld4_1df64
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1.1d { v0, v1, v2, v3 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_float64x1x4_t @llvm.aarch64.neon.ld4.v1f64.p0f64(double* %A)
+	%tmp2 = call %struct.__neon_float64x1x4_t @llvm.aarch64.neon.ld4.v1f64.p0(ptr %A)
 	ret %struct.__neon_float64x1x4_t  %tmp2
 }
 
-declare %struct.__neon_float64x1x2_t @llvm.aarch64.neon.ld2.v1f64.p0f64(double*) nounwind readonly
-declare %struct.__neon_float64x1x3_t @llvm.aarch64.neon.ld3.v1f64.p0f64(double*) nounwind readonly
-declare %struct.__neon_float64x1x4_t @llvm.aarch64.neon.ld4.v1f64.p0f64(double*) nounwind readonly
+declare %struct.__neon_float64x1x2_t @llvm.aarch64.neon.ld2.v1f64.p0(ptr) nounwind readonly
+declare %struct.__neon_float64x1x3_t @llvm.aarch64.neon.ld3.v1f64.p0(ptr) nounwind readonly
+declare %struct.__neon_float64x1x4_t @llvm.aarch64.neon.ld4.v1f64.p0(ptr) nounwind readonly
 
 
-define %struct.__neon_int8x16x2_t @ld2lane_16b(<16 x i8> %L1, <16 x i8> %L2, i8* %A) nounwind {
+define %struct.__neon_int8x16x2_t @ld2lane_16b(<16 x i8> %L1, <16 x i8> %L2, ptr %A) nounwind {
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2lane_16b
 ; CHECK: ld2.b { v0, v1 }[1], [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2lane.v16i8.p0i8(<16 x i8> %L1, <16 x i8> %L2, i64 1, i8* %A)
+	%tmp2 = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2lane.v16i8.p0(<16 x i8> %L1, <16 x i8> %L2, i64 1, ptr %A)
 	ret %struct.__neon_int8x16x2_t  %tmp2
 }
 
-define %struct.__neon_int8x16x3_t @ld3lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, i8* %A) nounwind {
+define %struct.__neon_int8x16x3_t @ld3lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, ptr %A) nounwind {
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3lane_16b
 ; CHECK: ld3.b { v0, v1, v2 }[1], [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3lane.v16i8.p0i8(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, i64 1, i8* %A)
+	%tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3lane.v16i8.p0(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, i64 1, ptr %A)
 	ret %struct.__neon_int8x16x3_t  %tmp2
 }
 
-define %struct.__neon_int8x16x4_t @ld4lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, <16 x i8> %L4, i8* %A) nounwind {
+define %struct.__neon_int8x16x4_t @ld4lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, <16 x i8> %L4, ptr %A) nounwind {
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4lane_16b
 ; CHECK: ld4.b { v0, v1, v2, v3 }[1], [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4lane.v16i8.p0i8(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, <16 x i8> %L4, i64 1, i8* %A)
+	%tmp2 = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4lane.v16i8.p0(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, <16 x i8> %L4, i64 1, ptr %A)
 	ret %struct.__neon_int8x16x4_t  %tmp2
 }
 
-declare %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2lane.v16i8.p0i8(<16 x i8>, <16 x i8>, i64, i8*) nounwind readonly
-declare %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i64, i8*) nounwind readonly
-declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i64, i8*) nounwind readonly
+declare %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2lane.v16i8.p0(<16 x i8>, <16 x i8>, i64, ptr) nounwind readonly
+declare %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3lane.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, i64, ptr) nounwind readonly
+declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4lane.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i64, ptr) nounwind readonly
 
-define %struct.__neon_int16x8x2_t @ld2lane_8h(<8 x i16> %L1, <8 x i16> %L2, i16* %A) nounwind {
+define %struct.__neon_int16x8x2_t @ld2lane_8h(<8 x i16> %L1, <8 x i16> %L2, ptr %A) nounwind {
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2lane_8h
 ; CHECK: ld2.h { v0, v1 }[1], [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2lane.v8i16.p0i16(<8 x i16> %L1, <8 x i16> %L2, i64 1, i16* %A)
+	%tmp2 = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2lane.v8i16.p0(<8 x i16> %L1, <8 x i16> %L2, i64 1, ptr %A)
 	ret %struct.__neon_int16x8x2_t  %tmp2
 }
 
-define %struct.__neon_int16x8x3_t @ld3lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, i16* %A) nounwind {
+define %struct.__neon_int16x8x3_t @ld3lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, ptr %A) nounwind {
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3lane_8h
 ; CHECK: ld3.h { v0, v1, v2 }[1], [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3lane.v8i16.p0i16(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, i64 1, i16* %A)
+	%tmp2 = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3lane.v8i16.p0(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, i64 1, ptr %A)
 	ret %struct.__neon_int16x8x3_t  %tmp2
 }
 
-define %struct.__neon_int16x8x4_t @ld4lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, <8 x i16> %L4, i16* %A) nounwind {
+define %struct.__neon_int16x8x4_t @ld4lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, <8 x i16> %L4, ptr %A) nounwind {
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4lane_8h
 ; CHECK: ld4.h { v0, v1, v2, v3 }[1], [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4lane.v8i16.p0i16(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, <8 x i16> %L4, i64 1, i16* %A)
+	%tmp2 = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4lane.v8i16.p0(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, <8 x i16> %L4, i64 1, ptr %A)
 	ret %struct.__neon_int16x8x4_t  %tmp2
 }
 
-declare %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2lane.v8i16.p0i16(<8 x i16>, <8 x i16>, i64, i16*) nounwind readonly
-declare %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, i64, i16*) nounwind readonly
-declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i64, i16*) nounwind readonly
+declare %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2lane.v8i16.p0(<8 x i16>, <8 x i16>, i64, ptr) nounwind readonly
+declare %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3lane.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, i64, ptr) nounwind readonly
+declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4lane.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i64, ptr) nounwind readonly
 
-define %struct.__neon_int32x4x2_t @ld2lane_4s(<4 x i32> %L1, <4 x i32> %L2, i32* %A) nounwind {
+define %struct.__neon_int32x4x2_t @ld2lane_4s(<4 x i32> %L1, <4 x i32> %L2, ptr %A) nounwind {
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2lane_4s
 ; CHECK: ld2.s { v0, v1 }[1], [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32> %L1, <4 x i32> %L2, i64 1, i32* %A)
+	%tmp2 = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2lane.v4i32.p0(<4 x i32> %L1, <4 x i32> %L2, i64 1, ptr %A)
 	ret %struct.__neon_int32x4x2_t  %tmp2
 }
 
-define %struct.__neon_int32x4x3_t @ld3lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, i32* %A) nounwind {
+define %struct.__neon_int32x4x3_t @ld3lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, ptr %A) nounwind {
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3lane_4s
 ; CHECK: ld3.s { v0, v1, v2 }[1], [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, i64 1, i32* %A)
+	%tmp2 = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3lane.v4i32.p0(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, i64 1, ptr %A)
 	ret %struct.__neon_int32x4x3_t  %tmp2
 }
 
-define %struct.__neon_int32x4x4_t @ld4lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, <4 x i32> %L4, i32* %A) nounwind {
+define %struct.__neon_int32x4x4_t @ld4lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, <4 x i32> %L4, ptr %A) nounwind {
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4lane_4s
 ; CHECK: ld4.s { v0, v1, v2, v3 }[1], [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, <4 x i32> %L4, i64 1, i32* %A)
+	%tmp2 = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4lane.v4i32.p0(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, <4 x i32> %L4, i64 1, ptr %A)
 	ret %struct.__neon_int32x4x4_t  %tmp2
 }
 
-declare %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32>, <4 x i32>, i64, i32*) nounwind readonly
-declare %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i64, i32*) nounwind readonly
-declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, i32*) nounwind readonly
+declare %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2lane.v4i32.p0(<4 x i32>, <4 x i32>, i64, ptr) nounwind readonly
+declare %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3lane.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, i64, ptr) nounwind readonly
+declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4lane.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, ptr) nounwind readonly
 
-define %struct.__neon_int64x2x2_t @ld2lane_2d(<2 x i64> %L1, <2 x i64> %L2, i64* %A) nounwind {
+define %struct.__neon_int64x2x2_t @ld2lane_2d(<2 x i64> %L1, <2 x i64> %L2, ptr %A) nounwind {
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2lane_2d
 ; CHECK: ld2.d { v0, v1 }[1], [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2lane.v2i64.p0i64(<2 x i64> %L1, <2 x i64> %L2, i64 1, i64* %A)
+	%tmp2 = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2lane.v2i64.p0(<2 x i64> %L1, <2 x i64> %L2, i64 1, ptr %A)
 	ret %struct.__neon_int64x2x2_t  %tmp2
 }
 
-define %struct.__neon_int64x2x3_t @ld3lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, i64* %A) nounwind {
+define %struct.__neon_int64x2x3_t @ld3lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, ptr %A) nounwind {
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3lane_2d
 ; CHECK: ld3.d { v0, v1, v2 }[1], [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3lane.v2i64.p0i64(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, i64 1, i64* %A)
+	%tmp2 = call %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3lane.v2i64.p0(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, i64 1, ptr %A)
 	ret %struct.__neon_int64x2x3_t  %tmp2
 }
 
-define %struct.__neon_int64x2x4_t @ld4lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, <2 x i64> %L4, i64* %A) nounwind {
+define %struct.__neon_int64x2x4_t @ld4lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, <2 x i64> %L4, ptr %A) nounwind {
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4lane_2d
 ; CHECK: ld4.d { v0, v1, v2, v3 }[1], [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4lane.v2i64.p0i64(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, <2 x i64> %L4, i64 1, i64* %A)
+	%tmp2 = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4lane.v2i64.p0(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, <2 x i64> %L4, i64 1, ptr %A)
 	ret %struct.__neon_int64x2x4_t  %tmp2
 }
 
-declare %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2lane.v2i64.p0i64(<2 x i64>, <2 x i64>, i64, i64*) nounwind readonly
-declare %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64, i64*) nounwind readonly
-declare %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64, i64*) nounwind readonly
+declare %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2lane.v2i64.p0(<2 x i64>, <2 x i64>, i64, ptr) nounwind readonly
+declare %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3lane.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, i64, ptr) nounwind readonly
+declare %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4lane.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64, ptr) nounwind readonly
 
-define <8 x i8> @ld1r_8b(i8* %bar) {
+define <8 x i8> @ld1r_8b(ptr %bar) {
 ; CHECK: ld1r_8b
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1r.8b { v0 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load i8, i8* %bar
+  %tmp1 = load i8, ptr %bar
   %tmp2 = insertelement <8 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
   %tmp3 = insertelement <8 x i8> %tmp2, i8 %tmp1, i32 1
   %tmp4 = insertelement <8 x i8> %tmp3, i8 %tmp1, i32 2
@@ -461,12 +461,12 @@ define <8 x i8> @ld1r_8b(i8* %bar) {
   ret <8 x i8> %tmp9
 }
 
-define <16 x i8> @ld1r_16b(i8* %bar) {
+define <16 x i8> @ld1r_16b(ptr %bar) {
 ; CHECK: ld1r_16b
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1r.16b { v0 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load i8, i8* %bar
+  %tmp1 = load i8, ptr %bar
   %tmp2 = insertelement <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
   %tmp3 = insertelement <16 x i8> %tmp2, i8 %tmp1, i32 1
   %tmp4 = insertelement <16 x i8> %tmp3, i8 %tmp1, i32 2
@@ -486,12 +486,12 @@ define <16 x i8> @ld1r_16b(i8* %bar) {
   ret <16 x i8> %tmp17
 }
 
-define <4 x i16> @ld1r_4h(i16* %bar) {
+define <4 x i16> @ld1r_4h(ptr %bar) {
 ; CHECK: ld1r_4h
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1r.4h { v0 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load i16, i16* %bar
+  %tmp1 = load i16, ptr %bar
   %tmp2 = insertelement <4 x i16> <i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
   %tmp3 = insertelement <4 x i16> %tmp2, i16 %tmp1, i32 1
   %tmp4 = insertelement <4 x i16> %tmp3, i16 %tmp1, i32 2
@@ -499,12 +499,12 @@ define <4 x i16> @ld1r_4h(i16* %bar) {
   ret <4 x i16> %tmp5
 }
 
-define <8 x i16> @ld1r_8h(i16* %bar) {
+define <8 x i16> @ld1r_8h(ptr %bar) {
 ; CHECK: ld1r_8h
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1r.8h { v0 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load i16, i16* %bar
+  %tmp1 = load i16, ptr %bar
   %tmp2 = insertelement <8 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
   %tmp3 = insertelement <8 x i16> %tmp2, i16 %tmp1, i32 1
   %tmp4 = insertelement <8 x i16> %tmp3, i16 %tmp1, i32 2
@@ -516,23 +516,23 @@ define <8 x i16> @ld1r_8h(i16* %bar) {
   ret <8 x i16> %tmp9
 }
 
-define <2 x i32> @ld1r_2s(i32* %bar) {
+define <2 x i32> @ld1r_2s(ptr %bar) {
 ; CHECK: ld1r_2s
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1r.2s { v0 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load i32, i32* %bar
+  %tmp1 = load i32, ptr %bar
   %tmp2 = insertelement <2 x i32> <i32 undef, i32 undef>, i32 %tmp1, i32 0
   %tmp3 = insertelement <2 x i32> %tmp2, i32 %tmp1, i32 1
   ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @ld1r_4s(i32* %bar) {
+define <4 x i32> @ld1r_4s(ptr %bar) {
 ; CHECK: ld1r_4s
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1r.4s { v0 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load i32, i32* %bar
+  %tmp1 = load i32, ptr %bar
   %tmp2 = insertelement <4 x i32> <i32 undef, i32 undef, i32 undef, i32 undef>, i32 %tmp1, i32 0
   %tmp3 = insertelement <4 x i32> %tmp2, i32 %tmp1, i32 1
   %tmp4 = insertelement <4 x i32> %tmp3, i32 %tmp1, i32 2
@@ -540,377 +540,377 @@ define <4 x i32> @ld1r_4s(i32* %bar) {
   ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @ld1r_2d(i64* %bar) {
+define <2 x i64> @ld1r_2d(ptr %bar) {
 ; CHECK: ld1r_2d
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1r.2d { v0 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load i64, i64* %bar
+  %tmp1 = load i64, ptr %bar
   %tmp2 = insertelement <2 x i64> <i64 undef, i64 undef>, i64 %tmp1, i32 0
   %tmp3 = insertelement <2 x i64> %tmp2, i64 %tmp1, i32 1
   ret <2 x i64> %tmp3
 }
 
-define %struct.__neon_int8x8x2_t @ld2r_8b(i8* %A) nounwind {
+define %struct.__neon_int8x8x2_t @ld2r_8b(ptr %A) nounwind {
 ; CHECK: ld2r_8b
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2r.8b { v0, v1 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2r.v8i8.p0i8(i8* %A)
+	%tmp2 = call %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2r.v8i8.p0(ptr %A)
 	ret %struct.__neon_int8x8x2_t  %tmp2
 }
 
-define %struct.__neon_int8x8x3_t @ld3r_8b(i8* %A) nounwind {
+define %struct.__neon_int8x8x3_t @ld3r_8b(ptr %A) nounwind {
 ; CHECK: ld3r_8b
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3r.8b { v0, v1, v2 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3r.v8i8.p0i8(i8* %A)
+	%tmp2 = call %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3r.v8i8.p0(ptr %A)
 	ret %struct.__neon_int8x8x3_t  %tmp2
 }
 
-define %struct.__neon_int8x8x4_t @ld4r_8b(i8* %A) nounwind {
+define %struct.__neon_int8x8x4_t @ld4r_8b(ptr %A) nounwind {
 ; CHECK: ld4r_8b
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4r.8b { v0, v1, v2, v3 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4r.v8i8.p0i8(i8* %A)
+	%tmp2 = call %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4r.v8i8.p0(ptr %A)
 	ret %struct.__neon_int8x8x4_t  %tmp2
 }
 
-declare %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2r.v8i8.p0i8(i8*) nounwind readonly
-declare %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3r.v8i8.p0i8(i8*) nounwind readonly
-declare %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4r.v8i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2r.v8i8.p0(ptr) nounwind readonly
+declare %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3r.v8i8.p0(ptr) nounwind readonly
+declare %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4r.v8i8.p0(ptr) nounwind readonly
 
-define %struct.__neon_int8x16x2_t @ld2r_16b(i8* %A) nounwind {
+define %struct.__neon_int8x16x2_t @ld2r_16b(ptr %A) nounwind {
 ; CHECK: ld2r_16b
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2r.16b { v0, v1 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2r.v16i8.p0i8(i8* %A)
+	%tmp2 = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2r.v16i8.p0(ptr %A)
 	ret %struct.__neon_int8x16x2_t  %tmp2
 }
 
-define %struct.__neon_int8x16x3_t @ld3r_16b(i8* %A) nounwind {
+define %struct.__neon_int8x16x3_t @ld3r_16b(ptr %A) nounwind {
 ; CHECK: ld3r_16b
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3r.16b { v0, v1, v2 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3r.v16i8.p0i8(i8* %A)
+	%tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3r.v16i8.p0(ptr %A)
 	ret %struct.__neon_int8x16x3_t  %tmp2
 }
 
-define %struct.__neon_int8x16x4_t @ld4r_16b(i8* %A) nounwind {
+define %struct.__neon_int8x16x4_t @ld4r_16b(ptr %A) nounwind {
 ; CHECK: ld4r_16b
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4r.16b { v0, v1, v2, v3 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4r.v16i8.p0i8(i8* %A)
+	%tmp2 = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4r.v16i8.p0(ptr %A)
 	ret %struct.__neon_int8x16x4_t  %tmp2
 }
 
-declare %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2r.v16i8.p0i8(i8*) nounwind readonly
-declare %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3r.v16i8.p0i8(i8*) nounwind readonly
-declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4r.v16i8.p0i8(i8*) nounwind readonly
+declare %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2r.v16i8.p0(ptr) nounwind readonly
+declare %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3r.v16i8.p0(ptr) nounwind readonly
+declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4r.v16i8.p0(ptr) nounwind readonly
 
-define %struct.__neon_int16x4x2_t @ld2r_4h(i16* %A) nounwind {
+define %struct.__neon_int16x4x2_t @ld2r_4h(ptr %A) nounwind {
 ; CHECK: ld2r_4h
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2r.4h { v0, v1 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2r.v4i16.p0i16(i16* %A)
+	%tmp2 = call %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2r.v4i16.p0(ptr %A)
 	ret %struct.__neon_int16x4x2_t  %tmp2
 }
 
-define %struct.__neon_int16x4x3_t @ld3r_4h(i16* %A) nounwind {
+define %struct.__neon_int16x4x3_t @ld3r_4h(ptr %A) nounwind {
 ; CHECK: ld3r_4h
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3r.4h { v0, v1, v2 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3r.v4i16.p0i16(i16* %A)
+	%tmp2 = call %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3r.v4i16.p0(ptr %A)
 	ret %struct.__neon_int16x4x3_t  %tmp2
 }
 
-define %struct.__neon_int16x4x4_t @ld4r_4h(i16* %A) nounwind {
+define %struct.__neon_int16x4x4_t @ld4r_4h(ptr %A) nounwind {
 ; CHECK: ld4r_4h
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4r.4h { v0, v1, v2, v3 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4r.v4i16.p0i16(i16* %A)
+	%tmp2 = call %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4r.v4i16.p0(ptr %A)
 	ret %struct.__neon_int16x4x4_t  %tmp2
 }
 
-declare %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2r.v4i16.p0i16(i16*) nounwind readonly
-declare %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3r.v4i16.p0i16(i16*) nounwind readonly
-declare %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4r.v4i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2r.v4i16.p0(ptr) nounwind readonly
+declare %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3r.v4i16.p0(ptr) nounwind readonly
+declare %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4r.v4i16.p0(ptr) nounwind readonly
 
-define %struct.__neon_int16x8x2_t @ld2r_8h(i16* %A) nounwind {
+define %struct.__neon_int16x8x2_t @ld2r_8h(ptr %A) nounwind {
 ; CHECK: ld2r_8h
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2r.8h { v0, v1 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp2 = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2r.v8i16.p0i16(i16* %A)
+  %tmp2 = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2r.v8i16.p0(ptr %A)
   ret %struct.__neon_int16x8x2_t  %tmp2
 }
 
-define %struct.__neon_int16x8x3_t @ld3r_8h(i16* %A) nounwind {
+define %struct.__neon_int16x8x3_t @ld3r_8h(ptr %A) nounwind {
 ; CHECK: ld3r_8h
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3r.8h { v0, v1, v2 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp2 = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3r.v8i16.p0i16(i16* %A)
+  %tmp2 = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3r.v8i16.p0(ptr %A)
   ret %struct.__neon_int16x8x3_t  %tmp2
 }
 
-define %struct.__neon_int16x8x4_t @ld4r_8h(i16* %A) nounwind {
+define %struct.__neon_int16x8x4_t @ld4r_8h(ptr %A) nounwind {
 ; CHECK: ld4r_8h
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4r.8h { v0, v1, v2, v3 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp2 = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4r.v8i16.p0i16(i16* %A)
+  %tmp2 = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4r.v8i16.p0(ptr %A)
   ret %struct.__neon_int16x8x4_t  %tmp2
 }
 
-declare %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2r.v8i16.p0i16(i16*) nounwind readonly
-declare %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3r.v8i16.p0i16(i16*) nounwind readonly
-declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4r.v8i16.p0i16(i16*) nounwind readonly
+declare %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2r.v8i16.p0(ptr) nounwind readonly
+declare %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3r.v8i16.p0(ptr) nounwind readonly
+declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4r.v8i16.p0(ptr) nounwind readonly
 
-define %struct.__neon_int32x2x2_t @ld2r_2s(i32* %A) nounwind {
+define %struct.__neon_int32x2x2_t @ld2r_2s(ptr %A) nounwind {
 ; CHECK: ld2r_2s
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2r.2s { v0, v1 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld2r.v2i32.p0i32(i32* %A)
+	%tmp2 = call %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld2r.v2i32.p0(ptr %A)
 	ret %struct.__neon_int32x2x2_t  %tmp2
 }
 
-define %struct.__neon_int32x2x3_t @ld3r_2s(i32* %A) nounwind {
+define %struct.__neon_int32x2x3_t @ld3r_2s(ptr %A) nounwind {
 ; CHECK: ld3r_2s
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3r.2s { v0, v1, v2 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld3r.v2i32.p0i32(i32* %A)
+	%tmp2 = call %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld3r.v2i32.p0(ptr %A)
 	ret %struct.__neon_int32x2x3_t  %tmp2
 }
 
-define %struct.__neon_int32x2x4_t @ld4r_2s(i32* %A) nounwind {
+define %struct.__neon_int32x2x4_t @ld4r_2s(ptr %A) nounwind {
 ; CHECK: ld4r_2s
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4r.2s { v0, v1, v2, v3 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4r.v2i32.p0i32(i32* %A)
+	%tmp2 = call %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4r.v2i32.p0(ptr %A)
 	ret %struct.__neon_int32x2x4_t  %tmp2
 }
 
-declare %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld2r.v2i32.p0i32(i32*) nounwind readonly
-declare %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld3r.v2i32.p0i32(i32*) nounwind readonly
-declare %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4r.v2i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld2r.v2i32.p0(ptr) nounwind readonly
+declare %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld3r.v2i32.p0(ptr) nounwind readonly
+declare %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4r.v2i32.p0(ptr) nounwind readonly
 
-define %struct.__neon_int32x4x2_t @ld2r_4s(i32* %A) nounwind {
+define %struct.__neon_int32x4x2_t @ld2r_4s(ptr %A) nounwind {
 ; CHECK: ld2r_4s
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2r.4s { v0, v1 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2r.v4i32.p0i32(i32* %A)
+	%tmp2 = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2r.v4i32.p0(ptr %A)
 	ret %struct.__neon_int32x4x2_t  %tmp2
 }
 
-define %struct.__neon_int32x4x3_t @ld3r_4s(i32* %A) nounwind {
+define %struct.__neon_int32x4x3_t @ld3r_4s(ptr %A) nounwind {
 ; CHECK: ld3r_4s
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3r.4s { v0, v1, v2 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3r.v4i32.p0i32(i32* %A)
+	%tmp2 = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3r.v4i32.p0(ptr %A)
 	ret %struct.__neon_int32x4x3_t  %tmp2
 }
 
-define %struct.__neon_int32x4x4_t @ld4r_4s(i32* %A) nounwind {
+define %struct.__neon_int32x4x4_t @ld4r_4s(ptr %A) nounwind {
 ; CHECK: ld4r_4s
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4r.4s { v0, v1, v2, v3 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4r.v4i32.p0i32(i32* %A)
+	%tmp2 = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4r.v4i32.p0(ptr %A)
 	ret %struct.__neon_int32x4x4_t  %tmp2
 }
 
-declare %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2r.v4i32.p0i32(i32*) nounwind readonly
-declare %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3r.v4i32.p0i32(i32*) nounwind readonly
-declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4r.v4i32.p0i32(i32*) nounwind readonly
+declare %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2r.v4i32.p0(ptr) nounwind readonly
+declare %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3r.v4i32.p0(ptr) nounwind readonly
+declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4r.v4i32.p0(ptr) nounwind readonly
 
-define %struct.__neon_int64x1x2_t @ld2r_1d(i64* %A) nounwind {
+define %struct.__neon_int64x1x2_t @ld2r_1d(ptr %A) nounwind {
 ; CHECK: ld2r_1d
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2r.1d { v0, v1 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld2r.v1i64.p0i64(i64* %A)
+	%tmp2 = call %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld2r.v1i64.p0(ptr %A)
 	ret %struct.__neon_int64x1x2_t  %tmp2
 }
 
-define %struct.__neon_int64x1x3_t @ld3r_1d(i64* %A) nounwind {
+define %struct.__neon_int64x1x3_t @ld3r_1d(ptr %A) nounwind {
 ; CHECK: ld3r_1d
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3r.1d { v0, v1, v2 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld3r.v1i64.p0i64(i64* %A)
+	%tmp2 = call %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld3r.v1i64.p0(ptr %A)
 	ret %struct.__neon_int64x1x3_t  %tmp2
 }
 
-define %struct.__neon_int64x1x4_t @ld4r_1d(i64* %A) nounwind {
+define %struct.__neon_int64x1x4_t @ld4r_1d(ptr %A) nounwind {
 ; CHECK: ld4r_1d
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4r.1d { v0, v1, v2, v3 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4r.v1i64.p0i64(i64* %A)
+	%tmp2 = call %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4r.v1i64.p0(ptr %A)
 	ret %struct.__neon_int64x1x4_t  %tmp2
 }
 
-declare %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld2r.v1i64.p0i64(i64*) nounwind readonly
-declare %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld3r.v1i64.p0i64(i64*) nounwind readonly
-declare %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4r.v1i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld2r.v1i64.p0(ptr) nounwind readonly
+declare %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld3r.v1i64.p0(ptr) nounwind readonly
+declare %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4r.v1i64.p0(ptr) nounwind readonly
 
-define %struct.__neon_int64x2x2_t @ld2r_2d(i64* %A) nounwind {
+define %struct.__neon_int64x2x2_t @ld2r_2d(ptr %A) nounwind {
 ; CHECK: ld2r_2d
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld2r.2d { v0, v1 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2r.v2i64.p0i64(i64* %A)
+	%tmp2 = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2r.v2i64.p0(ptr %A)
 	ret %struct.__neon_int64x2x2_t  %tmp2
 }
 
-define %struct.__neon_int64x2x3_t @ld3r_2d(i64* %A) nounwind {
+define %struct.__neon_int64x2x3_t @ld3r_2d(ptr %A) nounwind {
 ; CHECK: ld3r_2d
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld3r.2d { v0, v1, v2 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3r.v2i64.p0i64(i64* %A)
+	%tmp2 = call %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3r.v2i64.p0(ptr %A)
 	ret %struct.__neon_int64x2x3_t  %tmp2
 }
 
-define %struct.__neon_int64x2x4_t @ld4r_2d(i64* %A) nounwind {
+define %struct.__neon_int64x2x4_t @ld4r_2d(ptr %A) nounwind {
 ; CHECK: ld4r_2d
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld4r.2d { v0, v1, v2, v3 }, [x0]
 ; CHECK-NEXT: ret
-	%tmp2 = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4r.v2i64.p0i64(i64* %A)
+	%tmp2 = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4r.v2i64.p0(ptr %A)
 	ret %struct.__neon_int64x2x4_t  %tmp2
 }
 
-declare %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2r.v2i64.p0i64(i64*) nounwind readonly
-declare %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3r.v2i64.p0i64(i64*) nounwind readonly
-declare %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4r.v2i64.p0i64(i64*) nounwind readonly
+declare %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2r.v2i64.p0(ptr) nounwind readonly
+declare %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3r.v2i64.p0(ptr) nounwind readonly
+declare %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4r.v2i64.p0(ptr) nounwind readonly
 
-define <16 x i8> @ld1_16b(<16 x i8> %V, i8* %bar) {
+define <16 x i8> @ld1_16b(<16 x i8> %V, ptr %bar) {
 ; CHECK-LABEL: ld1_16b
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1.b { v0 }[0], [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load i8, i8* %bar
+  %tmp1 = load i8, ptr %bar
   %tmp2 = insertelement <16 x i8> %V, i8 %tmp1, i32 0
   ret <16 x i8> %tmp2
 }
 
-define <8 x i16> @ld1_8h(<8 x i16> %V, i16* %bar) {
+define <8 x i16> @ld1_8h(<8 x i16> %V, ptr %bar) {
 ; CHECK-LABEL: ld1_8h
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1.h { v0 }[0], [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load i16, i16* %bar
+  %tmp1 = load i16, ptr %bar
   %tmp2 = insertelement <8 x i16> %V, i16 %tmp1, i32 0
   ret <8 x i16> %tmp2
 }
 
-define <4 x i32> @ld1_4s(<4 x i32> %V, i32* %bar) {
+define <4 x i32> @ld1_4s(<4 x i32> %V, ptr %bar) {
 ; CHECK-LABEL: ld1_4s
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1.s { v0 }[0], [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load i32, i32* %bar
+  %tmp1 = load i32, ptr %bar
   %tmp2 = insertelement <4 x i32> %V, i32 %tmp1, i32 0
   ret <4 x i32> %tmp2
 }
 
-define <4 x float> @ld1_4s_float(<4 x float> %V, float* %bar) {
+define <4 x float> @ld1_4s_float(<4 x float> %V, ptr %bar) {
 ; CHECK-LABEL: ld1_4s_float:
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1.s { v0 }[0], [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load float, float* %bar
+  %tmp1 = load float, ptr %bar
   %tmp2 = insertelement <4 x float> %V, float %tmp1, i32 0
   ret <4 x float> %tmp2
 }
 
-define <2 x i64> @ld1_2d(<2 x i64> %V, i64* %bar) {
+define <2 x i64> @ld1_2d(<2 x i64> %V, ptr %bar) {
 ; CHECK-LABEL: ld1_2d
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1.d { v0 }[0], [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load i64, i64* %bar
+  %tmp1 = load i64, ptr %bar
   %tmp2 = insertelement <2 x i64> %V, i64 %tmp1, i32 0
   ret <2 x i64> %tmp2
 }
 
-define <2 x double> @ld1_2d_double(<2 x double> %V, double* %bar) {
+define <2 x double> @ld1_2d_double(<2 x double> %V, ptr %bar) {
 ; CHECK-LABEL: ld1_2d_double:
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1.d { v0 }[0], [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load double, double* %bar
+  %tmp1 = load double, ptr %bar
   %tmp2 = insertelement <2 x double> %V, double %tmp1, i32 0
   ret <2 x double> %tmp2
 }
 
-define <1 x i64> @ld1_1d(<1 x i64>* %p) {
+define <1 x i64> @ld1_1d(ptr %p) {
 ; CHECK-LABEL: ld1_1d
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ldr [[REG:d[0-9]+]], [x0]
 ; CHECK-NEXT: ret
-  %tmp = load <1 x i64>, <1 x i64>* %p, align 8
+  %tmp = load <1 x i64>, ptr %p, align 8
   ret <1 x i64> %tmp
 }
 
-define <8 x i8> @ld1_8b(<8 x i8> %V, i8* %bar) {
+define <8 x i8> @ld1_8b(<8 x i8> %V, ptr %bar) {
 ; CHECK-LABEL: ld1_8b
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1.b { v0 }[0], [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load i8, i8* %bar
+  %tmp1 = load i8, ptr %bar
   %tmp2 = insertelement <8 x i8> %V, i8 %tmp1, i32 0
   ret <8 x i8> %tmp2
 }
 
-define <4 x i16> @ld1_4h(<4 x i16> %V, i16* %bar) {
+define <4 x i16> @ld1_4h(<4 x i16> %V, ptr %bar) {
 ; CHECK-LABEL: ld1_4h
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1.h { v0 }[0], [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load i16, i16* %bar
+  %tmp1 = load i16, ptr %bar
   %tmp2 = insertelement <4 x i16> %V, i16 %tmp1, i32 0
   ret <4 x i16> %tmp2
 }
 
-define <2 x i32> @ld1_2s(<2 x i32> %V, i32* %bar) {
+define <2 x i32> @ld1_2s(<2 x i32> %V, ptr %bar) {
 ; CHECK-LABEL: ld1_2s:
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1.s { v0 }[0], [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load i32, i32* %bar
+  %tmp1 = load i32, ptr %bar
   %tmp2 = insertelement <2 x i32> %V, i32 %tmp1, i32 0
   ret <2 x i32> %tmp2
 }
 
-define <2 x float> @ld1_2s_float(<2 x float> %V, float* %bar) {
+define <2 x float> @ld1_2s_float(<2 x float> %V, ptr %bar) {
 ; CHECK-LABEL: ld1_2s_float:
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1.s { v0 }[0], [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load float, float* %bar
+  %tmp1 = load float, ptr %bar
   %tmp2 = insertelement <2 x float> %V, float %tmp1, i32 0
   ret <2 x float> %tmp2
 }
 
 
 ; Add rdar://13098923 test case: vld1_dup_u32 doesn't generate ld1r.2s
-define void @ld1r_2s_from_dup(i8* nocapture %a, i8* nocapture %b, i16* nocapture %diff) nounwind ssp {
+define void @ld1r_2s_from_dup(ptr nocapture %a, ptr nocapture %b, ptr nocapture %diff) nounwind ssp {
 entry:
 ; CHECK: ld1r_2s_from_dup
 ; CHECK: ld1r.2s { [[ARG1:v[0-9]+]] }, [x0]
@@ -920,13 +920,11 @@ entry:
 ; CHECK-NEXT: sub.4h v[[RESREGNUM:[0-9]+]], [[ARG1]], [[ARG2]]
 ; CHECK-NEXT: str d[[RESREGNUM]], [x2]
 ; CHECK-NEXT: ret
-  %tmp = bitcast i8* %a to i32*
-  %tmp1 = load i32, i32* %tmp, align 4
+  %tmp1 = load i32, ptr %a, align 4
   %tmp2 = insertelement <2 x i32> undef, i32 %tmp1, i32 0
   %lane = shufflevector <2 x i32> %tmp2, <2 x i32> undef, <2 x i32> zeroinitializer
   %tmp3 = bitcast <2 x i32> %lane to <8 x i8>
-  %tmp4 = bitcast i8* %b to i32*
-  %tmp5 = load i32, i32* %tmp4, align 4
+  %tmp5 = load i32, ptr %b, align 4
   %tmp6 = insertelement <2 x i32> undef, i32 %tmp5, i32 0
   %lane1 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> zeroinitializer
   %tmp7 = bitcast <2 x i32> %lane1 to <8 x i8>
@@ -936,19 +934,18 @@ entry:
   %tmp8 = bitcast <8 x i16> %sub.i to <2 x i64>
   %shuffle.i = shufflevector <2 x i64> %tmp8, <2 x i64> undef, <1 x i32> zeroinitializer
   %tmp9 = bitcast <1 x i64> %shuffle.i to <4 x i16>
-  %tmp10 = bitcast i16* %diff to <4 x i16>*
-  store <4 x i16> %tmp9, <4 x i16>* %tmp10, align 8
+  store <4 x i16> %tmp9, ptr %diff, align 8
   ret void
 }
 
 ; Tests for rdar://11947069: vld1_dup_* and vld1q_dup_* code gen is suboptimal
-define <4 x float> @ld1r_4s_float(float* nocapture %x) {
+define <4 x float> @ld1r_4s_float(ptr nocapture %x) {
 entry:
 ; CHECK-LABEL: ld1r_4s_float
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1r.4s { v0 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp = load float, float* %x, align 4
+  %tmp = load float, ptr %x, align 4
   %tmp1 = insertelement <4 x float> undef, float %tmp, i32 0
   %tmp2 = insertelement <4 x float> %tmp1, float %tmp, i32 1
   %tmp3 = insertelement <4 x float> %tmp2, float %tmp, i32 2
@@ -956,84 +953,84 @@ entry:
   ret <4 x float> %tmp4
 }
 
-define <2 x float> @ld1r_2s_float(float* nocapture %x) {
+define <2 x float> @ld1r_2s_float(ptr nocapture %x) {
 entry:
 ; CHECK-LABEL: ld1r_2s_float
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1r.2s { v0 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp = load float, float* %x, align 4
+  %tmp = load float, ptr %x, align 4
   %tmp1 = insertelement <2 x float> undef, float %tmp, i32 0
   %tmp2 = insertelement <2 x float> %tmp1, float %tmp, i32 1
   ret <2 x float> %tmp2
 }
 
-define <2 x double> @ld1r_2d_double(double* nocapture %x) {
+define <2 x double> @ld1r_2d_double(ptr nocapture %x) {
 entry:
 ; CHECK-LABEL: ld1r_2d_double
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1r.2d { v0 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp = load double, double* %x, align 4
+  %tmp = load double, ptr %x, align 4
   %tmp1 = insertelement <2 x double> undef, double %tmp, i32 0
   %tmp2 = insertelement <2 x double> %tmp1, double %tmp, i32 1
   ret <2 x double> %tmp2
 }
 
-define <1 x double> @ld1r_1d_double(double* nocapture %x) {
+define <1 x double> @ld1r_1d_double(ptr nocapture %x) {
 entry:
 ; CHECK-LABEL: ld1r_1d_double
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ldr d0, [x0]
 ; CHECK-NEXT: ret
-  %tmp = load double, double* %x, align 4
+  %tmp = load double, ptr %x, align 4
   %tmp1 = insertelement <1 x double> undef, double %tmp, i32 0
   ret <1 x double> %tmp1
 }
 
-define <4 x float> @ld1r_4s_float_shuff(float* nocapture %x) {
+define <4 x float> @ld1r_4s_float_shuff(ptr nocapture %x) {
 entry:
 ; CHECK-LABEL: ld1r_4s_float_shuff
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1r.4s { v0 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp = load float, float* %x, align 4
+  %tmp = load float, ptr %x, align 4
   %tmp1 = insertelement <4 x float> undef, float %tmp, i32 0
   %lane = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> zeroinitializer
   ret <4 x float> %lane
 }
 
-define <2 x float> @ld1r_2s_float_shuff(float* nocapture %x) {
+define <2 x float> @ld1r_2s_float_shuff(ptr nocapture %x) {
 entry:
 ; CHECK-LABEL: ld1r_2s_float_shuff
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1r.2s { v0 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp = load float, float* %x, align 4
+  %tmp = load float, ptr %x, align 4
   %tmp1 = insertelement <2 x float> undef, float %tmp, i32 0
   %lane = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer
   ret <2 x float> %lane
 }
 
-define <2 x double> @ld1r_2d_double_shuff(double* nocapture %x) {
+define <2 x double> @ld1r_2d_double_shuff(ptr nocapture %x) {
 entry:
 ; CHECK-LABEL: ld1r_2d_double_shuff
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ld1r.2d { v0 }, [x0]
 ; CHECK-NEXT: ret
-  %tmp = load double, double* %x, align 4
+  %tmp = load double, ptr %x, align 4
   %tmp1 = insertelement <2 x double> undef, double %tmp, i32 0
   %lane = shufflevector <2 x double> %tmp1, <2 x double> undef, <2 x i32> zeroinitializer
   ret <2 x double> %lane
 }
 
-define <1 x double> @ld1r_1d_double_shuff(double* nocapture %x) {
+define <1 x double> @ld1r_1d_double_shuff(ptr nocapture %x) {
 entry:
 ; CHECK-LABEL: ld1r_1d_double_shuff
 ; Make sure we are using the operands defined by the ABI
 ; CHECK: ldr d0, [x0]
 ; CHECK-NEXT: ret
-  %tmp = load double, double* %x, align 4
+  %tmp = load double, ptr %x, align 4
   %tmp1 = insertelement <1 x double> undef, double %tmp, i32 0
   %lane = shufflevector <1 x double> %tmp1, <1 x double> undef, <1 x i32> zeroinitializer
   ret <1 x double> %lane
@@ -1043,52 +1040,52 @@ entry:
 %struct.__neon_float32x2x3_t = type { <2 x float>,  <2 x float>,  <2 x float> }
 %struct.__neon_float32x2x4_t = type { <2 x float>,  <2 x float>, <2 x float>,  <2 x float> }
 
-declare %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld1x2.v8i8.p0i8(i8*) nounwind readonly
-declare %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld1x2.v4i16.p0i16(i16*) nounwind readonly
-declare %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld1x2.v2i32.p0i32(i32*) nounwind readonly
-declare %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld1x2.v2f32.p0f32(float*) nounwind readonly
-declare %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld1x2.v1i64.p0i64(i64*) nounwind readonly
-declare %struct.__neon_float64x1x2_t @llvm.aarch64.neon.ld1x2.v1f64.p0f64(double*) nounwind readonly
+declare %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld1x2.v8i8.p0(ptr) nounwind readonly
+declare %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld1x2.v4i16.p0(ptr) nounwind readonly
+declare %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld1x2.v2i32.p0(ptr) nounwind readonly
+declare %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld1x2.v2f32.p0(ptr) nounwind readonly
+declare %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld1x2.v1i64.p0(ptr) nounwind readonly
+declare %struct.__neon_float64x1x2_t @llvm.aarch64.neon.ld1x2.v1f64.p0(ptr) nounwind readonly
 
-define %struct.__neon_int8x8x2_t @ld1_x2_v8i8(i8* %addr) {
+define %struct.__neon_int8x8x2_t @ld1_x2_v8i8(ptr %addr) {
 ; CHECK-LABEL: ld1_x2_v8i8:
 ; CHECK: ld1.8b { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld1x2.v8i8.p0i8(i8* %addr)
+  %val = call %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld1x2.v8i8.p0(ptr %addr)
   ret %struct.__neon_int8x8x2_t %val
 }
 
-define %struct.__neon_int16x4x2_t @ld1_x2_v4i16(i16* %addr) {
+define %struct.__neon_int16x4x2_t @ld1_x2_v4i16(ptr %addr) {
 ; CHECK-LABEL: ld1_x2_v4i16:
 ; CHECK: ld1.4h { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld1x2.v4i16.p0i16(i16* %addr)
+  %val = call %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld1x2.v4i16.p0(ptr %addr)
   ret %struct.__neon_int16x4x2_t %val
 }
 
-define %struct.__neon_int32x2x2_t @ld1_x2_v2i32(i32* %addr) {
+define %struct.__neon_int32x2x2_t @ld1_x2_v2i32(ptr %addr) {
 ; CHECK-LABEL: ld1_x2_v2i32:
 ; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld1x2.v2i32.p0i32(i32* %addr)
+  %val = call %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld1x2.v2i32.p0(ptr %addr)
   ret %struct.__neon_int32x2x2_t %val
 }
 
-define %struct.__neon_float32x2x2_t @ld1_x2_v2f32(float* %addr) {
+define %struct.__neon_float32x2x2_t @ld1_x2_v2f32(ptr %addr) {
 ; CHECK-LABEL: ld1_x2_v2f32:
 ; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld1x2.v2f32.p0f32(float* %addr)
+  %val = call %struct.__neon_float32x2x2_t @llvm.aarch64.neon.ld1x2.v2f32.p0(ptr %addr)
   ret %struct.__neon_float32x2x2_t %val
 }
 
-define %struct.__neon_int64x1x2_t @ld1_x2_v1i64(i64* %addr) {
+define %struct.__neon_int64x1x2_t @ld1_x2_v1i64(ptr %addr) {
 ; CHECK-LABEL: ld1_x2_v1i64:
 ; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld1x2.v1i64.p0i64(i64* %addr)
+  %val = call %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld1x2.v1i64.p0(ptr %addr)
   ret %struct.__neon_int64x1x2_t %val
 }
 
-define %struct.__neon_float64x1x2_t @ld1_x2_v1f64(double* %addr) {
+define %struct.__neon_float64x1x2_t @ld1_x2_v1f64(ptr %addr) {
 ; CHECK-LABEL: ld1_x2_v1f64:
 ; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_float64x1x2_t @llvm.aarch64.neon.ld1x2.v1f64.p0f64(double* %addr)
+  %val = call %struct.__neon_float64x1x2_t @llvm.aarch64.neon.ld1x2.v1f64.p0(ptr %addr)
   ret %struct.__neon_float64x1x2_t %val
 }
 
@@ -1101,247 +1098,247 @@ define %struct.__neon_float64x1x2_t @ld1_x2_v1f64(double* %addr) {
 %struct.__neon_float64x2x3_t = type { <2 x double>,  <2 x double>,  <2 x double> }
 %struct.__neon_float64x2x4_t = type { <2 x double>,  <2 x double>, <2 x double>,  <2 x double> }
 
-declare %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld1x2.v16i8.p0i8(i8*) nounwind readonly
-declare %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld1x2.v8i16.p0i16(i16*) nounwind readonly
-declare %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld1x2.v4i32.p0i32(i32*) nounwind readonly
-declare %struct.__neon_float32x4x2_t @llvm.aarch64.neon.ld1x2.v4f32.p0f32(float*) nounwind readonly
-declare %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld1x2.v2i64.p0i64(i64*) nounwind readonly
-declare %struct.__neon_float64x2x2_t @llvm.aarch64.neon.ld1x2.v2f64.p0f64(double*) nounwind readonly
+declare %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld1x2.v16i8.p0(ptr) nounwind readonly
+declare %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld1x2.v8i16.p0(ptr) nounwind readonly
+declare %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld1x2.v4i32.p0(ptr) nounwind readonly
+declare %struct.__neon_float32x4x2_t @llvm.aarch64.neon.ld1x2.v4f32.p0(ptr) nounwind readonly
+declare %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld1x2.v2i64.p0(ptr) nounwind readonly
+declare %struct.__neon_float64x2x2_t @llvm.aarch64.neon.ld1x2.v2f64.p0(ptr) nounwind readonly
 
-define %struct.__neon_int8x16x2_t @ld1_x2_v16i8(i8* %addr) {
+define %struct.__neon_int8x16x2_t @ld1_x2_v16i8(ptr %addr) {
 ; CHECK-LABEL: ld1_x2_v16i8:
 ; CHECK: ld1.16b { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld1x2.v16i8.p0i8(i8* %addr)
+  %val = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld1x2.v16i8.p0(ptr %addr)
   ret %struct.__neon_int8x16x2_t %val
 }
 
-define %struct.__neon_int16x8x2_t @ld1_x2_v8i16(i16* %addr) {
+define %struct.__neon_int16x8x2_t @ld1_x2_v8i16(ptr %addr) {
 ; CHECK-LABEL: ld1_x2_v8i16:
 ; CHECK: ld1.8h { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld1x2.v8i16.p0i16(i16* %addr)
+  %val = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld1x2.v8i16.p0(ptr %addr)
   ret %struct.__neon_int16x8x2_t %val
 }
 
-define %struct.__neon_int32x4x2_t @ld1_x2_v4i32(i32* %addr) {
+define %struct.__neon_int32x4x2_t @ld1_x2_v4i32(ptr %addr) {
 ; CHECK-LABEL: ld1_x2_v4i32:
 ; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld1x2.v4i32.p0i32(i32* %addr)
+  %val = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld1x2.v4i32.p0(ptr %addr)
   ret %struct.__neon_int32x4x2_t %val
 }
 
-define %struct.__neon_float32x4x2_t @ld1_x2_v4f32(float* %addr) {
+define %struct.__neon_float32x4x2_t @ld1_x2_v4f32(ptr %addr) {
 ; CHECK-LABEL: ld1_x2_v4f32:
 ; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_float32x4x2_t @llvm.aarch64.neon.ld1x2.v4f32.p0f32(float* %addr)
+  %val = call %struct.__neon_float32x4x2_t @llvm.aarch64.neon.ld1x2.v4f32.p0(ptr %addr)
   ret %struct.__neon_float32x4x2_t %val
 }
 
-define %struct.__neon_int64x2x2_t @ld1_x2_v2i64(i64* %addr) {
+define %struct.__neon_int64x2x2_t @ld1_x2_v2i64(ptr %addr) {
 ; CHECK-LABEL: ld1_x2_v2i64:
 ; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld1x2.v2i64.p0i64(i64* %addr)
+  %val = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld1x2.v2i64.p0(ptr %addr)
   ret %struct.__neon_int64x2x2_t %val
 }
 
-define %struct.__neon_float64x2x2_t @ld1_x2_v2f64(double* %addr) {
+define %struct.__neon_float64x2x2_t @ld1_x2_v2f64(ptr %addr) {
 ; CHECK-LABEL: ld1_x2_v2f64:
 ; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_float64x2x2_t @llvm.aarch64.neon.ld1x2.v2f64.p0f64(double* %addr)
+  %val = call %struct.__neon_float64x2x2_t @llvm.aarch64.neon.ld1x2.v2f64.p0(ptr %addr)
   ret %struct.__neon_float64x2x2_t %val
 }
 
-declare %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld1x3.v8i8.p0i8(i8*) nounwind readonly
-declare %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld1x3.v4i16.p0i16(i16*) nounwind readonly
-declare %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld1x3.v2i32.p0i32(i32*) nounwind readonly
-declare %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld1x3.v2f32.p0f32(float*) nounwind readonly
-declare %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld1x3.v1i64.p0i64(i64*) nounwind readonly
-declare %struct.__neon_float64x1x3_t @llvm.aarch64.neon.ld1x3.v1f64.p0f64(double*) nounwind readonly
+declare %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld1x3.v8i8.p0(ptr) nounwind readonly
+declare %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld1x3.v4i16.p0(ptr) nounwind readonly
+declare %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld1x3.v2i32.p0(ptr) nounwind readonly
+declare %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld1x3.v2f32.p0(ptr) nounwind readonly
+declare %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld1x3.v1i64.p0(ptr) nounwind readonly
+declare %struct.__neon_float64x1x3_t @llvm.aarch64.neon.ld1x3.v1f64.p0(ptr) nounwind readonly
 
-define %struct.__neon_int8x8x3_t @ld1_x3_v8i8(i8* %addr) {
+define %struct.__neon_int8x8x3_t @ld1_x3_v8i8(ptr %addr) {
 ; CHECK-LABEL: ld1_x3_v8i8:
 ; CHECK: ld1.8b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld1x3.v8i8.p0i8(i8* %addr)
+  %val = call %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld1x3.v8i8.p0(ptr %addr)
   ret %struct.__neon_int8x8x3_t %val
 }
 
-define %struct.__neon_int16x4x3_t @ld1_x3_v4i16(i16* %addr) {
+define %struct.__neon_int16x4x3_t @ld1_x3_v4i16(ptr %addr) {
 ; CHECK-LABEL: ld1_x3_v4i16:
 ; CHECK: ld1.4h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld1x3.v4i16.p0i16(i16* %addr)
+  %val = call %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld1x3.v4i16.p0(ptr %addr)
   ret %struct.__neon_int16x4x3_t %val
 }
 
-define %struct.__neon_int32x2x3_t @ld1_x3_v2i32(i32* %addr) {
+define %struct.__neon_int32x2x3_t @ld1_x3_v2i32(ptr %addr) {
 ; CHECK-LABEL: ld1_x3_v2i32:
 ; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld1x3.v2i32.p0i32(i32* %addr)
+  %val = call %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld1x3.v2i32.p0(ptr %addr)
   ret %struct.__neon_int32x2x3_t %val
 }
 
-define %struct.__neon_float32x2x3_t @ld1_x3_v2f32(float* %addr) {
+define %struct.__neon_float32x2x3_t @ld1_x3_v2f32(ptr %addr) {
 ; CHECK-LABEL: ld1_x3_v2f32:
 ; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld1x3.v2f32.p0f32(float* %addr)
+  %val = call %struct.__neon_float32x2x3_t @llvm.aarch64.neon.ld1x3.v2f32.p0(ptr %addr)
   ret %struct.__neon_float32x2x3_t %val
 }
 
-define %struct.__neon_int64x1x3_t @ld1_x3_v1i64(i64* %addr) {
+define %struct.__neon_int64x1x3_t @ld1_x3_v1i64(ptr %addr) {
 ; CHECK-LABEL: ld1_x3_v1i64:
 ; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld1x3.v1i64.p0i64(i64* %addr)
+  %val = call %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld1x3.v1i64.p0(ptr %addr)
   ret %struct.__neon_int64x1x3_t %val
 }
 
-define %struct.__neon_float64x1x3_t @ld1_x3_v1f64(double* %addr) {
+define %struct.__neon_float64x1x3_t @ld1_x3_v1f64(ptr %addr) {
 ; CHECK-LABEL: ld1_x3_v1f64:
 ; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_float64x1x3_t @llvm.aarch64.neon.ld1x3.v1f64.p0f64(double* %addr)
+  %val = call %struct.__neon_float64x1x3_t @llvm.aarch64.neon.ld1x3.v1f64.p0(ptr %addr)
   ret %struct.__neon_float64x1x3_t %val
 }
 
-declare %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld1x3.v16i8.p0i8(i8*) nounwind readonly
-declare %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld1x3.v8i16.p0i16(i16*) nounwind readonly
-declare %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld1x3.v4i32.p0i32(i32*) nounwind readonly
-declare %struct.__neon_float32x4x3_t @llvm.aarch64.neon.ld1x3.v4f32.p0f32(float*) nounwind readonly
-declare %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld1x3.v2i64.p0i64(i64*) nounwind readonly
-declare %struct.__neon_float64x2x3_t @llvm.aarch64.neon.ld1x3.v2f64.p0f64(double*) nounwind readonly
+declare %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld1x3.v16i8.p0(ptr) nounwind readonly
+declare %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld1x3.v8i16.p0(ptr) nounwind readonly
+declare %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld1x3.v4i32.p0(ptr) nounwind readonly
+declare %struct.__neon_float32x4x3_t @llvm.aarch64.neon.ld1x3.v4f32.p0(ptr) nounwind readonly
+declare %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld1x3.v2i64.p0(ptr) nounwind readonly
+declare %struct.__neon_float64x2x3_t @llvm.aarch64.neon.ld1x3.v2f64.p0(ptr) nounwind readonly
 
-define %struct.__neon_int8x16x3_t @ld1_x3_v16i8(i8* %addr) {
+define %struct.__neon_int8x16x3_t @ld1_x3_v16i8(ptr %addr) {
 ; CHECK-LABEL: ld1_x3_v16i8:
 ; CHECK: ld1.16b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld1x3.v16i8.p0i8(i8* %addr)
+  %val = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld1x3.v16i8.p0(ptr %addr)
   ret %struct.__neon_int8x16x3_t %val
 }
 
-define %struct.__neon_int16x8x3_t @ld1_x3_v8i16(i16* %addr) {
+define %struct.__neon_int16x8x3_t @ld1_x3_v8i16(ptr %addr) {
 ; CHECK-LABEL: ld1_x3_v8i16:
 ; CHECK: ld1.8h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld1x3.v8i16.p0i16(i16* %addr)
+  %val = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld1x3.v8i16.p0(ptr %addr)
   ret %struct.__neon_int16x8x3_t %val
 }
 
-define %struct.__neon_int32x4x3_t @ld1_x3_v4i32(i32* %addr) {
+define %struct.__neon_int32x4x3_t @ld1_x3_v4i32(ptr %addr) {
 ; CHECK-LABEL: ld1_x3_v4i32:
 ; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld1x3.v4i32.p0i32(i32* %addr)
+  %val = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld1x3.v4i32.p0(ptr %addr)
   ret %struct.__neon_int32x4x3_t %val
 }
 
-define %struct.__neon_float32x4x3_t @ld1_x3_v4f32(float* %addr) {
+define %struct.__neon_float32x4x3_t @ld1_x3_v4f32(ptr %addr) {
 ; CHECK-LABEL: ld1_x3_v4f32:
 ; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_float32x4x3_t @llvm.aarch64.neon.ld1x3.v4f32.p0f32(float* %addr)
+  %val = call %struct.__neon_float32x4x3_t @llvm.aarch64.neon.ld1x3.v4f32.p0(ptr %addr)
   ret %struct.__neon_float32x4x3_t %val
 }
 
-define %struct.__neon_int64x2x3_t @ld1_x3_v2i64(i64* %addr) {
+define %struct.__neon_int64x2x3_t @ld1_x3_v2i64(ptr %addr) {
 ; CHECK-LABEL: ld1_x3_v2i64:
 ; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld1x3.v2i64.p0i64(i64* %addr)
+  %val = call %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld1x3.v2i64.p0(ptr %addr)
   ret %struct.__neon_int64x2x3_t %val
 }
 
-define %struct.__neon_float64x2x3_t @ld1_x3_v2f64(double* %addr) {
+define %struct.__neon_float64x2x3_t @ld1_x3_v2f64(ptr %addr) {
 ; CHECK-LABEL: ld1_x3_v2f64:
 ; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_float64x2x3_t @llvm.aarch64.neon.ld1x3.v2f64.p0f64(double* %addr)
+  %val = call %struct.__neon_float64x2x3_t @llvm.aarch64.neon.ld1x3.v2f64.p0(ptr %addr)
   ret %struct.__neon_float64x2x3_t %val
 }
 
-declare %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld1x4.v8i8.p0i8(i8*) nounwind readonly
-declare %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld1x4.v4i16.p0i16(i16*) nounwind readonly
-declare %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld1x4.v2i32.p0i32(i32*) nounwind readonly
-declare %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld1x4.v2f32.p0f32(float*) nounwind readonly
-declare %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld1x4.v1i64.p0i64(i64*) nounwind readonly
-declare %struct.__neon_float64x1x4_t @llvm.aarch64.neon.ld1x4.v1f64.p0f64(double*) nounwind readonly
+declare %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld1x4.v8i8.p0(ptr) nounwind readonly
+declare %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld1x4.v4i16.p0(ptr) nounwind readonly
+declare %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld1x4.v2i32.p0(ptr) nounwind readonly
+declare %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld1x4.v2f32.p0(ptr) nounwind readonly
+declare %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld1x4.v1i64.p0(ptr) nounwind readonly
+declare %struct.__neon_float64x1x4_t @llvm.aarch64.neon.ld1x4.v1f64.p0(ptr) nounwind readonly
 
-define %struct.__neon_int8x8x4_t @ld1_x4_v8i8(i8* %addr) {
+define %struct.__neon_int8x8x4_t @ld1_x4_v8i8(ptr %addr) {
 ; CHECK-LABEL: ld1_x4_v8i8:
 ; CHECK: ld1.8b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld1x4.v8i8.p0i8(i8* %addr)
+  %val = call %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld1x4.v8i8.p0(ptr %addr)
   ret %struct.__neon_int8x8x4_t %val
 }
 
-define %struct.__neon_int16x4x4_t @ld1_x4_v4i16(i16* %addr) {
+define %struct.__neon_int16x4x4_t @ld1_x4_v4i16(ptr %addr) {
 ; CHECK-LABEL: ld1_x4_v4i16:
 ; CHECK: ld1.4h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld1x4.v4i16.p0i16(i16* %addr)
+  %val = call %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld1x4.v4i16.p0(ptr %addr)
   ret %struct.__neon_int16x4x4_t %val
 }
 
-define %struct.__neon_int32x2x4_t @ld1_x4_v2i32(i32* %addr) {
+define %struct.__neon_int32x2x4_t @ld1_x4_v2i32(ptr %addr) {
 ; CHECK-LABEL: ld1_x4_v2i32:
 ; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld1x4.v2i32.p0i32(i32* %addr)
+  %val = call %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld1x4.v2i32.p0(ptr %addr)
   ret %struct.__neon_int32x2x4_t %val
 }
 
-define %struct.__neon_float32x2x4_t @ld1_x4_v2f32(float* %addr) {
+define %struct.__neon_float32x2x4_t @ld1_x4_v2f32(ptr %addr) {
 ; CHECK-LABEL: ld1_x4_v2f32:
 ; CHECK: ld1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld1x4.v2f32.p0f32(float* %addr)
+  %val = call %struct.__neon_float32x2x4_t @llvm.aarch64.neon.ld1x4.v2f32.p0(ptr %addr)
   ret %struct.__neon_float32x2x4_t %val
 }
 
-define %struct.__neon_int64x1x4_t @ld1_x4_v1i64(i64* %addr) {
+define %struct.__neon_int64x1x4_t @ld1_x4_v1i64(ptr %addr) {
 ; CHECK-LABEL: ld1_x4_v1i64:
 ; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld1x4.v1i64.p0i64(i64* %addr)
+  %val = call %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld1x4.v1i64.p0(ptr %addr)
   ret %struct.__neon_int64x1x4_t %val
 }
 
-define %struct.__neon_float64x1x4_t @ld1_x4_v1f64(double* %addr) {
+define %struct.__neon_float64x1x4_t @ld1_x4_v1f64(ptr %addr) {
 ; CHECK-LABEL: ld1_x4_v1f64:
 ; CHECK: ld1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_float64x1x4_t @llvm.aarch64.neon.ld1x4.v1f64.p0f64(double* %addr)
+  %val = call %struct.__neon_float64x1x4_t @llvm.aarch64.neon.ld1x4.v1f64.p0(ptr %addr)
   ret %struct.__neon_float64x1x4_t %val
 }
 
-declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld1x4.v16i8.p0i8(i8*) nounwind readonly
-declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld1x4.v8i16.p0i16(i16*) nounwind readonly
-declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld1x4.v4i32.p0i32(i32*) nounwind readonly
-declare %struct.__neon_float32x4x4_t @llvm.aarch64.neon.ld1x4.v4f32.p0f32(float*) nounwind readonly
-declare %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld1x4.v2i64.p0i64(i64*) nounwind readonly
-declare %struct.__neon_float64x2x4_t @llvm.aarch64.neon.ld1x4.v2f64.p0f64(double*) nounwind readonly
+declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld1x4.v16i8.p0(ptr) nounwind readonly
+declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld1x4.v8i16.p0(ptr) nounwind readonly
+declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld1x4.v4i32.p0(ptr) nounwind readonly
+declare %struct.__neon_float32x4x4_t @llvm.aarch64.neon.ld1x4.v4f32.p0(ptr) nounwind readonly
+declare %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld1x4.v2i64.p0(ptr) nounwind readonly
+declare %struct.__neon_float64x2x4_t @llvm.aarch64.neon.ld1x4.v2f64.p0(ptr) nounwind readonly
 
-define %struct.__neon_int8x16x4_t @ld1_x4_v16i8(i8* %addr) {
+define %struct.__neon_int8x16x4_t @ld1_x4_v16i8(ptr %addr) {
 ; CHECK-LABEL: ld1_x4_v16i8:
 ; CHECK: ld1.16b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld1x4.v16i8.p0i8(i8* %addr)
+  %val = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld1x4.v16i8.p0(ptr %addr)
   ret %struct.__neon_int8x16x4_t %val
 }
 
-define %struct.__neon_int16x8x4_t @ld1_x4_v8i16(i16* %addr) {
+define %struct.__neon_int16x8x4_t @ld1_x4_v8i16(ptr %addr) {
 ; CHECK-LABEL: ld1_x4_v8i16:
 ; CHECK: ld1.8h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld1x4.v8i16.p0i16(i16* %addr)
+  %val = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld1x4.v8i16.p0(ptr %addr)
   ret %struct.__neon_int16x8x4_t %val
 }
 
-define %struct.__neon_int32x4x4_t @ld1_x4_v4i32(i32* %addr) {
+define %struct.__neon_int32x4x4_t @ld1_x4_v4i32(ptr %addr) {
 ; CHECK-LABEL: ld1_x4_v4i32:
 ; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld1x4.v4i32.p0i32(i32* %addr)
+  %val = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld1x4.v4i32.p0(ptr %addr)
   ret %struct.__neon_int32x4x4_t %val
 }
 
-define %struct.__neon_float32x4x4_t @ld1_x4_v4f32(float* %addr) {
+define %struct.__neon_float32x4x4_t @ld1_x4_v4f32(ptr %addr) {
 ; CHECK-LABEL: ld1_x4_v4f32:
 ; CHECK: ld1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_float32x4x4_t @llvm.aarch64.neon.ld1x4.v4f32.p0f32(float* %addr)
+  %val = call %struct.__neon_float32x4x4_t @llvm.aarch64.neon.ld1x4.v4f32.p0(ptr %addr)
   ret %struct.__neon_float32x4x4_t %val
 }
 
-define %struct.__neon_int64x2x4_t @ld1_x4_v2i64(i64* %addr) {
+define %struct.__neon_int64x2x4_t @ld1_x4_v2i64(ptr %addr) {
 ; CHECK-LABEL: ld1_x4_v2i64:
 ; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld1x4.v2i64.p0i64(i64* %addr)
+  %val = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld1x4.v2i64.p0(ptr %addr)
   ret %struct.__neon_int64x2x4_t %val
 }
 
-define %struct.__neon_float64x2x4_t @ld1_x4_v2f64(double* %addr) {
+define %struct.__neon_float64x2x4_t @ld1_x4_v2f64(ptr %addr) {
 ; CHECK-LABEL: ld1_x4_v2f64:
 ; CHECK: ld1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  %val = call %struct.__neon_float64x2x4_t @llvm.aarch64.neon.ld1x4.v2f64.p0f64(double* %addr)
+  %val = call %struct.__neon_float64x2x4_t @llvm.aarch64.neon.ld1x4.v2f64.p0(ptr %addr)
   ret %struct.__neon_float64x2x4_t %val
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-ldp-aa.ll b/llvm/test/CodeGen/AArch64/arm64-ldp-aa.ll
index acc70988e3608..34b927e0a1b66 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ldp-aa.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ldp-aa.ll
@@ -7,12 +7,12 @@
 ; CHECK: ldp w8, w9, [x1]
 ; CHECK: str w0, [x1, #8]
 ; CHECK: ret
-define i32 @ldp_int_aa(i32 %a, i32* %p) nounwind {
-  %tmp = load i32, i32* %p, align 4
-  %str.ptr = getelementptr inbounds i32, i32* %p, i64 2
-  store i32 %a, i32* %str.ptr, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
-  %tmp1 = load i32, i32* %add.ptr, align 4
+define i32 @ldp_int_aa(i32 %a, ptr %p) nounwind {
+  %tmp = load i32, ptr %p, align 4
+  %str.ptr = getelementptr inbounds i32, ptr %p, i64 2
+  store i32 %a, ptr %str.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
+  %tmp1 = load i32, ptr %add.ptr, align 4
   %add = add nsw i32 %tmp1, %tmp
   ret i32 %add
 }
@@ -21,12 +21,12 @@ define i32 @ldp_int_aa(i32 %a, i32* %p) nounwind {
 ; CHECK: ldp x8, x9, [x1]
 ; CHECK: str x0, [x1, #16]
 ; CHECK: ret
-define i64 @ldp_long_aa(i64 %a, i64* %p) nounwind {
-  %tmp = load i64, i64* %p, align 8
-  %str.ptr = getelementptr inbounds i64, i64* %p, i64 2
-  store i64 %a, i64* %str.ptr, align 4
-  %add.ptr = getelementptr inbounds i64, i64* %p, i64 1
-  %tmp1 = load i64, i64* %add.ptr, align 8
+define i64 @ldp_long_aa(i64 %a, ptr %p) nounwind {
+  %tmp = load i64, ptr %p, align 8
+  %str.ptr = getelementptr inbounds i64, ptr %p, i64 2
+  store i64 %a, ptr %str.ptr, align 4
+  %add.ptr = getelementptr inbounds i64, ptr %p, i64 1
+  %tmp1 = load i64, ptr %add.ptr, align 8
   %add = add nsw i64 %tmp1, %tmp
   ret i64 %add
 }
@@ -35,12 +35,12 @@ define i64 @ldp_long_aa(i64 %a, i64* %p) nounwind {
 ; CHECK: str s0, [x0, #8]
 ; CHECK: ldp s1, s0, [x0]
 ; CHECK: ret
-define float @ldp_float_aa(float %a, float* %p) nounwind {
-  %tmp = load float, float* %p, align 4
-  %str.ptr = getelementptr inbounds float, float* %p, i64 2
-  store float %a, float* %str.ptr, align 4
-  %add.ptr = getelementptr inbounds float, float* %p, i64 1
-  %tmp1 = load float, float* %add.ptr, align 4
+define float @ldp_float_aa(float %a, ptr %p) nounwind {
+  %tmp = load float, ptr %p, align 4
+  %str.ptr = getelementptr inbounds float, ptr %p, i64 2
+  store float %a, ptr %str.ptr, align 4
+  %add.ptr = getelementptr inbounds float, ptr %p, i64 1
+  %tmp1 = load float, ptr %add.ptr, align 4
   %add = fadd float %tmp, %tmp1
   ret float %add
 }
@@ -49,12 +49,12 @@ define float @ldp_float_aa(float %a, float* %p) nounwind {
 ; CHECK: str d0, [x0, #16]
 ; CHECK: ldp d1, d0, [x0]
 ; CHECK: ret
-define double @ldp_double_aa(double %a, double* %p) nounwind {
-  %tmp = load double, double* %p, align 8
-  %str.ptr = getelementptr inbounds double, double* %p, i64 2
-  store double %a, double* %str.ptr, align 4
-  %add.ptr = getelementptr inbounds double, double* %p, i64 1
-  %tmp1 = load double, double* %add.ptr, align 8
+define double @ldp_double_aa(double %a, ptr %p) nounwind {
+  %tmp = load double, ptr %p, align 8
+  %str.ptr = getelementptr inbounds double, ptr %p, i64 2
+  store double %a, ptr %str.ptr, align 4
+  %add.ptr = getelementptr inbounds double, ptr %p, i64 1
+  %tmp1 = load double, ptr %add.ptr, align 8
   %add = fadd double %tmp, %tmp1
   ret double %add
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll b/llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll
index a45373a1d2111..4fa34e846b206 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll
@@ -8,11 +8,11 @@
 ; CHECK: Cluster ld/st SU(1) - SU(2)
 ; CHECK: SU(1):   %{{[0-9]+}}:gpr32 = LDRWui
 ; CHECK: SU(2):   %{{[0-9]+}}:gpr32 = LDRWui
-define i32 @ldr_int(i32* %a) nounwind {
-  %p1 = getelementptr inbounds i32, i32* %a, i32 1
-  %tmp1 = load i32, i32* %p1, align 2
-  %p2 = getelementptr inbounds i32, i32* %a, i32 2
-  %tmp2 = load i32, i32* %p2, align 2
+define i32 @ldr_int(ptr %a) nounwind {
+  %p1 = getelementptr inbounds i32, ptr %a, i32 1
+  %tmp1 = load i32, ptr %p1, align 2
+  %p2 = getelementptr inbounds i32, ptr %a, i32 2
+  %tmp2 = load i32, ptr %p2, align 2
   %tmp3 = add i32 %tmp1, %tmp2
   ret i32 %tmp3
 }
@@ -23,10 +23,10 @@ define i32 @ldr_int(i32* %a) nounwind {
 ; CHECK: Cluster ld/st SU(1) - SU(2)
 ; CHECK: SU(1):   %{{[0-9]+}}:gpr64 = LDRSWui
 ; CHECK: SU(2):   %{{[0-9]+}}:gpr64 = LDRSWui
-define i64 @ldp_sext_int(i32* %p) nounwind {
-  %tmp = load i32, i32* %p, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
-  %tmp1 = load i32, i32* %add.ptr, align 4
+define i64 @ldp_sext_int(ptr %p) nounwind {
+  %tmp = load i32, ptr %p, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
+  %tmp1 = load i32, ptr %add.ptr, align 4
   %sexttmp = sext i32 %tmp to i64
   %sexttmp1 = sext i32 %tmp1 to i64
   %add = add nsw i64 %sexttmp1, %sexttmp
@@ -39,11 +39,11 @@ define i64 @ldp_sext_int(i32* %p) nounwind {
 ; CHECK: Cluster ld/st SU(1) - SU(2)
 ; CHECK: SU(1):   %{{[0-9]+}}:gpr32 = LDURWi
 ; CHECK: SU(2):   %{{[0-9]+}}:gpr32 = LDURWi
-define i32 @ldur_int(i32* %a) nounwind {
-  %p1 = getelementptr inbounds i32, i32* %a, i32 -1
-  %tmp1 = load i32, i32* %p1, align 2
-  %p2 = getelementptr inbounds i32, i32* %a, i32 -2
-  %tmp2 = load i32, i32* %p2, align 2
+define i32 @ldur_int(ptr %a) nounwind {
+  %p1 = getelementptr inbounds i32, ptr %a, i32 -1
+  %tmp1 = load i32, ptr %p1, align 2
+  %p2 = getelementptr inbounds i32, ptr %a, i32 -2
+  %tmp2 = load i32, ptr %p2, align 2
   %tmp3 = add i32 %tmp1, %tmp2
   ret i32 %tmp3
 }
@@ -54,11 +54,11 @@ define i32 @ldur_int(i32* %a) nounwind {
 ; CHECK: Cluster ld/st SU(3) - SU(4)
 ; CHECK: SU(3):   %{{[0-9]+}}:gpr64 = LDRSWui
 ; CHECK: SU(4):   undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui
-define i64 @ldp_half_sext_zext_int(i64* %q, i32* %p) nounwind {
-  %tmp0 = load i64, i64* %q, align 4
-  %tmp = load i32, i32* %p, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
-  %tmp1 = load i32, i32* %add.ptr, align 4
+define i64 @ldp_half_sext_zext_int(ptr %q, ptr %p) nounwind {
+  %tmp0 = load i64, ptr %q, align 4
+  %tmp = load i32, ptr %p, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
+  %tmp1 = load i32, ptr %add.ptr, align 4
   %sexttmp = sext i32 %tmp to i64
   %sexttmp1 = zext i32 %tmp1 to i64
   %add = add nsw i64 %sexttmp1, %sexttmp
@@ -72,11 +72,11 @@ define i64 @ldp_half_sext_zext_int(i64* %q, i32* %p) nounwind {
 ; CHECK: Cluster ld/st SU(3) - SU(4)
 ; CHECK: SU(3):   undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui
 ; CHECK: SU(4):   %{{[0-9]+}}:gpr64 = LDRSWui
-define i64 @ldp_half_zext_sext_int(i64* %q, i32* %p) nounwind {
-  %tmp0 = load i64, i64* %q, align 4
-  %tmp = load i32, i32* %p, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
-  %tmp1 = load i32, i32* %add.ptr, align 4
+define i64 @ldp_half_zext_sext_int(ptr %q, ptr %p) nounwind {
+  %tmp0 = load i64, ptr %q, align 4
+  %tmp = load i32, ptr %p, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
+  %tmp1 = load i32, ptr %add.ptr, align 4
   %sexttmp = zext i32 %tmp to i64
   %sexttmp1 = sext i32 %tmp1 to i64
   %add = add nsw i64 %sexttmp1, %sexttmp
@@ -90,11 +90,11 @@ define i64 @ldp_half_zext_sext_int(i64* %q, i32* %p) nounwind {
 ; CHECK-NOT: Cluster ld/st
 ; CHECK: SU(1):   %{{[0-9]+}}:gpr32 = LDRWui
 ; CHECK: SU(2):   %{{[0-9]+}}:gpr32 = LDRWui
-define i32 @ldr_int_volatile(i32* %a) nounwind {
-  %p1 = getelementptr inbounds i32, i32* %a, i32 1
-  %tmp1 = load volatile i32, i32* %p1, align 2
-  %p2 = getelementptr inbounds i32, i32* %a, i32 2
-  %tmp2 = load volatile i32, i32* %p2, align 2
+define i32 @ldr_int_volatile(ptr %a) nounwind {
+  %p1 = getelementptr inbounds i32, ptr %a, i32 1
+  %tmp1 = load volatile i32, ptr %p1, align 2
+  %p2 = getelementptr inbounds i32, ptr %a, i32 2
+  %tmp2 = load volatile i32, ptr %p2, align 2
   %tmp3 = add i32 %tmp1, %tmp2
   ret i32 %tmp3
 }
@@ -105,13 +105,11 @@ define i32 @ldr_int_volatile(i32* %a) nounwind {
 ; CHECK: Cluster ld/st SU(1) - SU(3)
 ; CHECK: SU(1):   %{{[0-9]+}}:fpr128 = LDRQui
 ; CHECK: SU(3):   %{{[0-9]+}}:fpr128 = LDRQui
-define <2 x i64> @ldq_cluster(i64* %p) {
-  %a1 = bitcast i64* %p to <2 x i64>*
-  %tmp1 = load <2 x i64>, < 2 x i64>* %a1, align 8
-  %add.ptr2 = getelementptr inbounds i64, i64* %p, i64 2
-  %a2 = bitcast i64* %add.ptr2 to <2 x i64>*
+define <2 x i64> @ldq_cluster(ptr %p) {
+  %tmp1 = load <2 x i64>, < 2 x i64>* %p, align 8
+  %add.ptr2 = getelementptr inbounds i64, ptr %p, i64 2
   %tmp2 = add nsw <2 x i64> %tmp1, %tmp1
-  %tmp3 = load <2 x i64>, <2 x i64>* %a2, align 8
+  %tmp3 = load <2 x i64>, ptr %add.ptr2, align 8
   %res  = mul nsw <2 x i64> %tmp2, %tmp3
   ret <2 x i64> %res
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-ldur.ll b/llvm/test/CodeGen/AArch64/arm64-ldur.ll
index cfd9bfeb599a5..0e55370a4a626 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ldur.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ldur.ll
@@ -1,66 +1,64 @@
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 
-define i64 @_f0(i64* %p) {
+define i64 @_f0(ptr %p) {
 ; CHECK: f0:
 ; CHECK: ldur x0, [x0, #-8]
 ; CHECK-NEXT: ret
-  %tmp = getelementptr inbounds i64, i64* %p, i64 -1
-  %ret = load i64, i64* %tmp, align 2
+  %tmp = getelementptr inbounds i64, ptr %p, i64 -1
+  %ret = load i64, ptr %tmp, align 2
   ret i64 %ret
 }
-define i32 @_f1(i32* %p) {
+define i32 @_f1(ptr %p) {
 ; CHECK: f1:
 ; CHECK: ldur w0, [x0, #-4]
 ; CHECK-NEXT: ret
-  %tmp = getelementptr inbounds i32, i32* %p, i64 -1
-  %ret = load i32, i32* %tmp, align 2
+  %tmp = getelementptr inbounds i32, ptr %p, i64 -1
+  %ret = load i32, ptr %tmp, align 2
   ret i32 %ret
 }
-define i16 @_f2(i16* %p) {
+define i16 @_f2(ptr %p) {
 ; CHECK: f2:
 ; CHECK: ldurh w0, [x0, #-2]
 ; CHECK-NEXT: ret
-  %tmp = getelementptr inbounds i16, i16* %p, i64 -1
-  %ret = load i16, i16* %tmp, align 2
+  %tmp = getelementptr inbounds i16, ptr %p, i64 -1
+  %ret = load i16, ptr %tmp, align 2
   ret i16 %ret
 }
-define i8 @_f3(i8* %p) {
+define i8 @_f3(ptr %p) {
 ; CHECK: f3:
 ; CHECK: ldurb w0, [x0, #-1]
 ; CHECK-NEXT: ret
-  %tmp = getelementptr inbounds i8, i8* %p, i64 -1
-  %ret = load i8, i8* %tmp, align 2
+  %tmp = getelementptr inbounds i8, ptr %p, i64 -1
+  %ret = load i8, ptr %tmp, align 2
   ret i8 %ret
 }
 
-define i64 @zext32(i8* %a) nounwind ssp {
+define i64 @zext32(ptr %a) nounwind ssp {
 ; CHECK-LABEL: zext32:
 ; CHECK: ldur w0, [x0, #-12]
 ; CHECK-NEXT: ret
-  %p = getelementptr inbounds i8, i8* %a, i64 -12
-  %tmp1 = bitcast i8* %p to i32*
-  %tmp2 = load i32, i32* %tmp1, align 4
+  %p = getelementptr inbounds i8, ptr %a, i64 -12
+  %tmp2 = load i32, ptr %p, align 4
   %ret = zext i32 %tmp2 to i64
 
   ret i64 %ret
 }
-define i64 @zext16(i8* %a) nounwind ssp {
+define i64 @zext16(ptr %a) nounwind ssp {
 ; CHECK-LABEL: zext16:
 ; CHECK: ldurh w0, [x0, #-12]
 ; CHECK-NEXT: ret
-  %p = getelementptr inbounds i8, i8* %a, i64 -12
-  %tmp1 = bitcast i8* %p to i16*
-  %tmp2 = load i16, i16* %tmp1, align 2
+  %p = getelementptr inbounds i8, ptr %a, i64 -12
+  %tmp2 = load i16, ptr %p, align 2
   %ret = zext i16 %tmp2 to i64
 
   ret i64 %ret
 }
-define i64 @zext8(i8* %a) nounwind ssp {
+define i64 @zext8(ptr %a) nounwind ssp {
 ; CHECK-LABEL: zext8:
 ; CHECK: ldurb w0, [x0, #-12]
 ; CHECK-NEXT: ret
-  %p = getelementptr inbounds i8, i8* %a, i64 -12
-  %tmp2 = load i8, i8* %p, align 1
+  %p = getelementptr inbounds i8, ptr %a, i64 -12
+  %tmp2 = load i8, ptr %p, align 1
   %ret = zext i8 %tmp2 to i64
 
   ret i64 %ret

diff --git a/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll b/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll
index 014d7b6fd62b8..99f2ff04984e3 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll
@@ -3,11 +3,11 @@
 
 %0 = type { i64, i64 }
 
-define dso_local i128 @f0(i8* %p) nounwind readonly {
+define dso_local i128 @f0(ptr %p) nounwind readonly {
 ; CHECK-LABEL: f0:
 ; CHECK: ldxp {{x[0-9]+}}, {{x[0-9]+}}, [x0]
 entry:
-  %ldrexd = tail call %0 @llvm.aarch64.ldxp(i8* %p)
+  %ldrexd = tail call %0 @llvm.aarch64.ldxp(ptr %p)
   %0 = extractvalue %0 %ldrexd, 1
   %1 = extractvalue %0 %ldrexd, 0
   %2 = zext i64 %0 to i128
@@ -17,24 +17,24 @@ entry:
   ret i128 %4
 }
 
-define dso_local i32 @f1(i8* %ptr, i128 %val) nounwind {
+define dso_local i32 @f1(ptr %ptr, i128 %val) nounwind {
 ; CHECK-LABEL: f1:
 ; CHECK: stxp {{w[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, [x0]
 entry:
   %tmp4 = trunc i128 %val to i64
   %tmp6 = lshr i128 %val, 64
   %tmp7 = trunc i128 %tmp6 to i64
-  %strexd = tail call i32 @llvm.aarch64.stxp(i64 %tmp4, i64 %tmp7, i8* %ptr)
+  %strexd = tail call i32 @llvm.aarch64.stxp(i64 %tmp4, i64 %tmp7, ptr %ptr)
   ret i32 %strexd
 }
 
-declare %0 @llvm.aarch64.ldxp(i8*) nounwind
-declare i32 @llvm.aarch64.stxp(i64, i64, i8*) nounwind
+declare %0 @llvm.aarch64.ldxp(ptr) nounwind
+declare i32 @llvm.aarch64.stxp(i64, i64, ptr) nounwind
 
 @var = dso_local global i64 0, align 8
 
 ; FALLBACK-NOT: remark:{{.*}}test_load_i8
-define dso_local void @test_load_i8(i8* %addr) {
+define dso_local void @test_load_i8(ptr %addr) {
 ; CHECK-LABEL: test_load_i8:
 ; CHECK: ldxrb w[[LOADVAL:[0-9]+]], [x0]
 ; CHECK-NOT: uxtb
@@ -45,15 +45,15 @@ define dso_local void @test_load_i8(i8* %addr) {
 ; GISEL: ldxrb w[[LOADVAL:[0-9]+]], [x0]
 ; GISEL-NOT: uxtb
 ; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
-  %val = call i64 @llvm.aarch64.ldxr.p0i8(i8* elementtype(i8) %addr)
+  %val = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i8) %addr)
   %shortval = trunc i64 %val to i8
   %extval = zext i8 %shortval to i64
-  store i64 %extval, i64* @var, align 8
+  store i64 %extval, ptr @var, align 8
   ret void
 }
 
 ; FALLBACK-NOT: remark:{{.*}}test_load_i16
-define dso_local void @test_load_i16(i16* %addr) {
+define dso_local void @test_load_i16(ptr %addr) {
 ; CHECK-LABEL: test_load_i16:
 ; CHECK: ldxrh w[[LOADVAL:[0-9]+]], [x0]
 ; CHECK-NOT: uxth
@@ -64,15 +64,15 @@ define dso_local void @test_load_i16(i16* %addr) {
 ; GISEL: ldxrh w[[LOADVAL:[0-9]+]], [x0]
 ; GISEL-NOT: uxtb
 ; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
-  %val = call i64 @llvm.aarch64.ldxr.p0i16(i16* elementtype(i16) %addr)
+  %val = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i16) %addr)
   %shortval = trunc i64 %val to i16
   %extval = zext i16 %shortval to i64
-  store i64 %extval, i64* @var, align 8
+  store i64 %extval, ptr @var, align 8
   ret void
 }
 
 ; FALLBACK-NOT: remark:{{.*}}test_load_i32
-define dso_local void @test_load_i32(i32* %addr) {
+define dso_local void @test_load_i32(ptr %addr) {
 ; CHECK-LABEL: test_load_i32:
 ; CHECK: ldxr w[[LOADVAL:[0-9]+]], [x0]
 ; CHECK-NOT: uxtw
@@ -83,15 +83,15 @@ define dso_local void @test_load_i32(i32* %addr) {
 ; GISEL: ldxr w[[LOADVAL:[0-9]+]], [x0]
 ; GISEL-NOT: uxtb
 ; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
-  %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) %addr)
+  %val = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i32) %addr)
   %shortval = trunc i64 %val to i32
   %extval = zext i32 %shortval to i64
-  store i64 %extval, i64* @var, align 8
+  store i64 %extval, ptr @var, align 8
   ret void
 }
 
 ; FALLBACK-NOT: remark:{{.*}}test_load_i64
-define dso_local void @test_load_i64(i64* %addr) {
+define dso_local void @test_load_i64(ptr %addr) {
 ; CHECK-LABEL: test_load_i64:
 ; CHECK: ldxr x[[LOADVAL:[0-9]+]], [x0]
 ; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
@@ -100,19 +100,16 @@ define dso_local void @test_load_i64(i64* %addr) {
 ; GISEL: ldxr x[[LOADVAL:[0-9]+]], [x0]
 ; GISEL-NOT: uxtb
 ; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
-  %val = call i64 @llvm.aarch64.ldxr.p0i64(i64* elementtype(i64) %addr)
-  store i64 %val, i64* @var, align 8
+  %val = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i64) %addr)
+  store i64 %val, ptr @var, align 8
   ret void
 }
 
 
-declare i64 @llvm.aarch64.ldxr.p0i8(i8*) nounwind
-declare i64 @llvm.aarch64.ldxr.p0i16(i16*) nounwind
-declare i64 @llvm.aarch64.ldxr.p0i32(i32*) nounwind
-declare i64 @llvm.aarch64.ldxr.p0i64(i64*) nounwind
+declare i64 @llvm.aarch64.ldxr.p0(ptr) nounwind
 
 ; FALLBACK-NOT: remark:{{.*}}test_store_i8
-define dso_local i32 @test_store_i8(i32, i8 %val, i8* %addr) {
+define dso_local i32 @test_store_i8(i32, i8 %val, ptr %addr) {
 ; CHECK-LABEL: test_store_i8:
 ; CHECK-NOT: uxtb
 ; CHECK-NOT: and
@@ -122,12 +119,12 @@ define dso_local i32 @test_store_i8(i32, i8 %val, i8* %addr) {
 ; GISEL-NOT: and
 ; GISEL: stxrb w0, w1, [x2]
   %extval = zext i8 %val to i64
-  %res = call i32 @llvm.aarch64.stxr.p0i8(i64 %extval, i8* elementtype(i8) %addr)
+  %res = call i32 @llvm.aarch64.stxr.p0(i64 %extval, ptr elementtype(i8) %addr)
   ret i32 %res
 }
 
 ; FALLBACK-NOT: remark:{{.*}}test_store_i16
-define dso_local i32 @test_store_i16(i32, i16 %val, i16* %addr) {
+define dso_local i32 @test_store_i16(i32, i16 %val, ptr %addr) {
 ; CHECK-LABEL: test_store_i16:
 ; CHECK-NOT: uxth
 ; CHECK-NOT: and
@@ -137,12 +134,12 @@ define dso_local i32 @test_store_i16(i32, i16 %val, i16* %addr) {
 ; GISEL-NOT: and
 ; GISEL: stxrh w0, w1, [x2]
   %extval = zext i16 %val to i64
-  %res = call i32 @llvm.aarch64.stxr.p0i16(i64 %extval, i16* elementtype(i16) %addr)
+  %res = call i32 @llvm.aarch64.stxr.p0(i64 %extval, ptr elementtype(i16) %addr)
   ret i32 %res
 }
 
 ; FALLBACK-NOT: remark:{{.*}}test_store_i32
-define dso_local i32 @test_store_i32(i32, i32 %val, i32* %addr) {
+define dso_local i32 @test_store_i32(i32, i32 %val, ptr %addr) {
 ; CHECK-LABEL: test_store_i32:
 ; CHECK-NOT: uxtw
 ; CHECK-NOT: and
@@ -152,24 +149,21 @@ define dso_local i32 @test_store_i32(i32, i32 %val, i32* %addr) {
 ; GISEL-NOT: and
 ; GISEL: stxr w0, w1, [x2]
   %extval = zext i32 %val to i64
-  %res = call i32 @llvm.aarch64.stxr.p0i32(i64 %extval, i32* elementtype(i32) %addr)
+  %res = call i32 @llvm.aarch64.stxr.p0(i64 %extval, ptr elementtype(i32) %addr)
   ret i32 %res
 }
 
 ; FALLBACK-NOT: remark:{{.*}}test_store_i64
-define dso_local i32 @test_store_i64(i32, i64 %val, i64* %addr) {
+define dso_local i32 @test_store_i64(i32, i64 %val, ptr %addr) {
 ; CHECK-LABEL: test_store_i64:
 ; CHECK: stxr w0, x1, [x2]
 ; GISEL-LABEL: test_store_i64:
 ; GISEL: stxr w0, x1, [x2]
-  %res = call i32 @llvm.aarch64.stxr.p0i64(i64 %val, i64* elementtype(i64) %addr)
+  %res = call i32 @llvm.aarch64.stxr.p0(i64 %val, ptr elementtype(i64) %addr)
   ret i32 %res
 }
 
-declare i32 @llvm.aarch64.stxr.p0i8(i64, i8*) nounwind
-declare i32 @llvm.aarch64.stxr.p0i16(i64, i16*) nounwind
-declare i32 @llvm.aarch64.stxr.p0i32(i64, i32*) nounwind
-declare i32 @llvm.aarch64.stxr.p0i64(i64, i64*) nounwind
+declare i32 @llvm.aarch64.stxr.p0(i64, ptr) nounwind
 
 ; CHECK: test_clear:
 ; CHECK: clrex
@@ -180,11 +174,11 @@ define dso_local void @test_clear() {
 
 declare void @llvm.aarch64.clrex() nounwind
 
-define dso_local i128 @test_load_acquire_i128(i8* %p) nounwind readonly {
+define dso_local i128 @test_load_acquire_i128(ptr %p) nounwind readonly {
 ; CHECK-LABEL: test_load_acquire_i128:
 ; CHECK: ldaxp {{x[0-9]+}}, {{x[0-9]+}}, [x0]
 entry:
-  %ldrexd = tail call %0 @llvm.aarch64.ldaxp(i8* %p)
+  %ldrexd = tail call %0 @llvm.aarch64.ldaxp(ptr %p)
   %0 = extractvalue %0 %ldrexd, 1
   %1 = extractvalue %0 %ldrexd, 0
   %2 = zext i64 %0 to i128
@@ -194,22 +188,22 @@ entry:
   ret i128 %4
 }
 
-define dso_local i32 @test_store_release_i128(i8* %ptr, i128 %val) nounwind {
+define dso_local i32 @test_store_release_i128(ptr %ptr, i128 %val) nounwind {
 ; CHECK-LABEL: test_store_release_i128:
 ; CHECK: stlxp {{w[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, [x0]
 entry:
   %tmp4 = trunc i128 %val to i64
   %tmp6 = lshr i128 %val, 64
   %tmp7 = trunc i128 %tmp6 to i64
-  %strexd = tail call i32 @llvm.aarch64.stlxp(i64 %tmp4, i64 %tmp7, i8* %ptr)
+  %strexd = tail call i32 @llvm.aarch64.stlxp(i64 %tmp4, i64 %tmp7, ptr %ptr)
   ret i32 %strexd
 }
 
-declare %0 @llvm.aarch64.ldaxp(i8*) nounwind
-declare i32 @llvm.aarch64.stlxp(i64, i64, i8*) nounwind
+declare %0 @llvm.aarch64.ldaxp(ptr) nounwind
+declare i32 @llvm.aarch64.stlxp(i64, i64, ptr) nounwind
 
 ; FALLBACK-NOT: remark:{{.*}}test_load_acquire_i8
-define dso_local void @test_load_acquire_i8(i8* %addr) {
+define dso_local void @test_load_acquire_i8(ptr %addr) {
 ; CHECK-LABEL: test_load_acquire_i8:
 ; CHECK: ldaxrb w[[LOADVAL:[0-9]+]], [x0]
 ; CHECK-NOT: uxtb
@@ -219,15 +213,15 @@ define dso_local void @test_load_acquire_i8(i8* %addr) {
 ; GISEL-LABEL: test_load_acquire_i8:
 ; GISEL: ldaxrb w[[LOADVAL:[0-9]+]], [x0]
 ; GISEL-DAG: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
-  %val = call i64 @llvm.aarch64.ldaxr.p0i8(i8* elementtype(i8) %addr)
+  %val = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i8) %addr)
   %shortval = trunc i64 %val to i8
   %extval = zext i8 %shortval to i64
-  store i64 %extval, i64* @var, align 8
+  store i64 %extval, ptr @var, align 8
   ret void
 }
 
 ; FALLBACK-NOT: remark:{{.*}}test_load_acquire_i16
-define dso_local void @test_load_acquire_i16(i16* %addr) {
+define dso_local void @test_load_acquire_i16(ptr %addr) {
 ; CHECK-LABEL: test_load_acquire_i16:
 ; CHECK: ldaxrh w[[LOADVAL:[0-9]+]], [x0]
 ; CHECK-NOT: uxth
@@ -237,15 +231,15 @@ define dso_local void @test_load_acquire_i16(i16* %addr) {
 ; GISEL-LABEL: test_load_acquire_i16:
 ; GISEL: ldaxrh w[[LOADVAL:[0-9]+]], [x0]
 ; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
-  %val = call i64 @llvm.aarch64.ldaxr.p0i16(i16* elementtype(i16) %addr)
+  %val = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i16) %addr)
   %shortval = trunc i64 %val to i16
   %extval = zext i16 %shortval to i64
-  store i64 %extval, i64* @var, align 8
+  store i64 %extval, ptr @var, align 8
   ret void
 }
 
 ; FALLBACK-NOT: remark:{{.*}}test_load_acquire_i32
-define dso_local void @test_load_acquire_i32(i32* %addr) {
+define dso_local void @test_load_acquire_i32(ptr %addr) {
 ; CHECK-LABEL: test_load_acquire_i32:
 ; CHECK: ldaxr w[[LOADVAL:[0-9]+]], [x0]
 ; CHECK-NOT: uxtw
@@ -255,15 +249,15 @@ define dso_local void @test_load_acquire_i32(i32* %addr) {
 ; GISEL-LABEL: test_load_acquire_i32:
 ; GISEL: ldaxr w[[LOADVAL:[0-9]+]], [x0]
 ; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
-  %val = call i64 @llvm.aarch64.ldaxr.p0i32(i32* elementtype(i32) %addr)
+  %val = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i32) %addr)
   %shortval = trunc i64 %val to i32
   %extval = zext i32 %shortval to i64
-  store i64 %extval, i64* @var, align 8
+  store i64 %extval, ptr @var, align 8
   ret void
 }
 
 ; FALLBACK-NOT: remark:{{.*}}test_load_acquire_i64
-define dso_local void @test_load_acquire_i64(i64* %addr) {
+define dso_local void @test_load_acquire_i64(ptr %addr) {
 ; CHECK-LABEL: test_load_acquire_i64:
 ; CHECK: ldaxr x[[LOADVAL:[0-9]+]], [x0]
 ; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
@@ -271,19 +265,16 @@ define dso_local void @test_load_acquire_i64(i64* %addr) {
 ; GISEL-LABEL: test_load_acquire_i64:
 ; GISEL: ldaxr x[[LOADVAL:[0-9]+]], [x0]
 ; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
-  %val = call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) %addr)
-  store i64 %val, i64* @var, align 8
+  %val = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i64) %addr)
+  store i64 %val, ptr @var, align 8
   ret void
 }
 
 
-declare i64 @llvm.aarch64.ldaxr.p0i8(i8*) nounwind
-declare i64 @llvm.aarch64.ldaxr.p0i16(i16*) nounwind
-declare i64 @llvm.aarch64.ldaxr.p0i32(i32*) nounwind
-declare i64 @llvm.aarch64.ldaxr.p0i64(i64*) nounwind
+declare i64 @llvm.aarch64.ldaxr.p0(ptr) nounwind
 
 ; FALLBACK-NOT: remark:{{.*}}test_store_release_i8
-define dso_local i32 @test_store_release_i8(i32, i8 %val, i8* %addr) {
+define dso_local i32 @test_store_release_i8(i32, i8 %val, ptr %addr) {
 ; CHECK-LABEL: test_store_release_i8:
 ; CHECK-NOT: uxtb
 ; CHECK-NOT: and
@@ -293,12 +284,12 @@ define dso_local i32 @test_store_release_i8(i32, i8 %val, i8* %addr) {
 ; GISEL-NOT: and
 ; GISEL: stlxrb w0, w1, [x2]
   %extval = zext i8 %val to i64
-  %res = call i32 @llvm.aarch64.stlxr.p0i8(i64 %extval, i8* elementtype(i8) %addr)
+  %res = call i32 @llvm.aarch64.stlxr.p0(i64 %extval, ptr elementtype(i8) %addr)
   ret i32 %res
 }
 
 ; FALLBACK-NOT: remark:{{.*}}test_store_release_i16
-define dso_local i32 @test_store_release_i16(i32, i16 %val, i16* %addr) {
+define dso_local i32 @test_store_release_i16(i32, i16 %val, ptr %addr) {
 ; CHECK-LABEL: test_store_release_i16:
 ; CHECK-NOT: uxth
 ; CHECK-NOT: and
@@ -308,12 +299,12 @@ define dso_local i32 @test_store_release_i16(i32, i16 %val, i16* %addr) {
 ; GISEL-NOT: and
 ; GISEL: stlxrh w0, w1, [x2]
   %extval = zext i16 %val to i64
-  %res = call i32 @llvm.aarch64.stlxr.p0i16(i64 %extval, i16* elementtype(i16) %addr)
+  %res = call i32 @llvm.aarch64.stlxr.p0(i64 %extval, ptr elementtype(i16) %addr)
   ret i32 %res
 }
 
 ; FALLBACK-NOT: remark:{{.*}}test_store_release_i32
-define dso_local i32 @test_store_release_i32(i32, i32 %val, i32* %addr) {
+define dso_local i32 @test_store_release_i32(i32, i32 %val, ptr %addr) {
 ; CHECK-LABEL: test_store_release_i32:
 ; CHECK-NOT: uxtw
 ; CHECK-NOT: and
@@ -323,21 +314,18 @@ define dso_local i32 @test_store_release_i32(i32, i32 %val, i32* %addr) {
 ; GISEL-NOT: and
 ; GISEL: stlxr w0, w1, [x2]
   %extval = zext i32 %val to i64
-  %res = call i32 @llvm.aarch64.stlxr.p0i32(i64 %extval, i32* elementtype(i32) %addr)
+  %res = call i32 @llvm.aarch64.stlxr.p0(i64 %extval, ptr elementtype(i32) %addr)
   ret i32 %res
 }
 
 ; FALLBACK-NOT: remark:{{.*}}test_store_release_i64
-define dso_local i32 @test_store_release_i64(i32, i64 %val, i64* %addr) {
+define dso_local i32 @test_store_release_i64(i32, i64 %val, ptr %addr) {
 ; CHECK-LABEL: test_store_release_i64:
 ; CHECK: stlxr w0, x1, [x2]
 ; GISEL-LABEL: test_store_release_i64:
 ; GISEL: stlxr w0, x1, [x2]
-  %res = call i32 @llvm.aarch64.stlxr.p0i64(i64 %val, i64* elementtype(i64) %addr)
+  %res = call i32 @llvm.aarch64.stlxr.p0(i64 %val, ptr elementtype(i64) %addr)
   ret i32 %res
 }
 
-declare i32 @llvm.aarch64.stlxr.p0i8(i64, i8*) nounwind
-declare i32 @llvm.aarch64.stlxr.p0i16(i64, i16*) nounwind
-declare i32 @llvm.aarch64.stlxr.p0i32(i64, i32*) nounwind
-declare i32 @llvm.aarch64.stlxr.p0i64(i64, i64*) nounwind
+declare i32 @llvm.aarch64.stlxr.p0(i64, ptr) nounwind

diff --git a/llvm/test/CodeGen/AArch64/arm64-memcpy-inline.ll b/llvm/test/CodeGen/AArch64/arm64-memcpy-inline.ll
index ee0e99cd960e2..7f0bc6ac090b2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-memcpy-inline.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-memcpy-inline.ll
@@ -20,22 +20,22 @@ entry:
 ; CHECK-DAG: stur [[REG0]], [x[[BASEREG2:[0-9]+]], #7]
 ; CHECK-DAG: ldr [[REG2:x[0-9]+]],
 ; CHECK-DAG: str [[REG2]],
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 getelementptr inbounds (%struct.x, %struct.x* @dst, i32 0, i32 0), i8* align 8 getelementptr inbounds (%struct.x, %struct.x* @src, i32 0, i32 0), i32 11, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 8 @dst, ptr align 8 @src, i32 11, i1 false)
   ret i32 0
 }
 
-define void @t1(i8* nocapture %C) nounwind {
+define void @t1(ptr nocapture %C) nounwind {
 entry:
 ; CHECK-LABEL: t1:
 ; CHECK: ldr [[DEST:q[0-9]+]], [x[[BASEREG]]]
 ; CHECK: str [[DEST:q[0-9]+]], [x0]
 ; CHECK: ldur [[DEST:q[0-9]+]], [x[[BASEREG:[0-9]+]], #15]
 ; CHECK: stur [[DEST:q[0-9]+]], [x0, #15]
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([31 x i8], [31 x i8]* @.str1, i64 0, i64 0), i64 31, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %C, ptr @.str1, i64 31, i1 false)
   ret void
 }
 
-define void @t2(i8* nocapture %C) nounwind {
+define void @t2(ptr nocapture %C) nounwind {
 entry:
 ; CHECK-LABEL: t2:
 ; CHECK: mov [[REG3:w[0-9]+]]
@@ -43,33 +43,33 @@ entry:
 ; CHECK: str [[REG3]], [x0, #32]
 ; CHECK: ldp [[DEST1:q[0-9]+]], [[DEST2:q[0-9]+]], [x{{[0-9]+}}]
 ; CHECK: stp [[DEST1]], [[DEST2]], [x0]
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([36 x i8], [36 x i8]* @.str2, i64 0, i64 0), i64 36, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %C, ptr @.str2, i64 36, i1 false)
   ret void
 }
 
-define void @t3(i8* nocapture %C) nounwind {
+define void @t3(ptr nocapture %C) nounwind {
 entry:
 ; CHECK-LABEL: t3:
 ; CHECK: ldr [[DEST:q[0-9]+]], [x[[BASEREG]]]
 ; CHECK: str [[DEST]], [x0]
 ; CHECK: ldr [[REG4:x[0-9]+]], [x[[BASEREG:[0-9]+]], #16]
 ; CHECK: str [[REG4]], [x0, #16]
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([24 x i8], [24 x i8]* @.str3, i64 0, i64 0), i64 24, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %C, ptr @.str3, i64 24, i1 false)
   ret void
 }
 
-define void @t4(i8* nocapture %C) nounwind {
+define void @t4(ptr nocapture %C) nounwind {
 entry:
 ; CHECK-LABEL: t4:
 ; CHECK: mov [[REG5:w[0-9]+]], #32
 ; CHECK: strh [[REG5]], [x0, #16]
 ; CHECK: ldr [[REG6:q[0-9]+]], [x{{[0-9]+}}]
 ; CHECK: str [[REG6]], [x0]
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([18 x i8], [18 x i8]* @.str4, i64 0, i64 0), i64 18, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %C, ptr @.str4, i64 18, i1 false)
   ret void
 }
 
-define void @t5(i8* nocapture %C) nounwind {
+define void @t5(ptr nocapture %C) nounwind {
 entry:
 ; CHECK-LABEL: t5:
 ; CHECK: mov [[REG7:w[0-9]+]], #21337
@@ -78,7 +78,7 @@ entry:
 ; CHECK: mov [[REG8:w[0-9]+]],
 ; CHECK: movk [[REG8]],
 ; CHECK: str [[REG8]], [x0]
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str5, i64 0, i64 0), i64 7, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %C, ptr @.str5, i64 7, i1 false)
   ret void
 }
 
@@ -89,22 +89,20 @@ entry:
 ; CHECK-DAG: stur [[REG9]], [x{{[0-9]+}}, #6]
 ; CHECK-DAG: ldr
 ; CHECK-DAG: str
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds ([512 x i8], [512 x i8]* @spool.splbuf, i64 0, i64 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str6, i64 0, i64 0), i64 14, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr @spool.splbuf, ptr @.str6, i64 14, i1 false)
   ret void
 }
 
 %struct.Foo = type { i32, i32, i32, i32 }
 
-define void @t7(%struct.Foo* nocapture %a, %struct.Foo* nocapture %b) nounwind {
+define void @t7(ptr nocapture %a, ptr nocapture %b) nounwind {
 entry:
 ; CHECK: t7
 ; CHECK: ldr [[REG10:q[0-9]+]], [x1]
 ; CHECK: str [[REG10]], [x0]
-  %0 = bitcast %struct.Foo* %a to i8*
-  %1 = bitcast %struct.Foo* %b to i8*
-  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 16, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i32(ptr align 4 %a, ptr align 4 %b, i32 16, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind

diff --git a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
index 1fe87a3fb5a04..ef3fb33476a04 100644
--- a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
@@ -1,61 +1,61 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 
-define void @bzero_4_heap(i8* nocapture %c) {
+define void @bzero_4_heap(ptr nocapture %c) {
 ; CHECK-LABEL: bzero_4_heap:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str wzr, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.memset.p0i8.i64(i8* align 4 %c, i8 0, i64 4, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 4 %c, i8 0, i64 4, i1 false)
   ret void
 }
 
-define void @bzero_8_heap(i8* nocapture %c) {
+define void @bzero_8_heap(ptr nocapture %c) {
 ; CHECK-LABEL: bzero_8_heap:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str xzr, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 8, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 8 %c, i8 0, i64 8, i1 false)
   ret void
 }
 
-define void @bzero_12_heap(i8* nocapture %c) {
+define void @bzero_12_heap(ptr nocapture %c) {
 ; CHECK-LABEL: bzero_12_heap:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str wzr, [x0, #8]
 ; CHECK-NEXT:    str xzr, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 12, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 8 %c, i8 0, i64 12, i1 false)
   ret void
 }
 
-define void @bzero_16_heap(i8* nocapture %c) {
+define void @bzero_16_heap(ptr nocapture %c) {
 ; CHECK-LABEL: bzero_16_heap:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp xzr, xzr, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 16, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 8 %c, i8 0, i64 16, i1 false)
   ret void
 }
 
-define void @bzero_32_heap(i8* nocapture %c) {
+define void @bzero_32_heap(ptr nocapture %c) {
 ; CHECK-LABEL: bzero_32_heap:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    stp q0, q0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 32, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 8 %c, i8 0, i64 32, i1 false)
   ret void
 }
 
-define void @bzero_64_heap(i8* nocapture %c) {
+define void @bzero_64_heap(ptr nocapture %c) {
 ; CHECK-LABEL: bzero_64_heap:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    stp q0, q0, [x0]
 ; CHECK-NEXT:    stp q0, q0, [x0, #32]
 ; CHECK-NEXT:    ret
-  call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 64, i1 false)
+  call void @llvm.memset.p0.i64(ptr align 8 %c, i8 0, i64 64, i1 false)
   ret void
 }
 
@@ -71,9 +71,8 @@ define void @bzero_4_stack() {
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %buf = alloca [4 x i8], align 1
-  %cast = bitcast [4 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 4, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 0, i32 4, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -88,9 +87,8 @@ define void @bzero_8_stack() {
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %buf = alloca [8 x i8], align 1
-  %cast = bitcast [8 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 8, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 0, i32 8, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -109,9 +107,8 @@ define void @bzero_12_stack() {
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %buf = alloca [12 x i8], align 1
-  %cast = bitcast [12 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 12, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 0, i32 12, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -129,9 +126,8 @@ define void @bzero_16_stack() {
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %buf = alloca [16 x i8], align 1
-  %cast = bitcast [16 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 16, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 0, i32 16, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -150,9 +146,8 @@ define void @bzero_20_stack() {
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %buf = alloca [20 x i8], align 1
-  %cast = bitcast [20 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 20, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 0, i32 20, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -172,9 +167,8 @@ define void @bzero_26_stack() {
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %buf = alloca [26 x i8], align 1
-  %cast = bitcast [26 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 26, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 0, i32 26, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -193,9 +187,8 @@ define void @bzero_32_stack() {
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %buf = alloca [32 x i8], align 1
-  %cast = bitcast [32 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 32, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 0, i32 32, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -215,9 +208,8 @@ define void @bzero_40_stack() {
 ; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
   %buf = alloca [40 x i8], align 1
-  %cast = bitcast [40 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 40, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 0, i32 40, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -237,9 +229,8 @@ define void @bzero_64_stack() {
 ; CHECK-NEXT:    add sp, sp, #80
 ; CHECK-NEXT:    ret
   %buf = alloca [64 x i8], align 1
-  %cast = bitcast [64 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 64, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 0, i32 64, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -260,9 +251,8 @@ define void @bzero_72_stack() {
 ; CHECK-NEXT:    add sp, sp, #96
 ; CHECK-NEXT:    ret
   %buf = alloca [72 x i8], align 1
-  %cast = bitcast [72 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 72, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 0, i32 72, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -284,9 +274,8 @@ define void @bzero_128_stack() {
 ; CHECK-NEXT:    add sp, sp, #144
 ; CHECK-NEXT:    ret
   %buf = alloca [128 x i8], align 1
-  %cast = bitcast [128 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 128, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 0, i32 128, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -313,9 +302,8 @@ define void @bzero_256_stack() {
 ; CHECK-NEXT:    add sp, sp, #272
 ; CHECK-NEXT:    ret
   %buf = alloca [256 x i8], align 1
-  %cast = bitcast [256 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 256, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 0, i32 256, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -332,9 +320,8 @@ define void @memset_4_stack() {
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %buf = alloca [4 x i8], align 1
-  %cast = bitcast [4 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 4, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 -86, i32 4, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -350,9 +337,8 @@ define void @memset_8_stack() {
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %buf = alloca [8 x i8], align 1
-  %cast = bitcast [8 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 8, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 -86, i32 8, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -372,9 +358,8 @@ define void @memset_12_stack() {
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %buf = alloca [12 x i8], align 1
-  %cast = bitcast [12 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 12, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 -86, i32 12, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -393,9 +378,8 @@ define void @memset_16_stack() {
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %buf = alloca [16 x i8], align 1
-  %cast = bitcast [16 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 16, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 -86, i32 16, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -415,9 +399,8 @@ define void @memset_20_stack() {
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %buf = alloca [20 x i8], align 1
-  %cast = bitcast [20 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 20, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 -86, i32 20, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -438,9 +421,8 @@ define void @memset_26_stack() {
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %buf = alloca [26 x i8], align 1
-  %cast = bitcast [26 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 26, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 -86, i32 26, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -459,9 +441,8 @@ define void @memset_32_stack() {
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %buf = alloca [32 x i8], align 1
-  %cast = bitcast [32 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 32, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 -86, i32 32, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -482,9 +463,8 @@ define void @memset_40_stack() {
 ; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
   %buf = alloca [40 x i8], align 1
-  %cast = bitcast [40 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 40, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 -86, i32 40, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -504,9 +484,8 @@ define void @memset_64_stack() {
 ; CHECK-NEXT:    add sp, sp, #80
 ; CHECK-NEXT:    ret
   %buf = alloca [64 x i8], align 1
-  %cast = bitcast [64 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 64, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 -86, i32 64, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -528,9 +507,8 @@ define void @memset_72_stack() {
 ; CHECK-NEXT:    add sp, sp, #96
 ; CHECK-NEXT:    ret
   %buf = alloca [72 x i8], align 1
-  %cast = bitcast [72 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 72, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 -86, i32 72, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -552,9 +530,8 @@ define void @memset_128_stack() {
 ; CHECK-NEXT:    add sp, sp, #144
 ; CHECK-NEXT:    ret
   %buf = alloca [128 x i8], align 1
-  %cast = bitcast [128 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 128, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 -86, i32 128, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
@@ -581,12 +558,11 @@ define void @memset_256_stack() {
 ; CHECK-NEXT:    add sp, sp, #272
 ; CHECK-NEXT:    ret
   %buf = alloca [256 x i8], align 1
-  %cast = bitcast [256 x i8]* %buf to i8*
-  call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 256, i1 false)
-  call void @something(i8* %cast)
+  call void @llvm.memset.p0.i32(ptr %buf, i8 -86, i32 256, i1 false)
+  call void @something(ptr %buf)
   ret void
 }
 
-declare void @something(i8*)
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
+declare void @something(ptr)
+declare void @llvm.memset.p0.i32(ptr nocapture, i8, i32, i1) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind

diff --git a/llvm/test/CodeGen/AArch64/arm64-memset-to-bzero-pgso.ll b/llvm/test/CodeGen/AArch64/arm64-memset-to-bzero-pgso.ll
index 086592bf13212..48a05e62e7479 100644
--- a/llvm/test/CodeGen/AArch64/arm64-memset-to-bzero-pgso.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-memset-to-bzero-pgso.ll
@@ -6,21 +6,21 @@
 ; For small size (<= 256), we do not change memset to bzero.
 ; CHECK-DARWIN: {{b|bl}} _memset
 ; CHECK-LINUX: {{b|bl}} memset
-define void @fct1(i8* nocapture %ptr) !prof !14 {
+define void @fct1(ptr nocapture %ptr) !prof !14 {
 entry:
-  tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 256, i1 false)
+  tail call void @llvm.memset.p0.i64(ptr %ptr, i8 0, i64 256, i1 false)
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
 
 ; CHECK-LABEL: fct2:
 ; When the size is bigger than 256, change into bzero.
 ; CHECK-DARWIN: {{b|bl}} _bzero
 ; CHECK-LINUX: {{b|bl}} memset
-define void @fct2(i8* nocapture %ptr) !prof !14 {
+define void @fct2(ptr nocapture %ptr) !prof !14 {
 entry:
-  tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 257, i1 false)
+  tail call void @llvm.memset.p0.i64(ptr %ptr, i8 0, i64 257, i1 false)
   ret void
 }
 
@@ -28,10 +28,10 @@ entry:
 ; For unknown size, change to bzero.
 ; CHECK-DARWIN: {{b|bl}} _bzero
 ; CHECK-LINUX: {{b|bl}} memset
-define void @fct3(i8* nocapture %ptr, i32 %unknown) !prof !14 {
+define void @fct3(ptr nocapture %ptr, i32 %unknown) !prof !14 {
 entry:
   %conv = sext i32 %unknown to i64
-  tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 %conv, i1 false)
+  tail call void @llvm.memset.p0.i64(ptr %ptr, i8 0, i64 %conv, i1 false)
   ret void
 }
 
@@ -39,25 +39,25 @@ entry:
 ; Size <= 256, no change.
 ; CHECK-DARWIN: {{b|bl}} _memset
 ; CHECK-LINUX: {{b|bl}} memset
-define void @fct4(i8* %ptr) !prof !14 {
+define void @fct4(ptr %ptr) !prof !14 {
 entry:
-  %tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
-  %call = tail call i8* @__memset_chk(i8* %ptr, i32 0, i64 256, i64 %tmp)
+  %tmp = tail call i64 @llvm.objectsize.i64(ptr %ptr, i1 false)
+  %call = tail call ptr @__memset_chk(ptr %ptr, i32 0, i64 256, i64 %tmp)
   ret void
 }
 
-declare i8* @__memset_chk(i8*, i32, i64, i64)
+declare ptr @__memset_chk(ptr, i32, i64, i64)
 
-declare i64 @llvm.objectsize.i64(i8*, i1)
+declare i64 @llvm.objectsize.i64(ptr, i1)
 
 ; CHECK-LABEL: fct5:
 ; Size > 256, change.
 ; CHECK-DARWIN: {{b|bl}} _bzero
 ; CHECK-LINUX: {{b|bl}} memset
-define void @fct5(i8* %ptr) !prof !14 {
+define void @fct5(ptr %ptr) !prof !14 {
 entry:
-  %tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
-  %call = tail call i8* @__memset_chk(i8* %ptr, i32 0, i64 257, i64 %tmp)
+  %tmp = tail call i64 @llvm.objectsize.i64(ptr %ptr, i1 false)
+  %call = tail call ptr @__memset_chk(ptr %ptr, i32 0, i64 257, i64 %tmp)
   ret void
 }
 
@@ -65,11 +65,11 @@ entry:
 ; Size = unknown, change.
 ; CHECK-DARWIN: {{b|bl}} _bzero
 ; CHECK-LINUX: {{b|bl}} memset
-define void @fct6(i8* %ptr, i32 %unknown) !prof !14 {
+define void @fct6(ptr %ptr, i32 %unknown) !prof !14 {
 entry:
   %conv = sext i32 %unknown to i64
-  %tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
-  %call = tail call i8* @__memset_chk(i8* %ptr, i32 0, i64 %conv, i64 %tmp)
+  %tmp = tail call i64 @llvm.objectsize.i64(ptr %ptr, i1 false)
+  %call = tail call ptr @__memset_chk(ptr %ptr, i32 0, i64 %conv, i64 %tmp)
   ret void
 }
 
@@ -80,10 +80,10 @@ entry:
 ; memset with something that is not a zero, no change.
 ; CHECK-DARWIN: {{b|bl}} _memset
 ; CHECK-LINUX: {{b|bl}} memset
-define void @fct7(i8* %ptr) !prof !14 {
+define void @fct7(ptr %ptr) !prof !14 {
 entry:
-  %tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
-  %call = tail call i8* @__memset_chk(i8* %ptr, i32 1, i64 256, i64 %tmp)
+  %tmp = tail call i64 @llvm.objectsize.i64(ptr %ptr, i1 false)
+  %call = tail call ptr @__memset_chk(ptr %ptr, i32 1, i64 256, i64 %tmp)
   ret void
 }
 
@@ -91,10 +91,10 @@ entry:
 ; memset with something that is not a zero, no change.
 ; CHECK-DARWIN: {{b|bl}} _memset
 ; CHECK-LINUX: {{b|bl}} memset
-define void @fct8(i8* %ptr) !prof !14 {
+define void @fct8(ptr %ptr) !prof !14 {
 entry:
-  %tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
-  %call = tail call i8* @__memset_chk(i8* %ptr, i32 1, i64 257, i64 %tmp)
+  %tmp = tail call i64 @llvm.objectsize.i64(ptr %ptr, i1 false)
+  %call = tail call ptr @__memset_chk(ptr %ptr, i32 1, i64 257, i64 %tmp)
   ret void
 }
 
@@ -102,11 +102,11 @@ entry:
 ; memset with something that is not a zero, no change.
 ; CHECK-DARWIN: {{b|bl}} _memset
 ; CHECK-LINUX: {{b|bl}} memset
-define void @fct9(i8* %ptr, i32 %unknown) !prof !14 {
+define void @fct9(ptr %ptr, i32 %unknown) !prof !14 {
 entry:
   %conv = sext i32 %unknown to i64
-  %tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
-  %call = tail call i8* @__memset_chk(i8* %ptr, i32 1, i64 %conv, i64 %tmp)
+  %tmp = tail call i64 @llvm.objectsize.i64(ptr %ptr, i1 false)
+  %call = tail call ptr @__memset_chk(ptr %ptr, i32 1, i64 %conv, i64 %tmp)
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/arm64-memset-to-bzero.ll b/llvm/test/CodeGen/AArch64/arm64-memset-to-bzero.ll
index 6be7b3822a711..a18e10eabb663 100644
--- a/llvm/test/CodeGen/AArch64/arm64-memset-to-bzero.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-memset-to-bzero.ll
@@ -2,14 +2,14 @@
 ; RUN: llc %s -enable-machine-outliner=never -mtriple=arm64-linux-gnu    -o - | FileCheck %s --check-prefix=LINUX
 ; <rdar://problem/14199482> ARM64: Calls to bzero() replaced with calls to memset()
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
 
 ; CHECK-LABEL: fct1:
 ; Constant size memset to zero.
 ; DARWIN: {{b|bl}} _bzero
 ; LINUX: {{b|bl}} memset
-define void @fct1(i8* nocapture %ptr) minsize {
-  tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 256, i1 false)
+define void @fct1(ptr nocapture %ptr) minsize {
+  tail call void @llvm.memset.p0.i64(ptr %ptr, i8 0, i64 256, i1 false)
   ret void
 }
 
@@ -17,9 +17,9 @@ define void @fct1(i8* nocapture %ptr) minsize {
 ; Variable size memset to zero.
 ; DARWIN: {{b|bl}} _bzero
 ; LINUX: {{b|bl}} memset
-define void @fct3(i8* nocapture %ptr, i32 %unknown) minsize {
+define void @fct3(ptr nocapture %ptr, i32 %unknown) minsize {
   %conv = sext i32 %unknown to i64
-  tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 %conv, i1 false)
+  tail call void @llvm.memset.p0.i64(ptr %ptr, i8 0, i64 %conv, i1 false)
   ret void
 }
 
@@ -27,24 +27,24 @@ define void @fct3(i8* nocapture %ptr, i32 %unknown) minsize {
 ; Variable size checked memset to zero.
 ; DARWIN: {{b|bl}} _bzero
 ; LINUX: {{b|bl}} memset
-define void @fct4(i8* %ptr) minsize {
-  %tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
-  %call = tail call i8* @__memset_chk(i8* %ptr, i32 0, i64 256, i64 %tmp)
+define void @fct4(ptr %ptr) minsize {
+  %tmp = tail call i64 @llvm.objectsize.i64(ptr %ptr, i1 false)
+  %call = tail call ptr @__memset_chk(ptr %ptr, i32 0, i64 256, i64 %tmp)
   ret void
 }
 
-declare i8* @__memset_chk(i8*, i32, i64, i64)
+declare ptr @__memset_chk(ptr, i32, i64, i64)
 
-declare i64 @llvm.objectsize.i64(i8*, i1)
+declare i64 @llvm.objectsize.i64(ptr, i1)
 
 ; CHECK-LABEL: fct6:
 ; Size = unknown, change.
 ; DARWIN: {{b|bl}} _bzero
 ; LINUX: {{b|bl}} memset
-define void @fct6(i8* %ptr, i32 %unknown) minsize {
+define void @fct6(ptr %ptr, i32 %unknown) minsize {
   %conv = sext i32 %unknown to i64
-  %tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
-  %call = tail call i8* @__memset_chk(i8* %ptr, i32 0, i64 %conv, i64 %tmp)
+  %tmp = tail call i64 @llvm.objectsize.i64(ptr %ptr, i1 false)
+  %call = tail call ptr @__memset_chk(ptr %ptr, i32 0, i64 %conv, i64 %tmp)
   ret void
 }
 
@@ -55,9 +55,9 @@ define void @fct6(i8* %ptr, i32 %unknown) minsize {
 ; memset with something that is not a zero, no change.
 ; DARWIN: {{b|bl}} _memset
 ; LINUX: {{b|bl}} memset
-define void @fct7(i8* %ptr) minsize {
-  %tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
-  %call = tail call i8* @__memset_chk(i8* %ptr, i32 1, i64 256, i64 %tmp)
+define void @fct7(ptr %ptr) minsize {
+  %tmp = tail call i64 @llvm.objectsize.i64(ptr %ptr, i1 false)
+  %call = tail call ptr @__memset_chk(ptr %ptr, i32 1, i64 256, i64 %tmp)
   ret void
 }
 
@@ -65,9 +65,9 @@ define void @fct7(i8* %ptr) minsize {
 ; memset with something that is not a zero, no change.
 ; DARWIN: {{b|bl}} _memset
 ; LINUX: {{b|bl}} memset
-define void @fct8(i8* %ptr) minsize {
-  %tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
-  %call = tail call i8* @__memset_chk(i8* %ptr, i32 1, i64 257, i64 %tmp)
+define void @fct8(ptr %ptr) minsize {
+  %tmp = tail call i64 @llvm.objectsize.i64(ptr %ptr, i1 false)
+  %call = tail call ptr @__memset_chk(ptr %ptr, i32 1, i64 257, i64 %tmp)
   ret void
 }
 
@@ -75,9 +75,9 @@ define void @fct8(i8* %ptr) minsize {
 ; memset with something that is not a zero, no change.
 ; DARWIN: {{b|bl}} _memset
 ; LINUX: {{b|bl}} memset
-define void @fct9(i8* %ptr, i32 %unknown) minsize {
+define void @fct9(ptr %ptr, i32 %unknown) minsize {
   %conv = sext i32 %unknown to i64
-  %tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
-  %call = tail call i8* @__memset_chk(i8* %ptr, i32 1, i64 %conv, i64 %tmp)
+  %tmp = tail call i64 @llvm.objectsize.i64(ptr %ptr, i1 false)
+  %call = tail call ptr @__memset_chk(ptr %ptr, i32 1, i64 %conv, i64 %tmp)
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll b/llvm/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll
index 5cdcbf14292c3..ab314702463d2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll
@@ -3,7 +3,7 @@
 
 ; Small (16 bytes here) unaligned memcpy() should be a function call if
 ; strict-alignment is turned on.
-define void @t0(i8* %out, i8* %in) {
+define void @t0(ptr %out, ptr %in) {
 ; CHECK-LABEL: t0:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
@@ -15,26 +15,26 @@ define void @t0(i8* %out, i8* %in) {
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out, i8* %in, i64 16, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %out, ptr %in, i64 16, i1 false)
   ret void
 }
 
 ; Small (16 bytes here) aligned memcpy() should be inlined even if
 ; strict-alignment is turned on.
-define void @t1(i8* align 8 %out, i8* align 8 %in) {
+define void @t1(ptr align 8 %out, ptr align 8 %in) {
 ; CHECK-LABEL: t1:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    ldp x9, x8, [x1]
 ; CHECK-NEXT:    stp x9, x8, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %out, i8* align 8 %in, i64 16, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %out, ptr align 8 %in, i64 16, i1 false)
   ret void
 }
 
 ; Tiny (4 bytes here) unaligned memcpy() should be inlined with byte sized
 ; loads and stores if strict-alignment is turned on.
-define void @t2(i8* %out, i8* %in) {
+define void @t2(ptr %out, ptr %in) {
 ; CHECK-LABEL: t2:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    ldrb w8, [x1, #3]
@@ -47,8 +47,8 @@ define void @t2(i8* %out, i8* %in) {
 ; CHECK-NEXT:    strb w11, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out, i8* %in, i64 4, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %out, ptr %in, i64 4, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)

diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll b/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
index d53c9f270679b..061d1161ba133 100644
--- a/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
@@ -15,11 +15,11 @@
 ; CHECK: SU(4):   STRWui $wzr, %1:gpr64common, 0 :: (store (s32) into %ir.ptr2)
 ; CHECK: SU(5):   $w0 = COPY %2
 ; CHECK: ** ScheduleDAGMI::schedule picking next node
-define i32 @misched_bug(i32* %ptr1, i32* %ptr2) {
+define i32 @misched_bug(ptr %ptr1, ptr %ptr2) {
 entry:
-  %ptr1_plus1 = getelementptr inbounds i32, i32* %ptr1, i64 1
-  %val1 = load i32, i32* %ptr1_plus1, align 4
-  store i32 0, i32* %ptr1, align 4
-  store i32 0, i32* %ptr2, align 4
+  %ptr1_plus1 = getelementptr inbounds i32, ptr %ptr1, i64 1
+  %val1 = load i32, ptr %ptr1_plus1, align 4
+  store i32 0, ptr %ptr1, align 4
+  store i32 0, ptr %ptr2, align 4
   ret i32 %val1
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-multimmo.ll b/llvm/test/CodeGen/AArch64/arm64-misched-multimmo.ll
index 98152fea30696..2f14bfb70e330 100644
--- a/llvm/test/CodeGen/AArch64/arm64-misched-multimmo.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-misched-multimmo.ll
@@ -15,9 +15,9 @@
 ; CHECK: SU(4):   STRWui $wzr, renamable $x{{[0-9]+}}
 define dso_local i32 @foo() {
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @G2, i64 0, i64 0), align 4
-  %1 = load i32, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @G2, i64 0, i64 1), align 4
-  store i32 0, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @G1, i64 0, i64 0), align 4
+  %0 = load i32, ptr @G2, align 4
+  %1 = load i32, ptr getelementptr inbounds ([100 x i32], ptr @G2, i64 0, i64 1), align 4
+  store i32 0, ptr @G1, align 4
   %add = add nsw i32 %1, %0
   ret i32 %add
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-mte.ll b/llvm/test/CodeGen/AArch64/arm64-mte.ll
index cf53afdaf29cf..d78f4eb830e10 100644
--- a/llvm/test/CodeGen/AArch64/arm64-mte.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-mte.ll
@@ -1,150 +1,124 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -mattr=+mte | FileCheck %s
 
 ; test create_tag
-define i32* @create_tag(i32* %ptr, i32 %m) {
+define ptr @create_tag(ptr %ptr, i32 %m) {
 entry:
 ; CHECK-LABEL: create_tag:
-  %0 = bitcast i32* %ptr to i8*
-  %1 = zext i32 %m to i64
-  %2 = tail call i8* @llvm.aarch64.irg(i8* %0, i64 %1)
-  %3 = bitcast i8* %2 to i32*
-  ret i32* %3
+  %0 = zext i32 %m to i64
+  %1 = tail call ptr @llvm.aarch64.irg(ptr %ptr, i64 %0)
+  ret ptr %1
 ;CHECK: irg x0, x0, {{x[0-9]+}}
 }
 
 ; *********** __arm_mte_increment_tag  *************
 ; test increment_tag1
-define i32* @increment_tag1(i32* %ptr) {
+define ptr @increment_tag1(ptr %ptr) {
 entry:
 ; CHECK-LABEL: increment_tag1:
-  %0 = bitcast i32* %ptr to i8*
-  %1 = tail call i8* @llvm.aarch64.addg(i8* %0, i64 7)
-  %2 = bitcast i8* %1 to i32*
-  ret i32* %2
+  %0 = tail call ptr @llvm.aarch64.addg(ptr %ptr, i64 7)
+  ret ptr %0
 ; CHECK: addg x0, x0, #0, #7
 }
 
 %struct.S2K = type { [512 x i32] }
-define i32* @increment_tag1stack(i32* %ptr) {
+define ptr @increment_tag1stack(ptr %ptr) {
 entry:
 ; CHECK-LABEL: increment_tag1stack:
   %s = alloca %struct.S2K, align 4
-  %0 = bitcast %struct.S2K* %s to i8*
-  call void @llvm.lifetime.start.p0i8(i64 2048, i8* nonnull %0)
-  %1 = call i8* @llvm.aarch64.addg(i8* nonnull %0, i64 7)
-  %2 = bitcast i8* %1 to i32*
-  call void @llvm.lifetime.end.p0i8(i64 2048, i8* nonnull %0)
-  ret i32* %2
+  call void @llvm.lifetime.start.p0(i64 2048, ptr nonnull %s)
+  %0 = call ptr @llvm.aarch64.addg(ptr nonnull %s, i64 7)
+  call void @llvm.lifetime.end.p0(i64 2048, ptr nonnull %s)
+  ret ptr %0
 ; CHECK: addg x0, sp, #0, #7
 }
 
 
-define i32* @increment_tag2(i32* %ptr) {
+define ptr @increment_tag2(ptr %ptr) {
 entry:
 ; CHECK-LABEL: increment_tag2:
-  %add.ptr = getelementptr inbounds i32, i32* %ptr, i64 4
-  %0 = bitcast i32* %add.ptr to i8*
-  %1 = tail call i8* @llvm.aarch64.addg(i8* nonnull %0, i64 7)
-  %2 = bitcast i8* %1 to i32*
-  ret i32* %2
+  %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 4
+  %0 = tail call ptr @llvm.aarch64.addg(ptr nonnull %add.ptr, i64 7)
+  ret ptr %0
 ; CHECK: addg x0, x0, #16, #7
 }
 
-define i32* @increment_tag2stack(i32* %ptr) {
+define ptr @increment_tag2stack(ptr %ptr) {
 entry:
 ; CHECK-LABEL: increment_tag2stack:
   %s = alloca %struct.S2K, align 4
-  %0 = bitcast %struct.S2K* %s to i8*
-  call void @llvm.lifetime.start.p0i8(i64 2048, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds %struct.S2K, %struct.S2K* %s, i64 0, i32 0, i64 4
-  %1 = bitcast i32* %arrayidx to i8*
-  %2 = call i8* @llvm.aarch64.addg(i8* nonnull %1, i64 7)
-  %3 = bitcast i8* %2 to i32*
-  call void @llvm.lifetime.end.p0i8(i64 2048, i8* nonnull %0)
-  ret i32* %3
+  call void @llvm.lifetime.start.p0(i64 2048, ptr nonnull %s)
+  %arrayidx = getelementptr inbounds %struct.S2K, ptr %s, i64 0, i32 0, i64 4
+  %0 = call ptr @llvm.aarch64.addg(ptr nonnull %arrayidx, i64 7)
+  call void @llvm.lifetime.end.p0(i64 2048, ptr nonnull %s)
+  ret ptr %0
 ; CHECK: addg x0, sp, #16, #7
 }
 
-define i32* @increment_tag3(i32* %ptr) {
+define ptr @increment_tag3(ptr %ptr) {
 entry:
 ; CHECK-LABEL: increment_tag3:
-  %add.ptr = getelementptr inbounds i32, i32* %ptr, i64 252
-  %0 = bitcast i32* %add.ptr to i8*
-  %1 = tail call i8* @llvm.aarch64.addg(i8* nonnull %0, i64 7)
-  %2 = bitcast i8* %1 to i32*
-  ret i32* %2
+  %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 252
+  %0 = tail call ptr @llvm.aarch64.addg(ptr nonnull %add.ptr, i64 7)
+  ret ptr %0
 ; CHECK: addg x0, x0, #1008, #7
 }
 
-define i32* @increment_tag3stack(i32* %ptr) {
+define ptr @increment_tag3stack(ptr %ptr) {
 entry:
 ; CHECK-LABEL: increment_tag3stack:
   %s = alloca %struct.S2K, align 4
-  %0 = bitcast %struct.S2K* %s to i8*
-  call void @llvm.lifetime.start.p0i8(i64 2048, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds %struct.S2K, %struct.S2K* %s, i64 0, i32 0, i64 252
-  %1 = bitcast i32* %arrayidx to i8*
-  %2 = call i8* @llvm.aarch64.addg(i8* nonnull %1, i64 7)
-  %3 = bitcast i8* %2 to i32*
-  call void @llvm.lifetime.end.p0i8(i64 2048, i8* nonnull %0)
-  ret i32* %3
+  call void @llvm.lifetime.start.p0(i64 2048, ptr nonnull %s)
+  %arrayidx = getelementptr inbounds %struct.S2K, ptr %s, i64 0, i32 0, i64 252
+  %0 = call ptr @llvm.aarch64.addg(ptr nonnull %arrayidx, i64 7)
+  call void @llvm.lifetime.end.p0(i64 2048, ptr nonnull %s)
+  ret ptr %0
 ; CHECK: addg x0, sp, #1008, #7
 }
 
 
-define i32* @increment_tag4(i32* %ptr) {
+define ptr @increment_tag4(ptr %ptr) {
 entry:
 ; CHECK-LABEL: increment_tag4:
-  %add.ptr = getelementptr inbounds i32, i32* %ptr, i64 256
-  %0 = bitcast i32* %add.ptr to i8*
-  %1 = tail call i8* @llvm.aarch64.addg(i8* nonnull %0, i64 7)
-  %2 = bitcast i8* %1 to i32*
-  ret i32* %2
+  %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 256
+  %0 = tail call ptr @llvm.aarch64.addg(ptr nonnull %add.ptr, i64 7)
+  ret ptr %0
 ; CHECK: add [[T0:x[0-9]+]], x0, #1024
 ; CHECK-NEXT: addg x0, [[T0]], #0, #7
 }
 
-define i32* @increment_tag4stack(i32* %ptr) {
+define ptr @increment_tag4stack(ptr %ptr) {
 entry:
 ; CHECK-LABEL: increment_tag4stack:
   %s = alloca %struct.S2K, align 4
-  %0 = bitcast %struct.S2K* %s to i8*
-  call void @llvm.lifetime.start.p0i8(i64 2048, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds %struct.S2K, %struct.S2K* %s, i64 0, i32 0, i64 256
-  %1 = bitcast i32* %arrayidx to i8*
-  %2 = call i8* @llvm.aarch64.addg(i8* nonnull %1, i64 7)
-  %3 = bitcast i8* %2 to i32*
-  call void @llvm.lifetime.end.p0i8(i64 2048, i8* nonnull %0)
-  ret i32* %3
+  call void @llvm.lifetime.start.p0(i64 2048, ptr nonnull %s)
+  %arrayidx = getelementptr inbounds %struct.S2K, ptr %s, i64 0, i32 0, i64 256
+  %0 = call ptr @llvm.aarch64.addg(ptr nonnull %arrayidx, i64 7)
+  call void @llvm.lifetime.end.p0(i64 2048, ptr nonnull %s)
+  ret ptr %0
 ; CHECK: add [[T0:x[0-9]+]], {{.*}}, #1024
 ; CHECK-NEXT: addg x0, [[T0]], #0, #7
 }
 
 
-define i32* @increment_tag5(i32* %ptr) {
+define ptr @increment_tag5(ptr %ptr) {
 entry:
 ; CHECK-LABEL: increment_tag5:
-  %add.ptr = getelementptr inbounds i32, i32* %ptr, i64 5
-  %0 = bitcast i32* %add.ptr to i8*
-  %1 = tail call i8* @llvm.aarch64.addg(i8* nonnull %0, i64 7)
-  %2 = bitcast i8* %1 to i32*
-  ret i32* %2
+  %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 5
+  %0 = tail call ptr @llvm.aarch64.addg(ptr nonnull %add.ptr, i64 7)
+  ret ptr %0
 ; CHECK: add [[T0:x[0-9]+]], x0, #20
 ; CHECK-NEXT: addg x0, [[T0]], #0, #7
 }
 
-define i32* @increment_tag5stack(i32* %ptr) {
+define ptr @increment_tag5stack(ptr %ptr) {
 entry:
 ; CHECK-LABEL: increment_tag5stack:
   %s = alloca %struct.S2K, align 4
-  %0 = bitcast %struct.S2K* %s to i8*
-  call void @llvm.lifetime.start.p0i8(i64 2048, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds %struct.S2K, %struct.S2K* %s, i64 0, i32 0, i64 5
-  %1 = bitcast i32* %arrayidx to i8*
-  %2 = call i8* @llvm.aarch64.addg(i8* nonnull %1, i64 7)
-  %3 = bitcast i8* %2 to i32*
-  call void @llvm.lifetime.end.p0i8(i64 2048, i8* nonnull %0)
-  ret i32* %3
+  call void @llvm.lifetime.start.p0(i64 2048, ptr nonnull %s)
+  %arrayidx = getelementptr inbounds %struct.S2K, ptr %s, i64 0, i32 0, i64 5
+  %0 = call ptr @llvm.aarch64.addg(ptr nonnull %arrayidx, i64 7)
+  call void @llvm.lifetime.end.p0(i64 2048, ptr nonnull %s)
+  ret ptr %0
 ; CHECK: add [[T0:x[0-9]+]], {{.*}}, #20
 ; CHECK-NEXT: addg x0, [[T0]], #0, #7
 }
@@ -152,13 +126,12 @@ entry:
 
 ; *********** __arm_mte_exclude_tag  *************
 ; test exclude_tag
-define i32 @exclude_tag(i32* %ptr, i32 %m) local_unnamed_addr #0 {
+define i32 @exclude_tag(ptr %ptr, i32 %m) local_unnamed_addr #0 {
 entry:
 ;CHECK-LABEL: exclude_tag:
   %0 = zext i32 %m to i64
-  %1 = bitcast i32* %ptr to i8*
-  %2 = tail call i64 @llvm.aarch64.gmi(i8* %1, i64 %0)
-  %conv = trunc i64 %2 to i32
+  %1 = tail call i64 @llvm.aarch64.gmi(ptr %ptr, i64 %0)
+  %conv = trunc i64 %1 to i32
   ret i32 %conv
 ; CHECK: gmi	x0, x0, {{x[0-9]+}}
 }
@@ -166,170 +139,140 @@ entry:
 
 ; *********** __arm_mte_get_tag *************
 %struct.S8K = type { [2048 x i32] }
-define i32* @get_tag1(i32* %ptr) {
+define ptr @get_tag1(ptr %ptr) {
 entry:
 ; CHECK-LABEL: get_tag1:
-  %0 = bitcast i32* %ptr to i8*
-  %1 = tail call i8* @llvm.aarch64.ldg(i8* %0, i8* %0)
-  %2 = bitcast i8* %1 to i32*
-  ret i32* %2
+  %0 = tail call ptr @llvm.aarch64.ldg(ptr %ptr, ptr %ptr)
+  ret ptr %0
 ; CHECK: ldg x0, [x0]
 }
 
-define i32* @get_tag1_two_parm(i32* %ret_ptr, i32* %ptr) {
+define ptr @get_tag1_two_parm(ptr %ret_ptr, ptr %ptr) {
 entry:
 ; CHECK-LABEL: get_tag1_two_parm:
-  %0 = bitcast i32* %ret_ptr to i8*
-  %1 = bitcast i32* %ptr to i8*
-  %2 = tail call i8* @llvm.aarch64.ldg(i8* %0, i8* %1)
-  %3 = bitcast i8* %2 to i32*
-  ret i32* %3
+  %0 = tail call ptr @llvm.aarch64.ldg(ptr %ret_ptr, ptr %ptr)
+  ret ptr %0
 ; CHECK: ldg x0, [x1]
 }
 
-define i32* @get_tag1stack() {
+define ptr @get_tag1stack() {
 entry:
 ; CHECK-LABEL: get_tag1stack:
   %s = alloca %struct.S8K, align 4
-  %0 = bitcast %struct.S8K* %s to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8192, i8* nonnull %0)
-  %1 = call i8* @llvm.aarch64.ldg(i8* nonnull %0, i8* nonnull %0)
-  %2 = bitcast i8* %1 to i32*
-  call void @llvm.lifetime.end.p0i8(i64 8192, i8* nonnull %0)
-  ret i32* %2
+  call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
+  %0 = call ptr @llvm.aarch64.ldg(ptr nonnull %s, ptr nonnull %s)
+  call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
+  ret ptr %0
 ; CHECK: mov [[T0:x[0-9]+]], sp
 ; CHECK: ldg [[T0]], [sp]
 }
 
-define i32* @get_tag1stack_two_param(i32* %ret_ptr) {
+define ptr @get_tag1stack_two_param(ptr %ret_ptr) {
 entry:
 ; CHECK-LABEL: get_tag1stack_two_param:
   %s = alloca %struct.S8K, align 4
-  %0 = bitcast %struct.S8K* %s to i8*
-  %1 = bitcast i32*  %ret_ptr to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8192, i8* nonnull %0)
-  %2 = call i8* @llvm.aarch64.ldg(i8* nonnull %1, i8* nonnull %0)
-  %3 = bitcast i8* %2 to i32*
-  call void @llvm.lifetime.end.p0i8(i64 8192, i8* nonnull %0)
-  ret i32* %3
+  call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
+  %0 = call ptr @llvm.aarch64.ldg(ptr nonnull %ret_ptr, ptr nonnull %s)
+  call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
+  ret ptr %0
 ; CHECK-NOT: mov {{.*}}, sp
 ; CHECK: ldg x0, [sp]
 }
 
 
-define i32* @get_tag2(i32* %ptr) {
+define ptr @get_tag2(ptr %ptr) {
 entry:
 ; CHECK-LABEL: get_tag2:
-  %add.ptr = getelementptr inbounds i32, i32* %ptr, i64 4
-  %0 = bitcast i32* %add.ptr to i8*
-  %1 = tail call i8* @llvm.aarch64.ldg(i8* nonnull %0, i8* nonnull %0)
-  %2 = bitcast i8* %1 to i32*
-  ret i32* %2
+  %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 4
+  %0 = tail call ptr @llvm.aarch64.ldg(ptr nonnull %add.ptr, ptr nonnull %add.ptr)
+  ret ptr %0
 ; CHECK: add  [[T0:x[0-9]+]], x0, #16
 ; CHECK: ldg  [[T0]], [x0, #16]
 }
 
-define i32* @get_tag2stack() {
+define ptr @get_tag2stack() {
 entry:
 ; CHECK-LABEL: get_tag2stack:
   %s = alloca %struct.S8K, align 4
-  %0 = bitcast %struct.S8K* %s to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8192, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds %struct.S8K, %struct.S8K* %s, i64 0, i32 0, i64 4
-  %1 = bitcast i32* %arrayidx to i8*
-  %2 = call i8* @llvm.aarch64.ldg(i8* nonnull %1, i8* nonnull %1)
-  %3 = bitcast i8* %2 to i32*
-  call void @llvm.lifetime.end.p0i8(i64 8192, i8* nonnull %0)
-  ret i32* %3
+  call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
+  %arrayidx = getelementptr inbounds %struct.S8K, ptr %s, i64 0, i32 0, i64 4
+  %0 = call ptr @llvm.aarch64.ldg(ptr nonnull %arrayidx, ptr nonnull %arrayidx)
+  call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
+  ret ptr %0
 ; CHECK: mov [[T0:x[0-9]+]], sp
 ; CHECK: add x0, [[T0]], #16
 ; CHECK: ldg x0, [sp, #16]
 }
 
 
-define i32* @get_tag3(i32* %ptr) {
+define ptr @get_tag3(ptr %ptr) {
 entry:
 ; CHECK-LABEL: get_tag3:
-  %add.ptr = getelementptr inbounds i32, i32* %ptr, i64 1020
-  %0 = bitcast i32* %add.ptr to i8*
-  %1 = tail call i8* @llvm.aarch64.ldg(i8* nonnull %0, i8* nonnull %0)
-  %2 = bitcast i8* %1 to i32*
-  ret i32* %2
+  %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 1020
+  %0 = tail call ptr @llvm.aarch64.ldg(ptr nonnull %add.ptr, ptr nonnull %add.ptr)
+  ret ptr %0
 ; CHECK: add [[T0:x[0-8]+]], x0, #4080
 ; CHECK: ldg [[T0]], [x0, #4080]
 }
 
-define i32* @get_tag3stack() {
+define ptr @get_tag3stack() {
 entry:
 ; CHECK-LABEL: get_tag3stack:
   %s = alloca %struct.S8K, align 4
-  %0 = bitcast %struct.S8K* %s to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8192, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds %struct.S8K, %struct.S8K* %s, i64 0, i32 0, i64 1020
-  %1 = bitcast i32* %arrayidx to i8*
-  %2 = call i8* @llvm.aarch64.ldg(i8* nonnull %1, i8* nonnull %1)
-  %3 = bitcast i8* %2 to i32*
-  call void @llvm.lifetime.end.p0i8(i64 8192, i8* nonnull %0)
-  ret i32* %3
+  call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
+  %arrayidx = getelementptr inbounds %struct.S8K, ptr %s, i64 0, i32 0, i64 1020
+  %0 = call ptr @llvm.aarch64.ldg(ptr nonnull %arrayidx, ptr nonnull %arrayidx)
+  call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
+  ret ptr %0
 ; CHECK: mov [[T0:x[0-9]+]], sp
 ; CHECK: add x0, [[T0]], #4080
 ; CHECK: ldg x0, [sp, #4080]
 }
 
 
-define i32* @get_tag4(i32* %ptr) {
+define ptr @get_tag4(ptr %ptr) {
 entry:
 ; CHECK-LABEL: get_tag4:
-  %add.ptr = getelementptr inbounds i32, i32* %ptr, i64 1024
-  %0 = bitcast i32* %add.ptr to i8*
-  %1 = tail call i8* @llvm.aarch64.ldg(i8* nonnull %0, i8* nonnull %0)
-  %2 = bitcast i8* %1 to i32*
-  ret i32* %2
+  %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 1024
+  %0 = tail call ptr @llvm.aarch64.ldg(ptr nonnull %add.ptr, ptr nonnull %add.ptr)
+  ret ptr %0
 ; CHECK: add x0, x0, #1, lsl #12
 ; CHECK-NEXT: ldg x0, [x0]
 }
 
-define i32* @get_tag4stack() {
+define ptr @get_tag4stack() {
 entry:
 ; CHECK-LABEL: get_tag4stack:
   %s = alloca %struct.S8K, align 4
-  %0 = bitcast %struct.S8K* %s to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8192, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds %struct.S8K, %struct.S8K* %s, i64 0, i32 0, i64 1024
-  %1 = bitcast i32* %arrayidx to i8*
-  %2 = call i8* @llvm.aarch64.ldg(i8* nonnull %1, i8* nonnull %1)
-  %3 = bitcast i8* %2 to i32*
-  call void @llvm.lifetime.end.p0i8(i64 8192, i8* nonnull %0)
-  ret i32* %3
+  call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
+  %arrayidx = getelementptr inbounds %struct.S8K, ptr %s, i64 0, i32 0, i64 1024
+  %0 = call ptr @llvm.aarch64.ldg(ptr nonnull %arrayidx, ptr nonnull %arrayidx)
+  call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
+  ret ptr %0
 ; CHECK: mov [[T0:x[0-9]+]], sp
 ; CHECK-NEXT: add x[[T1:[0-9]+]], [[T0]], #1, lsl #12
 ; CHECK-NEXT: ldg x[[T1]], [x[[T1]]]
 }
 
-define i32* @get_tag5(i32* %ptr) {
+define ptr @get_tag5(ptr %ptr) {
 entry:
 ; CHECK-LABEL: get_tag5:
-  %add.ptr = getelementptr inbounds i32, i32* %ptr, i64 5
-  %0 = bitcast i32* %add.ptr to i8*
-  %1 = tail call i8* @llvm.aarch64.ldg(i8* nonnull %0, i8* nonnull %0)
-  %2 = bitcast i8* %1 to i32*
-  ret i32* %2
+  %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 5
+  %0 = tail call ptr @llvm.aarch64.ldg(ptr nonnull %add.ptr, ptr nonnull %add.ptr)
+  ret ptr %0
 ; CHECK: add x0, x0, #20
 ; CHECK-NEXT: ldg x0, [x0]
 }
 
-define i32* @get_tag5stack() {
+define ptr @get_tag5stack() {
 entry:
 ; CHECK-LABEL: get_tag5stack:
   %s = alloca %struct.S8K, align 4
-  %0 = bitcast %struct.S8K* %s to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8192, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds %struct.S8K, %struct.S8K* %s, i64 0, i32 0, i64 5
-  %1 = bitcast i32* %arrayidx to i8*
-  %2 = call i8* @llvm.aarch64.ldg(i8* nonnull %1, i8* nonnull %1)
-  %3 = bitcast i8* %2 to i32*
-  call void @llvm.lifetime.end.p0i8(i64 8192, i8* nonnull %0)
-  ret i32* %3
+  call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
+  %arrayidx = getelementptr inbounds %struct.S8K, ptr %s, i64 0, i32 0, i64 5
+  %0 = call ptr @llvm.aarch64.ldg(ptr nonnull %arrayidx, ptr nonnull %arrayidx)
+  call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
+  ret ptr %0
 ; CHECK: mov [[T0:x[0-9]+]], sp
 ; CHECK: add x[[T1:[0-9]+]], [[T0]], #20
 ; CHECK-NEXT: ldg x[[T1]], [x[[T1]]]
@@ -337,138 +280,114 @@ entry:
 
 
 ; *********** __arm_mte_set_tag  *************
-define void @set_tag1(i32* %tag, i32* %ptr) {
+define void @set_tag1(ptr %tag, ptr %ptr) {
 entry:
 ; CHECK-LABEL: set_tag1:
-  %0 = bitcast i32* %tag to i8*
-  %1 = bitcast i32* %ptr to i8*
-  tail call void @llvm.aarch64.stg(i8* %0, i8* %1)
+  tail call void @llvm.aarch64.stg(ptr %tag, ptr %ptr)
   ret void
 ; CHECK: stg x0, [x1]
 }
 
-define void @set_tag1stack(i32* %tag) {
+define void @set_tag1stack(ptr %tag) {
 entry:
 ; CHECK-LABEL: set_tag1stack:
   %s = alloca %struct.S8K, align 4
-  %0 = bitcast i32* %tag to i8*
-  %1 = bitcast %struct.S8K* %s to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8192, i8* nonnull %1)
-  call void @llvm.aarch64.stg(i8* %0, i8* nonnull %1)
-  call void @llvm.lifetime.end.p0i8(i64 8192, i8* nonnull %0)
+  call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
+  call void @llvm.aarch64.stg(ptr %tag, ptr nonnull %s)
+  call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %tag)
   ret void
 ; CHECK: stg x0, [sp]
 }
 
 
-define void @set_tag2(i32* %tag, i32* %ptr) {
+define void @set_tag2(ptr %tag, ptr %ptr) {
 entry:
 ; CHECK-LABEL: set_tag2:
-  %add.ptr = getelementptr inbounds i32, i32* %ptr, i64 4
-  %0 = bitcast i32* %tag to i8*
-  %1 = bitcast i32* %add.ptr to i8*
-  tail call void @llvm.aarch64.stg(i8* %0, i8* %1)
+  %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 4
+  tail call void @llvm.aarch64.stg(ptr %tag, ptr %add.ptr)
   ret void
 ; CHECK: stg x0, [x1, #16]
 }
 
-define void @set_tag2stack(i32* %tag, i32* %ptr) {
+define void @set_tag2stack(ptr %tag, ptr %ptr) {
 entry:
 ; CHECK-LABEL: set_tag2stack:
   %s = alloca %struct.S8K, align 4
-  %0 = bitcast %struct.S8K* %s to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8192, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds %struct.S8K, %struct.S8K* %s, i64 0, i32 0, i64 4
-  %1 = bitcast i32* %arrayidx to i8*
-  %2 = bitcast i32* %tag to i8*
-  call void @llvm.aarch64.stg(i8* %2, i8* nonnull %1)
-  call void @llvm.lifetime.end.p0i8(i64 8192, i8* nonnull %0)
+  call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
+  %arrayidx = getelementptr inbounds %struct.S8K, ptr %s, i64 0, i32 0, i64 4
+  call void @llvm.aarch64.stg(ptr %tag, ptr nonnull %arrayidx)
+  call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
   ret void
 ; CHECK: stg x0, [sp, #16]
 }
 
 
 
-define void @set_tag3(i32* %tag, i32* %ptr) {
+define void @set_tag3(ptr %tag, ptr %ptr) {
 entry:
 ; CHECK-LABEL: set_tag3:
-  %add.ptr = getelementptr inbounds i32, i32* %ptr, i64 1020
-  %0 = bitcast i32* %add.ptr to i8*
-  %1 = bitcast i32* %tag to i8*
-  tail call void @llvm.aarch64.stg(i8* %1, i8* %0)
+  %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 1020
+  tail call void @llvm.aarch64.stg(ptr %tag, ptr %add.ptr)
   ret void
 ; CHECK: stg x0, [x1, #4080]
 }
 
-define void @set_tag3stack(i32* %tag, i32* %ptr) {
+define void @set_tag3stack(ptr %tag, ptr %ptr) {
 entry:
 ; CHECK-LABEL: set_tag3stack:
   %s = alloca %struct.S8K, align 4
-  %0 = bitcast %struct.S8K* %s to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8192, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds %struct.S8K, %struct.S8K* %s, i64 0, i32 0, i64 1020
-  %1 = bitcast i32* %arrayidx to i8*
-  %2 = bitcast i32* %tag to i8*
-  call void @llvm.aarch64.stg(i8* %2, i8* nonnull %1)
-  call void @llvm.lifetime.end.p0i8(i64 8192, i8* nonnull %0)
+  call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
+  %arrayidx = getelementptr inbounds %struct.S8K, ptr %s, i64 0, i32 0, i64 1020
+  call void @llvm.aarch64.stg(ptr %tag, ptr nonnull %arrayidx)
+  call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
   ret void
 ; CHECK: stg x0, [sp, #4080]
 }
 
 
 
-define void @set_tag4(i32* %tag, i32* %ptr) {
+define void @set_tag4(ptr %tag, ptr %ptr) {
 entry:
 ; CHECK-LABEL: set_tag4:
-  %add.ptr = getelementptr inbounds i32, i32* %ptr, i64 1024
-  %0 = bitcast i32* %add.ptr to i8*
-  %1 = bitcast i32* %tag to i8*
-  tail call void @llvm.aarch64.stg(i8* %1, i8* %0)
+  %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 1024
+  tail call void @llvm.aarch64.stg(ptr %tag, ptr %add.ptr)
   ret void
 ; CHECK: add x[[T0:[0-9]+]], x1, #1, lsl #12
 ; CHECK-NEXT: stg x0, [x[[T0]]]
 }
 
-define void @set_tag4stack(i32* %tag, i32* %ptr) {
+define void @set_tag4stack(ptr %tag, ptr %ptr) {
 entry:
 ; CHECK-LABEL: set_tag4stack:
   %s = alloca %struct.S8K, align 4
-  %0 = bitcast %struct.S8K* %s to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8192, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds %struct.S8K, %struct.S8K* %s, i64 0, i32 0, i64 1024
-  %1 = bitcast i32* %arrayidx to i8*
-  %2 = bitcast i32* %tag to i8*
-  call void @llvm.aarch64.stg(i8* %2, i8* nonnull %1)
-  call void @llvm.lifetime.end.p0i8(i64 8192, i8* nonnull %0)
+  call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
+  %arrayidx = getelementptr inbounds %struct.S8K, ptr %s, i64 0, i32 0, i64 1024
+  call void @llvm.aarch64.stg(ptr %tag, ptr nonnull %arrayidx)
+  call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
   ret void
 ; CHECK: add x[[T0:[0-9]+]], {{.*}}, #1, lsl #12
 ; CHECK-NEXT: stg x0, [x[[T0]]]
 }
 
 
-define void @set_tag5(i32* %tag, i32* %ptr) {
+define void @set_tag5(ptr %tag, ptr %ptr) {
 entry:
 ; CHECK-LABEL: set_tag5:
-  %add.ptr = getelementptr inbounds i32, i32* %ptr, i64 5
-  %0 = bitcast i32* %add.ptr to i8*
-  %1 = bitcast i32* %tag to i8*
-  tail call void @llvm.aarch64.stg(i8* %1, i8* %0)
+  %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 5
+  tail call void @llvm.aarch64.stg(ptr %tag, ptr %add.ptr)
   ret void
 ; CHECK: add x[[T0:[0-9]+]], x1, #20
 ; CHECK-NEXT: stg x0, [x[[T0]]]
 }
 
-define void @set_tag5stack(i32* %tag, i32* %ptr) {
+define void @set_tag5stack(ptr %tag, ptr %ptr) {
 entry:
 ; CHECK-LABEL: set_tag5stack:
   %s = alloca %struct.S8K, align 4
-  %0 = bitcast %struct.S8K* %s to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8192, i8* nonnull %0)
-  %arrayidx = getelementptr inbounds %struct.S8K, %struct.S8K* %s, i64 0, i32 0, i64 5
-  %1 = bitcast i32* %arrayidx to i8*
-  %2 = bitcast i32* %tag to i8*
-  call void @llvm.aarch64.stg(i8* %2, i8* nonnull %1)
-  call void @llvm.lifetime.end.p0i8(i64 8192, i8* nonnull %0)
+  call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
+  %arrayidx = getelementptr inbounds %struct.S8K, ptr %s, i64 0, i32 0, i64 5
+  call void @llvm.aarch64.stg(ptr %tag, ptr nonnull %arrayidx)
+  call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
   ret void
 ; CHECK: add x[[T0:[0-9]+]], {{.*}}, #20
 ; CHECK-NEXT: stg x0, [x[[T0]]]
@@ -476,22 +395,20 @@ entry:
 
 
 ; *********** __arm_mte_ptrdiff *************
-define i64 @subtract_pointers(i32* %ptra, i32* %ptrb) {
+define i64 @subtract_pointers(ptr %ptra, ptr %ptrb) {
 entry:
 ; CHECK-LABEL: subtract_pointers:
-  %0 = bitcast i32* %ptra to i8*
-  %1 = bitcast i32* %ptrb to i8*
-  %2 = tail call i64 @llvm.aarch64.subp(i8* %0, i8* %1)
-  ret i64 %2
+  %0 = tail call i64 @llvm.aarch64.subp(ptr %ptra, ptr %ptrb)
+  ret i64 %0
 ; CHECK: subp x0, x0, x1
 }
 
-declare i8* @llvm.aarch64.irg(i8*, i64)
-declare i8* @llvm.aarch64.addg(i8*, i64)
-declare i64 @llvm.aarch64.gmi(i8*, i64)
-declare i8* @llvm.aarch64.ldg(i8*, i8*)
-declare void @llvm.aarch64.stg(i8*, i8*)
-declare i64 @llvm.aarch64.subp(i8*, i8*)
+declare ptr @llvm.aarch64.irg(ptr, i64)
+declare ptr @llvm.aarch64.addg(ptr, i64)
+declare i64 @llvm.aarch64.gmi(ptr, i64)
+declare ptr @llvm.aarch64.ldg(ptr, ptr)
+declare void @llvm.aarch64.stg(ptr, ptr)
+declare i64 @llvm.aarch64.subp(ptr, ptr)
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)

diff --git a/llvm/test/CodeGen/AArch64/arm64-narrow-st-merge.ll b/llvm/test/CodeGen/AArch64/arm64-narrow-st-merge.ll
index b48f3b46cb46b..d4abdd91295a5 100644
--- a/llvm/test/CodeGen/AArch64/arm64-narrow-st-merge.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-narrow-st-merge.ll
@@ -6,15 +6,15 @@
 ; CHECK-STRICT-LABEL: Strh_zero
 ; CHECK-STRICT: strh wzr
 ; CHECK-STRICT: strh wzr
-define void @Strh_zero(i16* nocapture %P, i32 %n) {
+define void @Strh_zero(ptr nocapture %P, i32 %n) {
 entry:
   %idxprom = sext i32 %n to i64
-  %arrayidx = getelementptr inbounds i16, i16* %P, i64 %idxprom
-  store i16 0, i16* %arrayidx
+  %arrayidx = getelementptr inbounds i16, ptr %P, i64 %idxprom
+  store i16 0, ptr %arrayidx
   %add = add nsw i32 %n, 1
   %idxprom1 = sext i32 %add to i64
-  %arrayidx2 = getelementptr inbounds i16, i16* %P, i64 %idxprom1
-  store i16 0, i16* %arrayidx2
+  %arrayidx2 = getelementptr inbounds i16, ptr %P, i64 %idxprom1
+  store i16 0, ptr %arrayidx2
   ret void
 }
 
@@ -25,23 +25,23 @@ entry:
 ; CHECK-STRICT: strh wzr
 ; CHECK-STRICT: strh wzr
 ; CHECK-STRICT: strh wzr
-define void @Strh_zero_4(i16* nocapture %P, i32 %n) {
+define void @Strh_zero_4(ptr nocapture %P, i32 %n) {
 entry:
   %idxprom = sext i32 %n to i64
-  %arrayidx = getelementptr inbounds i16, i16* %P, i64 %idxprom
-  store i16 0, i16* %arrayidx
+  %arrayidx = getelementptr inbounds i16, ptr %P, i64 %idxprom
+  store i16 0, ptr %arrayidx
   %add = add nsw i32 %n, 1
   %idxprom1 = sext i32 %add to i64
-  %arrayidx2 = getelementptr inbounds i16, i16* %P, i64 %idxprom1
-  store i16 0, i16* %arrayidx2
+  %arrayidx2 = getelementptr inbounds i16, ptr %P, i64 %idxprom1
+  store i16 0, ptr %arrayidx2
   %add3 = add nsw i32 %n, 2
   %idxprom4 = sext i32 %add3 to i64
-  %arrayidx5 = getelementptr inbounds i16, i16* %P, i64 %idxprom4
-  store i16 0, i16* %arrayidx5
+  %arrayidx5 = getelementptr inbounds i16, ptr %P, i64 %idxprom4
+  store i16 0, ptr %arrayidx5
   %add6 = add nsw i32 %n, 3
   %idxprom7 = sext i32 %add6 to i64
-  %arrayidx8 = getelementptr inbounds i16, i16* %P, i64 %idxprom7
-  store i16 0, i16* %arrayidx8
+  %arrayidx8 = getelementptr inbounds i16, ptr %P, i64 %idxprom7
+  store i16 0, ptr %arrayidx8
   ret void
 }
 
@@ -49,29 +49,29 @@ entry:
 ; CHECK: str xzr
 ; CHECK-STRICT-LABEL: Strw_zero
 ; CHECK-STRICT: stp wzr, wzr
-define void @Strw_zero(i32* nocapture %P, i32 %n) {
+define void @Strw_zero(ptr nocapture %P, i32 %n) {
 entry:
   %idxprom = sext i32 %n to i64
-  %arrayidx = getelementptr inbounds i32, i32* %P, i64 %idxprom
-  store i32 0, i32* %arrayidx
+  %arrayidx = getelementptr inbounds i32, ptr %P, i64 %idxprom
+  store i32 0, ptr %arrayidx
   %add = add nsw i32 %n, 1
   %idxprom1 = sext i32 %add to i64
-  %arrayidx2 = getelementptr inbounds i32, i32* %P, i64 %idxprom1
-  store i32 0, i32* %arrayidx2
+  %arrayidx2 = getelementptr inbounds i32, ptr %P, i64 %idxprom1
+  store i32 0, ptr %arrayidx2
   ret void
 }
 
 ; CHECK-LABEL: Strw_zero_nonzero
 ; CHECK: stp wzr, w1
-define void @Strw_zero_nonzero(i32* nocapture %P, i32 %n)  {
+define void @Strw_zero_nonzero(ptr nocapture %P, i32 %n)  {
 entry:
   %idxprom = sext i32 %n to i64
-  %arrayidx = getelementptr inbounds i32, i32* %P, i64 %idxprom
-  store i32 0, i32* %arrayidx
+  %arrayidx = getelementptr inbounds i32, ptr %P, i64 %idxprom
+  store i32 0, ptr %arrayidx
   %add = add nsw i32 %n, 1
   %idxprom1 = sext i32 %add to i64
-  %arrayidx2 = getelementptr inbounds i32, i32* %P, i64 %idxprom1
-  store i32 %n, i32* %arrayidx2
+  %arrayidx2 = getelementptr inbounds i32, ptr %P, i64 %idxprom1
+  store i32 %n, ptr %arrayidx2
   ret void
 }
 
@@ -80,23 +80,23 @@ entry:
 ; CHECK-STRICT-LABEL: Strw_zero_4
 ; CHECK-STRICT: stp wzr, wzr
 ; CHECK-STRICT: stp wzr, wzr
-define void @Strw_zero_4(i32* nocapture %P, i32 %n) {
+define void @Strw_zero_4(ptr nocapture %P, i32 %n) {
 entry:
   %idxprom = sext i32 %n to i64
-  %arrayidx = getelementptr inbounds i32, i32* %P, i64 %idxprom
-  store i32 0, i32* %arrayidx
+  %arrayidx = getelementptr inbounds i32, ptr %P, i64 %idxprom
+  store i32 0, ptr %arrayidx
   %add = add nsw i32 %n, 1
   %idxprom1 = sext i32 %add to i64
-  %arrayidx2 = getelementptr inbounds i32, i32* %P, i64 %idxprom1
-  store i32 0, i32* %arrayidx2
+  %arrayidx2 = getelementptr inbounds i32, ptr %P, i64 %idxprom1
+  store i32 0, ptr %arrayidx2
   %add3 = add nsw i32 %n, 2
   %idxprom4 = sext i32 %add3 to i64
-  %arrayidx5 = getelementptr inbounds i32, i32* %P, i64 %idxprom4
-  store i32 0, i32* %arrayidx5
+  %arrayidx5 = getelementptr inbounds i32, ptr %P, i64 %idxprom4
+  store i32 0, ptr %arrayidx5
   %add6 = add nsw i32 %n, 3
   %idxprom7 = sext i32 %add6 to i64
-  %arrayidx8 = getelementptr inbounds i32, i32* %P, i64 %idxprom7
-  store i32 0, i32* %arrayidx8
+  %arrayidx8 = getelementptr inbounds i32, ptr %P, i64 %idxprom7
+  store i32 0, ptr %arrayidx8
   ret void
 }
 
@@ -105,16 +105,16 @@ entry:
 ; CHECK-STRICT-LABEL: Sturb_zero
 ; CHECK-STRICT: sturb wzr
 ; CHECK-STRICT: sturb wzr
-define void @Sturb_zero(i8* nocapture %P, i32 %n) #0 {
+define void @Sturb_zero(ptr nocapture %P, i32 %n) #0 {
 entry:
   %sub = add nsw i32 %n, -2
   %idxprom = sext i32 %sub to i64
-  %arrayidx = getelementptr inbounds i8, i8* %P, i64 %idxprom
-  store i8 0, i8* %arrayidx
+  %arrayidx = getelementptr inbounds i8, ptr %P, i64 %idxprom
+  store i8 0, ptr %arrayidx
   %sub2= add nsw i32 %n, -1
   %idxprom1 = sext i32 %sub2 to i64
-  %arrayidx2 = getelementptr inbounds i8, i8* %P, i64 %idxprom1
-  store i8 0, i8* %arrayidx2
+  %arrayidx2 = getelementptr inbounds i8, ptr %P, i64 %idxprom1
+  store i8 0, ptr %arrayidx2
   ret void
 }
 
@@ -123,16 +123,16 @@ entry:
 ; CHECK-STRICT-LABEL: Sturh_zero
 ; CHECK-STRICT: sturh wzr
 ; CHECK-STRICT: sturh wzr
-define void @Sturh_zero(i16* nocapture %P, i32 %n) {
+define void @Sturh_zero(ptr nocapture %P, i32 %n) {
 entry:
   %sub = add nsw i32 %n, -2
   %idxprom = sext i32 %sub to i64
-  %arrayidx = getelementptr inbounds i16, i16* %P, i64 %idxprom
-  store i16 0, i16* %arrayidx
+  %arrayidx = getelementptr inbounds i16, ptr %P, i64 %idxprom
+  store i16 0, ptr %arrayidx
   %sub1 = add nsw i32 %n, -3
   %idxprom2 = sext i32 %sub1 to i64
-  %arrayidx3 = getelementptr inbounds i16, i16* %P, i64 %idxprom2
-  store i16 0, i16* %arrayidx3
+  %arrayidx3 = getelementptr inbounds i16, ptr %P, i64 %idxprom2
+  store i16 0, ptr %arrayidx3
   ret void
 }
 
@@ -143,24 +143,24 @@ entry:
 ; CHECK-STRICT: sturh wzr
 ; CHECK-STRICT: sturh wzr
 ; CHECK-STRICT: sturh wzr
-define void @Sturh_zero_4(i16* nocapture %P, i32 %n) {
+define void @Sturh_zero_4(ptr nocapture %P, i32 %n) {
 entry:
   %sub = add nsw i32 %n, -3
   %idxprom = sext i32 %sub to i64
-  %arrayidx = getelementptr inbounds i16, i16* %P, i64 %idxprom
-  store i16 0, i16* %arrayidx
+  %arrayidx = getelementptr inbounds i16, ptr %P, i64 %idxprom
+  store i16 0, ptr %arrayidx
   %sub1 = add nsw i32 %n, -4
   %idxprom2 = sext i32 %sub1 to i64
-  %arrayidx3 = getelementptr inbounds i16, i16* %P, i64 %idxprom2
-  store i16 0, i16* %arrayidx3
+  %arrayidx3 = getelementptr inbounds i16, ptr %P, i64 %idxprom2
+  store i16 0, ptr %arrayidx3
   %sub4 = add nsw i32 %n, -2
   %idxprom5 = sext i32 %sub4 to i64
-  %arrayidx6 = getelementptr inbounds i16, i16* %P, i64 %idxprom5
-  store i16 0, i16* %arrayidx6
+  %arrayidx6 = getelementptr inbounds i16, ptr %P, i64 %idxprom5
+  store i16 0, ptr %arrayidx6
   %sub7 = add nsw i32 %n, -1
   %idxprom8 = sext i32 %sub7 to i64
-  %arrayidx9 = getelementptr inbounds i16, i16* %P, i64 %idxprom8
-  store i16 0, i16* %arrayidx9
+  %arrayidx9 = getelementptr inbounds i16, ptr %P, i64 %idxprom8
+  store i16 0, ptr %arrayidx9
   ret void
 }
 
@@ -168,16 +168,16 @@ entry:
 ; CHECK: stur xzr
 ; CHECK-STRICT-LABEL: Sturw_zero
 ; CHECK-STRICT: stp wzr, wzr
-define void @Sturw_zero(i32* nocapture %P, i32 %n) {
+define void @Sturw_zero(ptr nocapture %P, i32 %n) {
 entry:
   %sub = add nsw i32 %n, -3
   %idxprom = sext i32 %sub to i64
-  %arrayidx = getelementptr inbounds i32, i32* %P, i64 %idxprom
-  store i32 0, i32* %arrayidx
+  %arrayidx = getelementptr inbounds i32, ptr %P, i64 %idxprom
+  store i32 0, ptr %arrayidx
   %sub1 = add nsw i32 %n, -4
   %idxprom2 = sext i32 %sub1 to i64
-  %arrayidx3 = getelementptr inbounds i32, i32* %P, i64 %idxprom2
-  store i32 0, i32* %arrayidx3
+  %arrayidx3 = getelementptr inbounds i32, ptr %P, i64 %idxprom2
+  store i32 0, ptr %arrayidx3
   ret void
 }
 
@@ -186,24 +186,24 @@ entry:
 ; CHECK-STRICT-LABEL: Sturw_zero_4
 ; CHECK-STRICT: stp wzr, wzr
 ; CHECK-STRICT: stp wzr, wzr
-define void @Sturw_zero_4(i32* nocapture %P, i32 %n) {
+define void @Sturw_zero_4(ptr nocapture %P, i32 %n) {
 entry:
   %sub = add nsw i32 %n, -3
   %idxprom = sext i32 %sub to i64
-  %arrayidx = getelementptr inbounds i32, i32* %P, i64 %idxprom
-  store i32 0, i32* %arrayidx
+  %arrayidx = getelementptr inbounds i32, ptr %P, i64 %idxprom
+  store i32 0, ptr %arrayidx
   %sub1 = add nsw i32 %n, -4
   %idxprom2 = sext i32 %sub1 to i64
-  %arrayidx3 = getelementptr inbounds i32, i32* %P, i64 %idxprom2
-  store i32 0, i32* %arrayidx3
+  %arrayidx3 = getelementptr inbounds i32, ptr %P, i64 %idxprom2
+  store i32 0, ptr %arrayidx3
   %sub4 = add nsw i32 %n, -2
   %idxprom5 = sext i32 %sub4 to i64
-  %arrayidx6 = getelementptr inbounds i32, i32* %P, i64 %idxprom5
-  store i32 0, i32* %arrayidx6
+  %arrayidx6 = getelementptr inbounds i32, ptr %P, i64 %idxprom5
+  store i32 0, ptr %arrayidx6
   %sub7 = add nsw i32 %n, -1
   %idxprom8 = sext i32 %sub7 to i64
-  %arrayidx9 = getelementptr inbounds i32, i32* %P, i64 %idxprom8
-  store i32 0, i32* %arrayidx9
+  %arrayidx9 = getelementptr inbounds i32, ptr %P, i64 %idxprom8
+  store i32 0, ptr %arrayidx9
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-copyPhysReg-tuple.ll b/llvm/test/CodeGen/AArch64/arm64-neon-copyPhysReg-tuple.ll
index 276ac13da40e3..8e2d0352fddef 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-copyPhysReg-tuple.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-copyPhysReg-tuple.ll
@@ -1,34 +1,34 @@
 ; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s
 ; arm64 has a separate copy due to intrinsics
 
-define <4 x i32> @copyTuple.QPair(i32* %a, i32* %b) {
+define <4 x i32> @copyTuple.QPair(ptr %a, ptr %b) {
 ; CHECK-LABEL: copyTuple.QPair:
 ; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
 ; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
 ; CHECK: ld2 { {{v[0-9]+}}.s, {{v[0-9]+}}.s }[{{[0-9]+}}], [x{{[0-9]+|sp}}]
 entry:
-  %vld = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 2, i32 2, i32 2, i32 2>, i64 1, i32* %a)
+  %vld = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 2, i32 2, i32 2, i32 2>, i64 1, ptr %a)
   %extract = extractvalue { <4 x i32>, <4 x i32> } %vld, 0
-  %vld1 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32> %extract, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i64 1, i32* %b)
+  %vld1 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0(<4 x i32> %extract, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i64 1, ptr %b)
   %vld1.fca.0.extract = extractvalue { <4 x i32>, <4 x i32> } %vld1, 0
   ret <4 x i32> %vld1.fca.0.extract
 }
 
-define <4 x i32> @copyTuple.QTriple(i32* %a, i32* %b, <4 x i32> %c) {
+define <4 x i32> @copyTuple.QTriple(ptr %a, ptr %b, <4 x i32> %c) {
 ; CHECK-LABEL: copyTuple.QTriple:
 ; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
 ; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
 ; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
 ; CHECK: ld3 { {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s }[{{[0-9]+}}], [x{{[0-9]+|sp}}]
 entry:
-  %vld = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, i64 1, i32* %a)
+  %vld = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, i64 1, ptr %a)
   %extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld, 0
-  %vld1 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32> %extract, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, i64 1, i32* %b)
+  %vld1 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0(<4 x i32> %extract, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, i64 1, ptr %b)
   %vld1.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld1, 0
   ret <4 x i32> %vld1.fca.0.extract
 }
 
-define <4 x i32> @copyTuple.QQuad(i32* %a, i32* %b, <4 x i32> %c) {
+define <4 x i32> @copyTuple.QQuad(ptr %a, ptr %b, <4 x i32> %c) {
 ; CHECK-LABEL: copyTuple.QQuad:
 ; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
 ; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
@@ -36,13 +36,13 @@ define <4 x i32> @copyTuple.QQuad(i32* %a, i32* %b, <4 x i32> %c) {
 ; CHECK: mov v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
 ; CHECK: ld4 { {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s }[{{[0-9]+}}], [x{{[0-9]+|sp}}]
 entry:
-  %vld = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, <4 x i32> %c, i64 1, i32* %a)
+  %vld = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0(<4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, <4 x i32> %c, i64 1, ptr %a)
   %extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld, 0
-  %vld1 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32> %extract, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, i64 1, i32* %b)
+  %vld1 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0(<4 x i32> %extract, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c, <4 x i32> %c, i64 1, ptr %b)
   %vld1.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld1, 0
   ret <4 x i32> %vld1.fca.0.extract
 }
 
-declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32>, <4 x i32>, i64, i32*)
-declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i64, i32*)
-declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, i32*)
+declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0(<4 x i32>, <4 x i32>, i64, ptr)
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, i64, ptr)
+declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, ptr)

diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll b/llvm/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll
index 0bdb7120d0ef8..336f2b3bf4a20 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll
@@ -150,149 +150,149 @@ entry:
   ret <2 x i32> %b
 }
 
-define <16 x i8> @test_vld1q_dup_s8(i8* %a) {
+define <16 x i8> @test_vld1q_dup_s8(ptr %a) {
 ; CHECK-LABEL: test_vld1q_dup_s8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1r { v0.16b }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i8, i8* %a, align 1
+  %0 = load i8, ptr %a, align 1
   %1 = insertelement <16 x i8> undef, i8 %0, i32 0
   %lane = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> zeroinitializer
   ret <16 x i8> %lane
 }
 
-define <8 x i16> @test_vld1q_dup_s16(i16* %a) {
+define <8 x i16> @test_vld1q_dup_s16(ptr %a) {
 ; CHECK-LABEL: test_vld1q_dup_s16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1r { v0.8h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i16, i16* %a, align 2
+  %0 = load i16, ptr %a, align 2
   %1 = insertelement <8 x i16> undef, i16 %0, i32 0
   %lane = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> zeroinitializer
   ret <8 x i16> %lane
 }
 
-define <4 x i32> @test_vld1q_dup_s32(i32* %a) {
+define <4 x i32> @test_vld1q_dup_s32(ptr %a) {
 ; CHECK-LABEL: test_vld1q_dup_s32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1r { v0.4s }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = insertelement <4 x i32> undef, i32 %0, i32 0
   %lane = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> zeroinitializer
   ret <4 x i32> %lane
 }
 
-define <2 x i64> @test_vld1q_dup_s64(i64* %a) {
+define <2 x i64> @test_vld1q_dup_s64(ptr %a) {
 ; CHECK-LABEL: test_vld1q_dup_s64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1r { v0.2d }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %a, align 8
   %1 = insertelement <2 x i64> undef, i64 %0, i32 0
   %lane = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %lane
 }
 
-define <4 x float> @test_vld1q_dup_f32(float* %a) {
+define <4 x float> @test_vld1q_dup_f32(ptr %a) {
 ; CHECK-LABEL: test_vld1q_dup_f32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1r { v0.4s }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %1 = insertelement <4 x float> undef, float %0, i32 0
   %lane = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> zeroinitializer
   ret <4 x float> %lane
 }
 
-define <2 x double> @test_vld1q_dup_f64(double* %a) {
+define <2 x double> @test_vld1q_dup_f64(ptr %a) {
 ; CHECK-LABEL: test_vld1q_dup_f64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1r { v0.2d }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %1 = insertelement <2 x double> undef, double %0, i32 0
   %lane = shufflevector <2 x double> %1, <2 x double> undef, <2 x i32> zeroinitializer
   ret <2 x double> %lane
 }
 
-define <8 x i8> @test_vld1_dup_s8(i8* %a) {
+define <8 x i8> @test_vld1_dup_s8(ptr %a) {
 ; CHECK-LABEL: test_vld1_dup_s8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1r { v0.8b }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i8, i8* %a, align 1
+  %0 = load i8, ptr %a, align 1
   %1 = insertelement <8 x i8> undef, i8 %0, i32 0
   %lane = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
   ret <8 x i8> %lane
 }
 
-define <4 x i16> @test_vld1_dup_s16(i16* %a) {
+define <4 x i16> @test_vld1_dup_s16(ptr %a) {
 ; CHECK-LABEL: test_vld1_dup_s16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1r { v0.4h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i16, i16* %a, align 2
+  %0 = load i16, ptr %a, align 2
   %1 = insertelement <4 x i16> undef, i16 %0, i32 0
   %lane = shufflevector <4 x i16> %1, <4 x i16> undef, <4 x i32> zeroinitializer
   ret <4 x i16> %lane
 }
 
-define <2 x i32> @test_vld1_dup_s32(i32* %a) {
+define <2 x i32> @test_vld1_dup_s32(ptr %a) {
 ; CHECK-LABEL: test_vld1_dup_s32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1r { v0.2s }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = insertelement <2 x i32> undef, i32 %0, i32 0
   %lane = shufflevector <2 x i32> %1, <2 x i32> undef, <2 x i32> zeroinitializer
   ret <2 x i32> %lane
 }
 
-define <1 x i64> @test_vld1_dup_s64(i64* %a) {
+define <1 x i64> @test_vld1_dup_s64(ptr %a) {
 ; CHECK-LABEL: test_vld1_dup_s64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %a, align 8
   %1 = insertelement <1 x i64> undef, i64 %0, i32 0
   ret <1 x i64> %1
 }
 
-define <2 x float> @test_vld1_dup_f32(float* %a) {
+define <2 x float> @test_vld1_dup_f32(ptr %a) {
 ; CHECK-LABEL: test_vld1_dup_f32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1r { v0.2s }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %1 = insertelement <2 x float> undef, float %0, i32 0
   %lane = shufflevector <2 x float> %1, <2 x float> undef, <2 x i32> zeroinitializer
   ret <2 x float> %lane
 }
 
-define <1 x double> @test_vld1_dup_f64(double* %a) {
+define <1 x double> @test_vld1_dup_f64(ptr %a) {
 ; CHECK-LABEL: test_vld1_dup_f64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %1 = insertelement <1 x double> undef, double %0, i32 0
   ret <1 x double> %1
 }
 
-define <1 x i64> @testDUP.v1i64(i64* %a, i64* %b) #0 {
+define <1 x i64> @testDUP.v1i64(ptr %a, ptr %b) #0 {
 ; As there is a store operation depending on %1, LD1R pattern can't be selected.
 ; So LDR and FMOV should be emitted.
 ; CHECK-LABEL: testDUP.v1i64:
@@ -301,13 +301,13 @@ define <1 x i64> @testDUP.v1i64(i64* %a, i64* %b) #0 {
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret
-  %1 = load i64, i64* %a, align 8
-  store i64 %1, i64* %b, align 8
+  %1 = load i64, ptr %a, align 8
+  store i64 %1, ptr %b, align 8
   %vecinit.i = insertelement <1 x i64> undef, i64 %1, i32 0
   ret <1 x i64> %vecinit.i
 }
 
-define <1 x double> @testDUP.v1f64(double* %a, double* %b) #0 {
+define <1 x double> @testDUP.v1f64(ptr %a, ptr %b) #0 {
 ; As there is a store operation depending on %1, LD1R pattern can't be selected.
 ; So LDR and FMOV should be emitted.
 ; CHECK-LABEL: testDUP.v1f64:
@@ -315,79 +315,79 @@ define <1 x double> @testDUP.v1f64(double* %a, double* %b) #0 {
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    str d0, [x1]
 ; CHECK-NEXT:    ret
-  %1 = load double, double* %a, align 8
-  store double %1, double* %b, align 8
+  %1 = load double, ptr %a, align 8
+  store double %1, ptr %b, align 8
   %vecinit.i = insertelement <1 x double> undef, double %1, i32 0
   ret <1 x double> %vecinit.i
 }
 
-define <16 x i8> @test_vld1q_lane_s8(i8* %a, <16 x i8> %b) {
+define <16 x i8> @test_vld1q_lane_s8(ptr %a, <16 x i8> %b) {
 ; CHECK-LABEL: test_vld1q_lane_s8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1 { v0.b }[15], [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i8, i8* %a, align 1
+  %0 = load i8, ptr %a, align 1
   %vld1_lane = insertelement <16 x i8> %b, i8 %0, i32 15
   ret <16 x i8> %vld1_lane
 }
 
-define <8 x i16> @test_vld1q_lane_s16(i16* %a, <8 x i16> %b) {
+define <8 x i16> @test_vld1q_lane_s16(ptr %a, <8 x i16> %b) {
 ; CHECK-LABEL: test_vld1q_lane_s16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1 { v0.h }[7], [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i16, i16* %a, align 2
+  %0 = load i16, ptr %a, align 2
   %vld1_lane = insertelement <8 x i16> %b, i16 %0, i32 7
   ret <8 x i16> %vld1_lane
 }
 
-define <4 x i32> @test_vld1q_lane_s32(i32* %a, <4 x i32> %b) {
+define <4 x i32> @test_vld1q_lane_s32(ptr %a, <4 x i32> %b) {
 ; CHECK-LABEL: test_vld1q_lane_s32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1 { v0.s }[3], [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %vld1_lane = insertelement <4 x i32> %b, i32 %0, i32 3
   ret <4 x i32> %vld1_lane
 }
 
-define <2 x i64> @test_vld1q_lane_s64(i64* %a, <2 x i64> %b) {
+define <2 x i64> @test_vld1q_lane_s64(ptr %a, <2 x i64> %b) {
 ; CHECK-LABEL: test_vld1q_lane_s64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1 { v0.d }[1], [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %a, align 8
   %vld1_lane = insertelement <2 x i64> %b, i64 %0, i32 1
   ret <2 x i64> %vld1_lane
 }
 
-define <4 x float> @test_vld1q_lane_f32(float* %a, <4 x float> %b) {
+define <4 x float> @test_vld1q_lane_f32(ptr %a, <4 x float> %b) {
 ; CHECK-LABEL: test_vld1q_lane_f32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1 { v0.s }[3], [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %vld1_lane = insertelement <4 x float> %b, float %0, i32 3
   ret <4 x float> %vld1_lane
 }
 
-define <2 x double> @test_vld1q_lane_f64(double* %a, <2 x double> %b) {
+define <2 x double> @test_vld1q_lane_f64(ptr %a, <2 x double> %b) {
 ; CHECK-LABEL: test_vld1q_lane_f64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1 { v0.d }[1], [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %vld1_lane = insertelement <2 x double> %b, double %0, i32 1
   ret <2 x double> %vld1_lane
 }
 
-define <8 x i8> @test_vld1_lane_s8(i8* %a, <8 x i8> %b) {
+define <8 x i8> @test_vld1_lane_s8(ptr %a, <8 x i8> %b) {
 ; CHECK-LABEL: test_vld1_lane_s8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
@@ -395,12 +395,12 @@ define <8 x i8> @test_vld1_lane_s8(i8* %a, <8 x i8> %b) {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i8, i8* %a, align 1
+  %0 = load i8, ptr %a, align 1
   %vld1_lane = insertelement <8 x i8> %b, i8 %0, i32 7
   ret <8 x i8> %vld1_lane
 }
 
-define <4 x i16> @test_vld1_lane_s16(i16* %a, <4 x i16> %b) {
+define <4 x i16> @test_vld1_lane_s16(ptr %a, <4 x i16> %b) {
 ; CHECK-LABEL: test_vld1_lane_s16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
@@ -408,12 +408,12 @@ define <4 x i16> @test_vld1_lane_s16(i16* %a, <4 x i16> %b) {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i16, i16* %a, align 2
+  %0 = load i16, ptr %a, align 2
   %vld1_lane = insertelement <4 x i16> %b, i16 %0, i32 3
   ret <4 x i16> %vld1_lane
 }
 
-define <2 x i32> @test_vld1_lane_s32(i32* %a, <2 x i32> %b) {
+define <2 x i32> @test_vld1_lane_s32(ptr %a, <2 x i32> %b) {
 ; CHECK-LABEL: test_vld1_lane_s32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
@@ -421,23 +421,23 @@ define <2 x i32> @test_vld1_lane_s32(i32* %a, <2 x i32> %b) {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* %a, align 4
+  %0 = load i32, ptr %a, align 4
   %vld1_lane = insertelement <2 x i32> %b, i32 %0, i32 1
   ret <2 x i32> %vld1_lane
 }
 
-define <1 x i64> @test_vld1_lane_s64(i64* %a, <1 x i64> %b) {
+define <1 x i64> @test_vld1_lane_s64(ptr %a, <1 x i64> %b) {
 ; CHECK-LABEL: test_vld1_lane_s64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i64, i64* %a, align 8
+  %0 = load i64, ptr %a, align 8
   %vld1_lane = insertelement <1 x i64> undef, i64 %0, i32 0
   ret <1 x i64> %vld1_lane
 }
 
-define <2 x float> @test_vld1_lane_f32(float* %a, <2 x float> %b) {
+define <2 x float> @test_vld1_lane_f32(ptr %a, <2 x float> %b) {
 ; CHECK-LABEL: test_vld1_lane_f32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
@@ -445,144 +445,144 @@ define <2 x float> @test_vld1_lane_f32(float* %a, <2 x float> %b) {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %vld1_lane = insertelement <2 x float> %b, float %0, i32 1
   ret <2 x float> %vld1_lane
 }
 
-define <1 x double> @test_vld1_lane_f64(double* %a, <1 x double> %b) {
+define <1 x double> @test_vld1_lane_f64(ptr %a, <1 x double> %b) {
 ; CHECK-LABEL: test_vld1_lane_f64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load double, double* %a, align 8
+  %0 = load double, ptr %a, align 8
   %vld1_lane = insertelement <1 x double> undef, double %0, i32 0
   ret <1 x double> %vld1_lane
 }
 
-define void @test_vst1q_lane_s8(i8* %a, <16 x i8> %b) {
+define void @test_vst1q_lane_s8(ptr %a, <16 x i8> %b) {
 ; CHECK-LABEL: test_vst1q_lane_s8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    st1 { v0.b }[15], [x0]
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <16 x i8> %b, i32 15
-  store i8 %0, i8* %a, align 1
+  store i8 %0, ptr %a, align 1
   ret void
 }
 
-define void @test_vst1q_lane_s16(i16* %a, <8 x i16> %b) {
+define void @test_vst1q_lane_s16(ptr %a, <8 x i16> %b) {
 ; CHECK-LABEL: test_vst1q_lane_s16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    st1 { v0.h }[7], [x0]
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <8 x i16> %b, i32 7
-  store i16 %0, i16* %a, align 2
+  store i16 %0, ptr %a, align 2
   ret void
 }
 
-define void @test_vst1q_lane0_s16(i16* %a, <8 x i16> %b) {
+define void @test_vst1q_lane0_s16(ptr %a, <8 x i16> %b) {
 ; CHECK-LABEL: test_vst1q_lane0_s16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str h0, [x0]
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <8 x i16> %b, i32 0
-  store i16 %0, i16* %a, align 2
+  store i16 %0, ptr %a, align 2
   ret void
 }
 
-define void @test_vst1q_lane_s32(i32* %a, <4 x i32> %b) {
+define void @test_vst1q_lane_s32(ptr %a, <4 x i32> %b) {
 ; CHECK-LABEL: test_vst1q_lane_s32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    st1 { v0.s }[3], [x0]
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <4 x i32> %b, i32 3
-  store i32 %0, i32* %a, align 4
+  store i32 %0, ptr %a, align 4
   ret void
 }
 
-define void @test_vst1q_lane0_s32(i32* %a, <4 x i32> %b) {
+define void @test_vst1q_lane0_s32(ptr %a, <4 x i32> %b) {
 ; CHECK-LABEL: test_vst1q_lane0_s32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str s0, [x0]
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <4 x i32> %b, i32 0
-  store i32 %0, i32* %a, align 4
+  store i32 %0, ptr %a, align 4
   ret void
 }
 
-define void @test_vst1q_lane_s64(i64* %a, <2 x i64> %b) {
+define void @test_vst1q_lane_s64(ptr %a, <2 x i64> %b) {
 ; CHECK-LABEL: test_vst1q_lane_s64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    st1 { v0.d }[1], [x0]
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <2 x i64> %b, i32 1
-  store i64 %0, i64* %a, align 8
+  store i64 %0, ptr %a, align 8
   ret void
 }
 
-define void @test_vst1q_lane0_s64(i64* %a, <2 x i64> %b) {
+define void @test_vst1q_lane0_s64(ptr %a, <2 x i64> %b) {
 ; CHECK-LABEL: test_vst1q_lane0_s64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <2 x i64> %b, i32 0
-  store i64 %0, i64* %a, align 8
+  store i64 %0, ptr %a, align 8
   ret void
 }
 
-define void @test_vst1q_lane_f32(float* %a, <4 x float> %b) {
+define void @test_vst1q_lane_f32(ptr %a, <4 x float> %b) {
 ; CHECK-LABEL: test_vst1q_lane_f32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    st1 { v0.s }[3], [x0]
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <4 x float> %b, i32 3
-  store float %0, float* %a, align 4
+  store float %0, ptr %a, align 4
   ret void
 }
 
-define void @test_vst1q_lane0_f32(float* %a, <4 x float> %b) {
+define void @test_vst1q_lane0_f32(ptr %a, <4 x float> %b) {
 ; CHECK-LABEL: test_vst1q_lane0_f32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str s0, [x0]
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <4 x float> %b, i32 0
-  store float %0, float* %a, align 4
+  store float %0, ptr %a, align 4
   ret void
 }
 
-define void @test_vst1q_lane_f64(double* %a, <2 x double> %b) {
+define void @test_vst1q_lane_f64(ptr %a, <2 x double> %b) {
 ; CHECK-LABEL: test_vst1q_lane_f64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    st1 { v0.d }[1], [x0]
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <2 x double> %b, i32 1
-  store double %0, double* %a, align 8
+  store double %0, ptr %a, align 8
   ret void
 }
 
-define void @test_vst1q_lane0_f64(double* %a, <2 x double> %b) {
+define void @test_vst1q_lane0_f64(ptr %a, <2 x double> %b) {
 ; CHECK-LABEL: test_vst1q_lane0_f64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <2 x double> %b, i32 0
-  store double %0, double* %a, align 8
+  store double %0, ptr %a, align 8
   ret void
 }
 
-define void @test_vst1_lane_s8(i8* %a, <8 x i8> %b) {
+define void @test_vst1_lane_s8(ptr %a, <8 x i8> %b) {
 ; CHECK-LABEL: test_vst1_lane_s8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
@@ -590,11 +590,11 @@ define void @test_vst1_lane_s8(i8* %a, <8 x i8> %b) {
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <8 x i8> %b, i32 7
-  store i8 %0, i8* %a, align 1
+  store i8 %0, ptr %a, align 1
   ret void
 }
 
-define void @test_vst1_lane_s16(i16* %a, <4 x i16> %b) {
+define void @test_vst1_lane_s16(ptr %a, <4 x i16> %b) {
 ; CHECK-LABEL: test_vst1_lane_s16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
@@ -602,11 +602,11 @@ define void @test_vst1_lane_s16(i16* %a, <4 x i16> %b) {
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <4 x i16> %b, i32 3
-  store i16 %0, i16* %a, align 2
+  store i16 %0, ptr %a, align 2
   ret void
 }
 
-define void @test_vst1_lane0_s16(i16* %a, <4 x i16> %b) {
+define void @test_vst1_lane0_s16(ptr %a, <4 x i16> %b) {
 ; CHECK-LABEL: test_vst1_lane0_s16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
@@ -614,11 +614,11 @@ define void @test_vst1_lane0_s16(i16* %a, <4 x i16> %b) {
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <4 x i16> %b, i32 0
-  store i16 %0, i16* %a, align 2
+  store i16 %0, ptr %a, align 2
   ret void
 }
 
-define void @test_vst1_lane_s32(i32* %a, <2 x i32> %b) {
+define void @test_vst1_lane_s32(ptr %a, <2 x i32> %b) {
 ; CHECK-LABEL: test_vst1_lane_s32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
@@ -626,11 +626,11 @@ define void @test_vst1_lane_s32(i32* %a, <2 x i32> %b) {
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <2 x i32> %b, i32 1
-  store i32 %0, i32* %a, align 4
+  store i32 %0, ptr %a, align 4
   ret void
 }
 
-define void @test_vst1_lane0_s32(i32* %a, <2 x i32> %b) {
+define void @test_vst1_lane0_s32(ptr %a, <2 x i32> %b) {
 ; CHECK-LABEL: test_vst1_lane0_s32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
@@ -638,11 +638,11 @@ define void @test_vst1_lane0_s32(i32* %a, <2 x i32> %b) {
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <2 x i32> %b, i32 0
-  store i32 %0, i32* %a, align 4
+  store i32 %0, ptr %a, align 4
   ret void
 }
 
-define void @test_vst1_lane_s64(i64* %a, <1 x i64> %b) {
+define void @test_vst1_lane_s64(ptr %a, <1 x i64> %b) {
 ; CHECK-LABEL: test_vst1_lane_s64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
@@ -650,11 +650,11 @@ define void @test_vst1_lane_s64(i64* %a, <1 x i64> %b) {
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <1 x i64> %b, i32 0
-  store i64 %0, i64* %a, align 8
+  store i64 %0, ptr %a, align 8
   ret void
 }
 
-define void @test_vst1_lane_f32(float* %a, <2 x float> %b) {
+define void @test_vst1_lane_f32(ptr %a, <2 x float> %b) {
 ; CHECK-LABEL: test_vst1_lane_f32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
@@ -662,11 +662,11 @@ define void @test_vst1_lane_f32(float* %a, <2 x float> %b) {
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <2 x float> %b, i32 1
-  store float %0, float* %a, align 4
+  store float %0, ptr %a, align 4
   ret void
 }
 
-define void @test_vst1_lane0_f32(float* %a, <2 x float> %b) {
+define void @test_vst1_lane0_f32(ptr %a, <2 x float> %b) {
 ; CHECK-LABEL: test_vst1_lane0_f32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
@@ -674,17 +674,17 @@ define void @test_vst1_lane0_f32(float* %a, <2 x float> %b) {
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <2 x float> %b, i32 0
-  store float %0, float* %a, align 4
+  store float %0, ptr %a, align 4
   ret void
 }
 
-define void @test_vst1_lane_f64(double* %a, <1 x double> %b) {
+define void @test_vst1_lane_f64(ptr %a, <1 x double> %b) {
 ; CHECK-LABEL: test_vst1_lane_f64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
 entry:
   %0 = extractelement <1 x double> %b, i32 0
-  store double %0, double* %a, align 8
+  store double %0, ptr %a, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-vector-list-spill.ll b/llvm/test/CodeGen/AArch64/arm64-neon-vector-list-spill.ll
index 8262fe43a66c8..6fa70561f18e7 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-vector-list-spill.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-vector-list-spill.ll
@@ -4,13 +4,13 @@
 ; test case seems very simple and the register pressure is not high. If the
 ; spill/fill algorithm is optimized, this test case may not be triggered. And
 ; then we can delete it.
-define i32 @spill.DPairReg(i32* %arg1, i32 %arg2) {
+define i32 @spill.DPairReg(ptr %arg1, i32 %arg2) {
 ; CHECK-LABEL: spill.DPairReg:
 ; CHECK: ld2 { v{{[0-9]+}}.2s, v{{[0-9]+}}.2s }, [{{x[0-9]+|sp}}]
 ; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
 ; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
 entry:
-  %vld = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32* %arg1)
+  %vld = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0(ptr %arg1)
   %cmp = icmp eq i32 %arg2, 0
   br i1 %cmp, label %if.then, label %if.end
 
@@ -24,13 +24,13 @@ if.end:
   ret i32 %res
 }
 
-define i16 @spill.DTripleReg(i16* %arg1, i32 %arg2) {
+define i16 @spill.DTripleReg(ptr %arg1, i32 %arg2) {
 ; CHECK-LABEL: spill.DTripleReg:
 ; CHECK: ld3 { v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h }, [{{x[0-9]+|sp}}]
 ; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
 ; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
 entry:
-  %vld = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %arg1)
+  %vld = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0(ptr %arg1)
   %cmp = icmp eq i32 %arg2, 0
   br i1 %cmp, label %if.then, label %if.end
 
@@ -44,13 +44,13 @@ if.end:
   ret i16 %res
 }
 
-define i16 @spill.DQuadReg(i16* %arg1, i32 %arg2) {
+define i16 @spill.DQuadReg(ptr %arg1, i32 %arg2) {
 ; CHECK-LABEL: spill.DQuadReg:
 ; CHECK: ld4 { v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h }, [{{x[0-9]+|sp}}]
 ; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
 ; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
 entry:
-  %vld = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0i16(i16* %arg1)
+  %vld = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr %arg1)
   %cmp = icmp eq i32 %arg2, 0
   br i1 %cmp, label %if.then, label %if.end
 
@@ -64,13 +64,13 @@ if.end:
   ret i16 %res
 }
 
-define i32 @spill.QPairReg(i32* %arg1, i32 %arg2) {
+define i32 @spill.QPairReg(ptr %arg1, i32 %arg2) {
 ; CHECK-LABEL: spill.QPairReg:
 ; CHECK: ld2 { v{{[0-9]+}}.4s, v{{[0-9]+}}.4s }, [{{x[0-9]+|sp}}]
 ; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
 ; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
 entry:
-  %vld = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32* %arg1)
+  %vld = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr %arg1)
   %cmp = icmp eq i32 %arg2, 0
   br i1 %cmp, label %if.then, label %if.end
 
@@ -84,13 +84,13 @@ if.end:
   ret i32 %res
 }
 
-define float @spill.QTripleReg(float* %arg1, i32 %arg2) {
+define float @spill.QTripleReg(ptr %arg1, i32 %arg2) {
 ; CHECK-LABEL: spill.QTripleReg:
 ; CHECK: ld3 { v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s }, [{{x[0-9]+|sp}}]
 ; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
 ; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
 entry:
-  %vld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(float* %arg1)
+  %vld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0(ptr %arg1)
   %cmp = icmp eq i32 %arg2, 0
   br i1 %cmp, label %if.then, label %if.end
 
@@ -104,13 +104,13 @@ if.end:
   ret float %res
 }
 
-define i8 @spill.QQuadReg(i8* %arg1, i32 %arg2) {
+define i8 @spill.QQuadReg(ptr %arg1, i32 %arg2) {
 ; CHECK-LABEL: spill.QQuadReg:
 ; CHECK: ld4 { v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b }, [{{x[0-9]+|sp}}]
 ; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
 ; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
 entry:
-  %vld = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0i8(i8* %arg1)
+  %vld = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr %arg1)
   %cmp = icmp eq i32 %arg2, 0
   br i1 %cmp, label %if.then, label %if.end
 
@@ -124,12 +124,12 @@ if.end:
   ret i8 %res
 }
 
-declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32*)
-declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16*)
-declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0i16(i16*)
-declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32*)
-declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(float*)
-declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0i8(i8*)
+declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0(ptr)
+declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0(ptr)
+declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0(ptr)
+declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr)
+declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0(ptr)
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr)
 
 declare void @foo()
 
@@ -138,8 +138,8 @@ declare void @foo()
 ; spill/fill algorithm is optimized, this test case may not be triggered. And
 ; then we can delete it.
 ; check the spill for Register Class QPair_with_qsub_0_in_FPR128Lo
-define <8 x i16> @test_2xFPR128Lo(i64 %got, i64* %ptr, <1 x i64> %a) {
-  tail call void @llvm.aarch64.neon.st2lane.v1i64.p0i64(<1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i64 0, i64* %ptr)
+define <8 x i16> @test_2xFPR128Lo(i64 %got, ptr %ptr, <1 x i64> %a) {
+  tail call void @llvm.aarch64.neon.st2lane.v1i64.p0(<1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i64 0, ptr %ptr)
   tail call void @foo()
   %sv = shufflevector <1 x i64> zeroinitializer, <1 x i64> %a, <2 x i32> <i32 0, i32 1>
   %1 = bitcast <2 x i64> %sv to <8 x i16>
@@ -149,8 +149,8 @@ define <8 x i16> @test_2xFPR128Lo(i64 %got, i64* %ptr, <1 x i64> %a) {
 }
 
 ; check the spill for Register Class QTriple_with_qsub_0_in_FPR128Lo
-define <8 x i16> @test_3xFPR128Lo(i64 %got, i64* %ptr, <1 x i64> %a) {
-  tail call void @llvm.aarch64.neon.st3lane.v1i64.p0i64(<1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i64 0, i64* %ptr)
+define <8 x i16> @test_3xFPR128Lo(i64 %got, ptr %ptr, <1 x i64> %a) {
+  tail call void @llvm.aarch64.neon.st3lane.v1i64.p0(<1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i64 0, ptr %ptr)
   tail call void @foo()
   %sv = shufflevector <1 x i64> zeroinitializer, <1 x i64> %a, <2 x i32> <i32 0, i32 1>
   %1 = bitcast <2 x i64> %sv to <8 x i16>
@@ -160,8 +160,8 @@ define <8 x i16> @test_3xFPR128Lo(i64 %got, i64* %ptr, <1 x i64> %a) {
 }
 
 ; check the spill for Register Class QQuad_with_qsub_0_in_FPR128Lo
-define <8 x i16> @test_4xFPR128Lo(i64 %got, i64* %ptr, <1 x i64> %a) {
-  tail call void @llvm.aarch64.neon.st4lane.v1i64.p0i64(<1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i64 0, i64* %ptr)
+define <8 x i16> @test_4xFPR128Lo(i64 %got, ptr %ptr, <1 x i64> %a) {
+  tail call void @llvm.aarch64.neon.st4lane.v1i64.p0(<1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i64 0, ptr %ptr)
   tail call void @foo()
   %sv = shufflevector <1 x i64> zeroinitializer, <1 x i64> %a, <2 x i32> <i32 0, i32 1>
   %1 = bitcast <2 x i64> %sv to <8 x i16>
@@ -170,6 +170,6 @@ define <8 x i16> @test_4xFPR128Lo(i64 %got, i64* %ptr, <1 x i64> %a) {
   ret <8 x i16> %3
 }
 
-declare void @llvm.aarch64.neon.st2lane.v1i64.p0i64(<1 x i64>, <1 x i64>, i64, i64*)
-declare void @llvm.aarch64.neon.st3lane.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, i64, i64*)
-declare void @llvm.aarch64.neon.st4lane.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64, i64*)
+declare void @llvm.aarch64.neon.st2lane.v1i64.p0(<1 x i64>, <1 x i64>, i64, ptr)
+declare void @llvm.aarch64.neon.st3lane.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>, i64, ptr)
+declare void @llvm.aarch64.neon.st4lane.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64, ptr)
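
The hunks above all apply one mechanical rewrite: typed pointer types such as i64* or i8* become the opaque ptr type, and overloaded intrinsic names drop the pointee-type suffix (p0i64 becomes p0, p0i8 becomes p0, and so on). A minimal before/after sketch of that shape, assuming a hypothetical @load_it function that is not part of this commit (the ld2 declarations are the ones converted above):

; before (typed pointers)
define i64 @load_it(i64* %p) {
  %v = load i64, i64* %p
  ret i64 %v
}
declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32*)

; after (opaque pointers)
define i64 @load_it(ptr %p) {
  %v = load i64, ptr %p
  ret i64 %v
}
declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0(ptr)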

diff  --git a/llvm/test/CodeGen/AArch64/arm64-neon-vector-shuffle-extract.ll b/llvm/test/CodeGen/AArch64/arm64-neon-vector-shuffle-extract.ll
index 2be8b014ebbee..3e6d6418db7b4 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-vector-shuffle-extract.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-vector-shuffle-extract.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-unknown-linux -o - | FileCheck %s
 
-define void @test(i32* %p1, i32* %p2) {
+define void @test(ptr %p1, ptr %p2) {
 ; CHECK-LABEL: test:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #3
@@ -15,8 +15,8 @@ define void @test(i32* %p1, i32* %p2) {
   %tmp4 = shufflevector <6 x i32> undef, <6 x i32> %tmp3, <9 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
   %tmp6 = extractelement <9 x i32> %tmp4, i32 7
   %tmp8 = extractelement <9 x i32> %tmp4, i32 8
-  store i32 %tmp6, i32* %p1, align 4
-  store i32 %tmp8, i32* %p2, align 4
+  store i32 %tmp6, ptr %p1, align 4
+  store i32 %tmp8, ptr %p2, align 4
   ret void
 }
 
@@ -32,7 +32,7 @@ define <4 x i32> @widen_shuffles_reduced(<3 x i32> %x, <3 x i32> %y) {
   ret <4 x i32> %s3
 }
 
-define void @zip_mask_check(<3 x float>* %p1, <3 x float>* %p2, i32* %p3) {
+define void @zip_mask_check(ptr %p1, ptr %p2, ptr %p3) {
 ; CHECK-LABEL: zip_mask_check:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -42,8 +42,8 @@ define void @zip_mask_check(<3 x float>* %p1, <3 x float>* %p2, i32* %p3) {
 ; CHECK-NEXT:    fmla v0.4s, v0.4s, v0.4s
 ; CHECK-NEXT:    str s0, [x2]
 ; CHECK-NEXT:    ret
-  %tmp3 = load <3 x float>, <3 x float>* %p1, align 16
-  %tmp4 = load <3 x float>, <3 x float>* %p2, align 4
+  %tmp3 = load <3 x float>, ptr %p1, align 16
+  %tmp4 = load <3 x float>, ptr %p2, align 4
   %tmp5 = shufflevector <3 x float> %tmp3, <3 x float> %tmp4, <4 x i32> <i32 1, i32 4, i32 undef, i32 undef>
   %tmp6 = shufflevector <4 x float> %tmp5, <4 x float> undef, <4 x i32> <i32 0, i32 1, i32 5, i32 undef>
   %tmp7 = shufflevector <4 x float> %tmp6, <4 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 5>
@@ -52,7 +52,7 @@ define void @zip_mask_check(<3 x float>* %p1, <3 x float>* %p2, i32* %p3) {
   %tmp10 = shufflevector <4 x float> %tmp9, <4 x float> undef, <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %tmp11 = bitcast <16 x float> %tmp10 to <16 x i32>
   %tmp12 = extractelement <16 x i32> %tmp11, i32 0
-  store i32 %tmp12, i32* %p3, align 4
+  store i32 %tmp12, ptr %p3, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-nvcast.ll b/llvm/test/CodeGen/AArch64/arm64-nvcast.ll
index c87cb5bd6a806..527393657530b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-nvcast.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-nvcast.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-apple-ios -aarch64-enable-collect-loh=false | FileCheck %s
 
-define void @test(float * %p1, i32 %v1) {
+define void @test(ptr %p1, i32 %v1) {
 ; CHECK-LABEL: test:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -17,11 +17,11 @@ define void @test(float * %p1, i32 %v1) {
 ; CHECK-NEXT:    ret
 entry:
   %v2 = extractelement <3 x float> <float 0.000000e+00, float 2.000000e+00, float 0.000000e+00>, i32 %v1
-  store float %v2, float* %p1, align 4
+  store float %v2, ptr %p1, align 4
   ret void
 }
 
-define void @test2(float * %p1, i32 %v1) {
+define void @test2(ptr %p1, i32 %v1) {
 ; CHECK-LABEL: test2:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -37,7 +37,7 @@ define void @test2(float * %p1, i32 %v1) {
 ; CHECK-NEXT:    ret
 entry:
   %v2 = extractelement <3 x float> <float 0.7470588088035583, float 0.7470588088035583, float 0.7470588088035583>, i32 %v1
-  store float %v2, float* %p1, align 4
+  store float %v2, ptr %p1, align 4
   ret void
 }
 
@@ -57,21 +57,20 @@ define internal void @nvcast_f32_v8i8() {
 ; CHECK-NEXT:    str d0, [x8]
 ; CHECK-NEXT:    ret
 entry:
-  store <2 x float> <float 0xC7DFDFDFC0000000, float 0xC7DFDFDFC0000000>, <2 x float>* bitcast (%"st1"* @_gv to <2 x float>*), align 8
+  store <2 x float> <float 0xC7DFDFDFC0000000, float 0xC7DFDFDFC0000000>, ptr @_gv, align 8
   ret void
 }
 
 %struct.Vector3 = type { float, float, float }
 
-define void @nvcast_v2f32_v1f64(%struct.Vector3*) {
+define void @nvcast_v2f32_v1f64(ptr) {
 ; CHECK-LABEL: nvcast_v2f32_v1f64:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    fmov.2s v0, #1.00000000
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %a13 = bitcast %struct.Vector3* %0 to <1 x double>*
-  store <1 x double> <double 0x3F8000003F800000>, <1 x double>* %a13, align 8
+  store <1 x double> <double 0x3F8000003F800000>, ptr %0, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-pic-local-symbol.ll b/llvm/test/CodeGen/AArch64/arm64-pic-local-symbol.ll
index dae243e8da2ca..7bdc88b249e1e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-pic-local-symbol.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-pic-local-symbol.ll
@@ -7,7 +7,7 @@ define i32 @get() {
 ; CHECK: get:
 ; CHECK: adrp x{{[0-9]+}}, a
 ; CHECK-NEXT: ldr w{{[0-9]+}}, [x{{[0-9]}}, :lo12:a]
-  %res = load i32, i32* @a, align 4
+  %res = load i32, ptr @a, align 4
   ret i32 %res
 }
 
@@ -15,8 +15,8 @@ define void @foo() nounwind {
 ; CHECK: foo:
 ; CHECK: adrp x{{[0-9]}}, .L.str
 ; CHECK-NEXT: add x{{[0-9]}}, x{{[0-9]}}, :lo12:.L.str
-  tail call void @bar(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i64 0, i64 0))
+  tail call void @bar(ptr @.str)
   ret void
 }
 
-declare void @bar(i8*)
+declare void @bar(ptr)

diff  --git a/llvm/test/CodeGen/AArch64/arm64-platform-reg.ll b/llvm/test/CodeGen/AArch64/arm64-platform-reg.ll
index 95f46b7904c6d..713c9e8592350 100644
--- a/llvm/test/CodeGen/AArch64/arm64-platform-reg.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-platform-reg.ll
@@ -115,8 +115,8 @@
 @var = global [30 x i64] zeroinitializer
 
 define void @keep_live() {
-  %val = load volatile [30 x i64], [30 x i64]* @var
-  store volatile [30 x i64] %val, [30 x i64]* @var
+  %val = load volatile [30 x i64], ptr @var
+  store volatile [30 x i64] %val, ptr @var
 
 ; CHECK: ldr x18
 ; CHECK: str x18

diff  --git a/llvm/test/CodeGen/AArch64/arm64-prefetch.ll b/llvm/test/CodeGen/AArch64/arm64-prefetch.ll
index 3d526640613e3..55652e3de5a17 100644
--- a/llvm/test/CodeGen/AArch64/arm64-prefetch.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-prefetch.ll
@@ -1,122 +1,109 @@
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 ; RUN: llc -O0 --global-isel-abort=1 < %s -mtriple=arm64-eabi | FileCheck %s
 
-@a = common global i32* null, align 8
+@a = common global ptr null, align 8
 
 define void @test(i32 %i, i32 %j) nounwind ssp {
 entry:
   ; CHECK: @test
   %j.addr = alloca i32, align 4
-  store i32 %j, i32* %j.addr, align 4, !tbaa !0
-  %tmp = bitcast i32* %j.addr to i8*
+  store i32 %j, ptr %j.addr, align 4, !tbaa !0
   ; CHECK: prfum pldl1strm
-  call void @llvm.prefetch(i8* %tmp, i32 0, i32 0, i32 1)
+  call void @llvm.prefetch(ptr %j.addr, i32 0, i32 0, i32 1)
   ; CHECK: prfum pldl3keep
-  call void @llvm.prefetch(i8* %tmp, i32 0, i32 1, i32 1)
+  call void @llvm.prefetch(ptr %j.addr, i32 0, i32 1, i32 1)
   ; CHECK: prfum pldl2keep
-  call void @llvm.prefetch(i8* %tmp, i32 0, i32 2, i32 1)
+  call void @llvm.prefetch(ptr %j.addr, i32 0, i32 2, i32 1)
   ; CHECK: prfum pldl1keep
-  call void @llvm.prefetch(i8* %tmp, i32 0, i32 3, i32 1)
+  call void @llvm.prefetch(ptr %j.addr, i32 0, i32 3, i32 1)
 
   ; CHECK: prfum plil1strm
-  call void @llvm.prefetch(i8* %tmp, i32 0, i32 0, i32 0)
+  call void @llvm.prefetch(ptr %j.addr, i32 0, i32 0, i32 0)
   ; CHECK: prfum plil3keep
-  call void @llvm.prefetch(i8* %tmp, i32 0, i32 1, i32 0)
+  call void @llvm.prefetch(ptr %j.addr, i32 0, i32 1, i32 0)
   ; CHECK: prfum plil2keep
-  call void @llvm.prefetch(i8* %tmp, i32 0, i32 2, i32 0)
+  call void @llvm.prefetch(ptr %j.addr, i32 0, i32 2, i32 0)
   ; CHECK: prfum plil1keep
-  call void @llvm.prefetch(i8* %tmp, i32 0, i32 3, i32 0)
+  call void @llvm.prefetch(ptr %j.addr, i32 0, i32 3, i32 0)
 
   ; CHECK: prfum pstl1strm
-  call void @llvm.prefetch(i8* %tmp, i32 1, i32 0, i32 1)
+  call void @llvm.prefetch(ptr %j.addr, i32 1, i32 0, i32 1)
   ; CHECK: prfum pstl3keep
-  call void @llvm.prefetch(i8* %tmp, i32 1, i32 1, i32 1)
+  call void @llvm.prefetch(ptr %j.addr, i32 1, i32 1, i32 1)
   ; CHECK: prfum pstl2keep
-  call void @llvm.prefetch(i8* %tmp, i32 1, i32 2, i32 1)
+  call void @llvm.prefetch(ptr %j.addr, i32 1, i32 2, i32 1)
   ; CHECK: prfum pstl1keep
-  call void @llvm.prefetch(i8* %tmp, i32 1, i32 3, i32 1)
+  call void @llvm.prefetch(ptr %j.addr, i32 1, i32 3, i32 1)
 
-  %tmp1 = load i32, i32* %j.addr, align 4, !tbaa !0
+  %tmp1 = load i32, ptr %j.addr, align 4, !tbaa !0
   %add = add nsw i32 %tmp1, %i
   %idxprom = sext i32 %add to i64
-  %tmp2 = load i32*, i32** @a, align 8, !tbaa !3
-  %arrayidx = getelementptr inbounds i32, i32* %tmp2, i64 %idxprom
-  %tmp3 = bitcast i32* %arrayidx to i8*
+  %tmp2 = load ptr, ptr @a, align 8, !tbaa !3
+  %arrayidx = getelementptr inbounds i32, ptr %tmp2, i64 %idxprom
 
   ; CHECK: prfm pldl1strm
-  call void @llvm.prefetch(i8* %tmp3, i32 0, i32 0, i32 1)
-  %tmp4 = load i32*, i32** @a, align 8, !tbaa !3
-  %arrayidx3 = getelementptr inbounds i32, i32* %tmp4, i64 %idxprom
-  %tmp5 = bitcast i32* %arrayidx3 to i8*
+  call void @llvm.prefetch(ptr %arrayidx, i32 0, i32 0, i32 1)
+  %tmp4 = load ptr, ptr @a, align 8, !tbaa !3
+  %arrayidx3 = getelementptr inbounds i32, ptr %tmp4, i64 %idxprom
 
   ; CHECK: prfm pldl3keep
-  call void @llvm.prefetch(i8* %tmp5, i32 0, i32 1, i32 1)
-  %tmp6 = load i32*, i32** @a, align 8, !tbaa !3
-  %arrayidx6 = getelementptr inbounds i32, i32* %tmp6, i64 %idxprom
-  %tmp7 = bitcast i32* %arrayidx6 to i8*
+  call void @llvm.prefetch(ptr %arrayidx3, i32 0, i32 1, i32 1)
+  %tmp6 = load ptr, ptr @a, align 8, !tbaa !3
+  %arrayidx6 = getelementptr inbounds i32, ptr %tmp6, i64 %idxprom
 
   ; CHECK: prfm pldl2keep
-  call void @llvm.prefetch(i8* %tmp7, i32 0, i32 2, i32 1)
-  %tmp8 = load i32*, i32** @a, align 8, !tbaa !3
-  %arrayidx9 = getelementptr inbounds i32, i32* %tmp8, i64 %idxprom
-  %tmp9 = bitcast i32* %arrayidx9 to i8*
+  call void @llvm.prefetch(ptr %arrayidx6, i32 0, i32 2, i32 1)
+  %tmp8 = load ptr, ptr @a, align 8, !tbaa !3
+  %arrayidx9 = getelementptr inbounds i32, ptr %tmp8, i64 %idxprom
 
   ; CHECK: prfm pldl1keep
-  call void @llvm.prefetch(i8* %tmp9, i32 0, i32 3, i32 1)
-  %tmp10 = load i32*, i32** @a, align 8, !tbaa !3
-  %arrayidx12 = getelementptr inbounds i32, i32* %tmp10, i64 %idxprom
-  %tmp11 = bitcast i32* %arrayidx12 to i8*
+  call void @llvm.prefetch(ptr %arrayidx9, i32 0, i32 3, i32 1)
+  %tmp10 = load ptr, ptr @a, align 8, !tbaa !3
+  %arrayidx12 = getelementptr inbounds i32, ptr %tmp10, i64 %idxprom
 
 
   ; CHECK: prfm plil1strm
-  call void @llvm.prefetch(i8* %tmp11, i32 0, i32 0, i32 0)
-  %tmp12 = load i32*, i32** @a, align 8, !tbaa !3
-  %arrayidx15 = getelementptr inbounds i32, i32* %tmp12, i64 %idxprom
-  %tmp13 = bitcast i32* %arrayidx3 to i8*
+  call void @llvm.prefetch(ptr %arrayidx12, i32 0, i32 0, i32 0)
+  %tmp12 = load ptr, ptr @a, align 8, !tbaa !3
+  %arrayidx15 = getelementptr inbounds i32, ptr %tmp12, i64 %idxprom
 
   ; CHECK: prfm plil3keep
-  call void @llvm.prefetch(i8* %tmp13, i32 0, i32 1, i32 0)
-  %tmp14 = load i32*, i32** @a, align 8, !tbaa !3
-  %arrayidx18 = getelementptr inbounds i32, i32* %tmp14, i64 %idxprom
-  %tmp15 = bitcast i32* %arrayidx6 to i8*
+  call void @llvm.prefetch(ptr %arrayidx3, i32 0, i32 1, i32 0)
+  %tmp14 = load ptr, ptr @a, align 8, !tbaa !3
+  %arrayidx18 = getelementptr inbounds i32, ptr %tmp14, i64 %idxprom
 
   ; CHECK: prfm plil2keep
-  call void @llvm.prefetch(i8* %tmp15, i32 0, i32 2, i32 0)
-  %tmp16 = load i32*, i32** @a, align 8, !tbaa !3
-  %arrayidx21 = getelementptr inbounds i32, i32* %tmp16, i64 %idxprom
-  %tmp17 = bitcast i32* %arrayidx9 to i8*
+  call void @llvm.prefetch(ptr %arrayidx6, i32 0, i32 2, i32 0)
+  %tmp16 = load ptr, ptr @a, align 8, !tbaa !3
+  %arrayidx21 = getelementptr inbounds i32, ptr %tmp16, i64 %idxprom
 
   ; CHECK: prfm plil1keep
-  call void @llvm.prefetch(i8* %tmp17, i32 0, i32 3, i32 0)
-  %tmp18 = load i32*, i32** @a, align 8, !tbaa !3
-  %arrayidx24 = getelementptr inbounds i32, i32* %tmp18, i64 %idxprom
-  %tmp19 = bitcast i32* %arrayidx12 to i8*
+  call void @llvm.prefetch(ptr %arrayidx9, i32 0, i32 3, i32 0)
+  %tmp18 = load ptr, ptr @a, align 8, !tbaa !3
+  %arrayidx24 = getelementptr inbounds i32, ptr %tmp18, i64 %idxprom
 
 
   ; CHECK: prfm pstl1strm
-  call void @llvm.prefetch(i8* %tmp19, i32 1, i32 0, i32 1)
-  %tmp20 = load i32*, i32** @a, align 8, !tbaa !3
-  %arrayidx27 = getelementptr inbounds i32, i32* %tmp20, i64 %idxprom
-  %tmp21 = bitcast i32* %arrayidx15 to i8*
+  call void @llvm.prefetch(ptr %arrayidx12, i32 1, i32 0, i32 1)
+  %tmp20 = load ptr, ptr @a, align 8, !tbaa !3
+  %arrayidx27 = getelementptr inbounds i32, ptr %tmp20, i64 %idxprom
 
   ; CHECK: prfm pstl3keep
-  call void @llvm.prefetch(i8* %tmp21, i32 1, i32 1, i32 1)
-  %tmp22 = load i32*, i32** @a, align 8, !tbaa !3
-  %arrayidx30 = getelementptr inbounds i32, i32* %tmp22, i64 %idxprom
-  %tmp23 = bitcast i32* %arrayidx18 to i8*
+  call void @llvm.prefetch(ptr %arrayidx15, i32 1, i32 1, i32 1)
+  %tmp22 = load ptr, ptr @a, align 8, !tbaa !3
+  %arrayidx30 = getelementptr inbounds i32, ptr %tmp22, i64 %idxprom
 
   ; CHECK: prfm pstl2keep
-  call void @llvm.prefetch(i8* %tmp23, i32 1, i32 2, i32 1)
-  %tmp24 = load i32*, i32** @a, align 8, !tbaa !3
-  %arrayidx33 = getelementptr inbounds i32, i32* %tmp24, i64 %idxprom
-  %tmp25 = bitcast i32* %arrayidx21 to i8*
+  call void @llvm.prefetch(ptr %arrayidx18, i32 1, i32 2, i32 1)
+  %tmp24 = load ptr, ptr @a, align 8, !tbaa !3
+  %arrayidx33 = getelementptr inbounds i32, ptr %tmp24, i64 %idxprom
 
   ; CHECK: prfm pstl1keep
-  call void @llvm.prefetch(i8* %tmp25, i32 1, i32 3, i32 1)
+  call void @llvm.prefetch(ptr %arrayidx21, i32 1, i32 3, i32 1)
   ret void
 }
 
-declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) nounwind
+declare void @llvm.prefetch(ptr nocapture, i32, i32, i32) nounwind
 
 !0 = !{!"int", !1}
 !1 = !{!"omnipotent char", !2}

diff  --git a/llvm/test/CodeGen/AArch64/arm64-preserve-most.ll b/llvm/test/CodeGen/AArch64/arm64-preserve-most.ll
index 7085bf3962b79..1a20dbfd3be24 100644
--- a/llvm/test/CodeGen/AArch64/arm64-preserve-most.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-preserve-most.ll
@@ -31,8 +31,8 @@ entry:
   %v = alloca i32, align 4
   call void asm sideeffect "mov x9, $0", "N,~{x9}"(i32 48879) #2
   call preserve_mostcc void @preserve_most()
-  %0 = load i32, i32* %v, align 4
+  %0 = load i32, ptr %v, align 4
   %1 = call i32 asm sideeffect "mov ${0:w}, w9", "=r,r"(i32 %0) #2
-  store i32 %1, i32* %v, align 4
+  store i32 %1, ptr %v, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-promote-const.ll b/llvm/test/CodeGen/AArch64/arm64-promote-const.ll
index 431227a0273a2..93ff057a863bf 100644
--- a/llvm/test/CodeGen/AArch64/arm64-promote-const.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-promote-const.ll
@@ -160,12 +160,11 @@ if.end:                                           ; preds = %entry, %if.then
   ret <16 x i8> %mul.i
 }
 
-define void @accessBig(i64* %storage) {
+define void @accessBig(ptr %storage) {
 ; PROMOTED-LABEL: accessBig:
 ; PROMOTED: adrp
 ; PROMOTED: ret
-  %addr = bitcast i64* %storage to <1 x i80>*
-  store <1 x i80> <i80 483673642326615442599424>, <1 x i80>* %addr
+  store <1 x i80> <i80 483673642326615442599424>, ptr %storage
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-redzone.ll b/llvm/test/CodeGen/AArch64/arm64-redzone.ll
index dcb839f4cdd02..fe30a1a98521e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-redzone.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-redzone.ll
@@ -7,12 +7,12 @@ define i32 @foo(i32 %a, i32 %b) nounwind ssp {
   %a.addr = alloca i32, align 4
   %b.addr = alloca i32, align 4
   %x = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  store i32 %b, i32* %b.addr, align 4
-  %tmp = load i32, i32* %a.addr, align 4
-  %tmp1 = load i32, i32* %b.addr, align 4
+  store i32 %a, ptr %a.addr, align 4
+  store i32 %b, ptr %b.addr, align 4
+  %tmp = load i32, ptr %a.addr, align 4
+  %tmp1 = load i32, ptr %b.addr, align 4
   %add = add nsw i32 %tmp, %tmp1
-  store i32 %add, i32* %x, align 4
-  %tmp2 = load i32, i32* %x, align 4
+  store i32 %add, ptr %x, align 4
+  %tmp2 = load i32, ptr %x, align 4
   ret i32 %tmp2
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-register-offset-addressing.ll b/llvm/test/CodeGen/AArch64/arm64-register-offset-addressing.ll
index 61ffad574efe9..cfd507c60831d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-register-offset-addressing.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-register-offset-addressing.ll
@@ -1,139 +1,139 @@
 ; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s
 
-define i8 @test_64bit_add(i16* %a, i64 %b) {
+define i8 @test_64bit_add(ptr %a, i64 %b) {
 ; CHECK-LABEL: test_64bit_add:
 ; CHECK: ldrh w0, [x0, x1, lsl #1]
 ; CHECK: ret
-  %tmp1 = getelementptr inbounds i16, i16* %a, i64 %b
-  %tmp2 = load i16, i16* %tmp1
+  %tmp1 = getelementptr inbounds i16, ptr %a, i64 %b
+  %tmp2 = load i16, ptr %tmp1
   %tmp3 = trunc i16 %tmp2 to i8
   ret i8 %tmp3
 }
 
 ; These tests are trying to form SEXT and ZEXT operations that never leave i64
 ; space, to make sure LLVM can adapt the offset register correctly.
-define void @ldst_8bit(i8* %base, i64 %offset) minsize {
+define void @ldst_8bit(ptr %base, i64 %offset) minsize {
 ; CHECK-LABEL: ldst_8bit:
 
    %off32.sext.tmp = shl i64 %offset, 32
    %off32.sext = ashr i64 %off32.sext.tmp, 32
-   %addr8_sxtw = getelementptr i8, i8* %base, i64 %off32.sext
-   %val8_sxtw = load volatile i8, i8* %addr8_sxtw
+   %addr8_sxtw = getelementptr i8, ptr %base, i64 %off32.sext
+   %val8_sxtw = load volatile i8, ptr %addr8_sxtw
    %val32_signed = sext i8 %val8_sxtw to i32
-   store volatile i32 %val32_signed, i32* @var_32bit
+   store volatile i32 %val32_signed, ptr @var_32bit
 ; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
 
-  %addrint_uxtw = ptrtoint i8* %base to i64
+  %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = and i64 %offset, 4294967295
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
-  %addr_uxtw = inttoptr i64 %addrint1_uxtw to i8*
-  %val8_uxtw = load volatile i8, i8* %addr_uxtw
+  %addr_uxtw = inttoptr i64 %addrint1_uxtw to ptr
+  %val8_uxtw = load volatile i8, ptr %addr_uxtw
   %newval8 = add i8 %val8_uxtw, 1
-  store volatile i8 %newval8, i8* @var_8bit
+  store volatile i8 %newval8, ptr @var_8bit
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
 
    ret void
 }
 
 
-define void @ldst_16bit(i16* %base, i64 %offset) minsize {
+define void @ldst_16bit(ptr %base, i64 %offset) minsize {
 ; CHECK-LABEL: ldst_16bit:
 
-  %addrint_uxtw = ptrtoint i16* %base to i64
+  %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = and i64 %offset, 4294967295
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
-  %addr_uxtw = inttoptr i64 %addrint1_uxtw to i16*
-  %val8_uxtw = load volatile i16, i16* %addr_uxtw
+  %addr_uxtw = inttoptr i64 %addrint1_uxtw to ptr
+  %val8_uxtw = load volatile i16, ptr %addr_uxtw
   %newval8 = add i16 %val8_uxtw, 1
-  store volatile i16 %newval8, i16* @var_16bit
+  store volatile i16 %newval8, ptr @var_16bit
 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
 
-  %base_sxtw = ptrtoint i16* %base to i64
+  %base_sxtw = ptrtoint ptr %base to i64
   %offset_sxtw.tmp = shl i64 %offset, 32
   %offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
   %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
-  %addr_sxtw = inttoptr i64 %addrint_sxtw to i16*
-  %val16_sxtw = load volatile i16, i16* %addr_sxtw
+  %addr_sxtw = inttoptr i64 %addrint_sxtw to ptr
+  %val16_sxtw = load volatile i16, ptr %addr_sxtw
   %val64_signed = sext i16 %val16_sxtw to i64
-  store volatile i64 %val64_signed, i64* @var_64bit
+  store volatile i64 %val64_signed, ptr @var_64bit
 ; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
 
 
-  %base_uxtwN = ptrtoint i16* %base to i64
+  %base_uxtwN = ptrtoint ptr %base to i64
   %offset_uxtwN = and i64 %offset, 4294967295
   %offset2_uxtwN = shl i64 %offset_uxtwN, 1
   %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
-  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i16*
-  %val32 = load volatile i32, i32* @var_32bit
+  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to ptr
+  %val32 = load volatile i32, ptr @var_32bit
   %val16_trunc32 = trunc i32 %val32 to i16
-  store volatile i16 %val16_trunc32, i16* %addr_uxtwN
+  store volatile i16 %val16_trunc32, ptr %addr_uxtwN
 ; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #1]
    ret void
 }
 
-define void @ldst_32bit(i32* %base, i64 %offset) minsize {
+define void @ldst_32bit(ptr %base, i64 %offset) minsize {
 ; CHECK-LABEL: ldst_32bit:
 
-  %addrint_uxtw = ptrtoint i32* %base to i64
+  %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = and i64 %offset, 4294967295
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
-  %addr_uxtw = inttoptr i64 %addrint1_uxtw to i32*
-  %val32_uxtw = load volatile i32, i32* %addr_uxtw
+  %addr_uxtw = inttoptr i64 %addrint1_uxtw to ptr
+  %val32_uxtw = load volatile i32, ptr %addr_uxtw
   %newval32 = add i32 %val32_uxtw, 1
-  store volatile i32 %newval32, i32* @var_32bit
+  store volatile i32 %newval32, ptr @var_32bit
 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
 
-  %base_sxtw = ptrtoint i32* %base to i64
+  %base_sxtw = ptrtoint ptr %base to i64
   %offset_sxtw.tmp = shl i64 %offset, 32
   %offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
   %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
-  %addr_sxtw = inttoptr i64 %addrint_sxtw to i32*
-  %val32_sxtw = load volatile i32, i32* %addr_sxtw
+  %addr_sxtw = inttoptr i64 %addrint_sxtw to ptr
+  %val32_sxtw = load volatile i32, ptr %addr_sxtw
   %val64_signed = sext i32 %val32_sxtw to i64
-  store volatile i64 %val64_signed, i64* @var_64bit
+  store volatile i64 %val64_signed, ptr @var_64bit
 ; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
 
 
-  %base_uxtwN = ptrtoint i32* %base to i64
+  %base_uxtwN = ptrtoint ptr %base to i64
   %offset_uxtwN = and i64 %offset, 4294967295
   %offset2_uxtwN = shl i64 %offset_uxtwN, 2
   %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
-  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i32*
-  %val32 = load volatile i32, i32* @var_32bit
-  store volatile i32 %val32, i32* %addr_uxtwN
+  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to ptr
+  %val32 = load volatile i32, ptr @var_32bit
+  store volatile i32 %val32, ptr %addr_uxtwN
 ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #2]
    ret void
 }
 
-define void @ldst_64bit(i64* %base, i64 %offset) minsize {
+define void @ldst_64bit(ptr %base, i64 %offset) minsize {
 ; CHECK-LABEL: ldst_64bit:
 
-  %addrint_uxtw = ptrtoint i64* %base to i64
+  %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = and i64 %offset, 4294967295
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
-  %addr_uxtw = inttoptr i64 %addrint1_uxtw to i64*
-  %val64_uxtw = load volatile i64, i64* %addr_uxtw
+  %addr_uxtw = inttoptr i64 %addrint1_uxtw to ptr
+  %val64_uxtw = load volatile i64, ptr %addr_uxtw
   %newval8 = add i64 %val64_uxtw, 1
-  store volatile i64 %newval8, i64* @var_64bit
+  store volatile i64 %newval8, ptr @var_64bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
 
-  %base_sxtw = ptrtoint i64* %base to i64
+  %base_sxtw = ptrtoint ptr %base to i64
   %offset_sxtw.tmp = shl i64 %offset, 32
   %offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
   %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
-  %addr_sxtw = inttoptr i64 %addrint_sxtw to i64*
-  %val64_sxtw = load volatile i64, i64* %addr_sxtw
-  store volatile i64 %val64_sxtw, i64* @var_64bit
+  %addr_sxtw = inttoptr i64 %addrint_sxtw to ptr
+  %val64_sxtw = load volatile i64, ptr %addr_sxtw
+  store volatile i64 %val64_sxtw, ptr @var_64bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
 
 
-  %base_uxtwN = ptrtoint i64* %base to i64
+  %base_uxtwN = ptrtoint ptr %base to i64
   %offset_uxtwN = and i64 %offset, 4294967295
   %offset2_uxtwN = shl i64 %offset_uxtwN, 3
   %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
-  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i64*
-  %val64 = load volatile i64, i64* @var_64bit
-  store volatile i64 %val64, i64* %addr_uxtwN
+  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to ptr
+  %val64 = load volatile i64, ptr @var_64bit
+  store volatile i64 %val64, ptr %addr_uxtwN
 ; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #3]
    ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-regress-interphase-shift.ll b/llvm/test/CodeGen/AArch64/arm64-regress-interphase-shift.ll
index d4814dc626098..3e7cf4dc83989 100644
--- a/llvm/test/CodeGen/AArch64/arm64-regress-interphase-shift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-regress-interphase-shift.ll
@@ -8,10 +8,10 @@
 ; been reverted (see PR17975).
 ; XFAIL: *
 
-define void @foo(i64* nocapture %d) {
+define void @foo(ptr nocapture %d) {
 ; CHECK-LABEL: foo:
 ; CHECK: rorv
-  %tmp = load i64, i64* undef, align 8
+  %tmp = load i64, ptr undef, align 8
   %sub397 = sub i64 0, %tmp
   %and398 = and i64 %sub397, 4294967295
   %shr404 = lshr i64 %and398, 0
@@ -28,6 +28,6 @@ define void @foo(i64* nocapture %d) {
   %or438 = or i64 %shl434, %shr437
   %xor439 = xor i64 %or438, %xor428
   %sub441 = sub i64 %xor439, 0
-  store i64 %sub441, i64* %d, align 8
+  store i64 %sub441, ptr %d, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-reserve-call-saved-reg.ll b/llvm/test/CodeGen/AArch64/arm64-reserve-call-saved-reg.ll
index ce9d2c7171d2e..85b179d631e1c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-reserve-call-saved-reg.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-reserve-call-saved-reg.ll
@@ -45,8 +45,8 @@ define void @foo() {
 ; CHECK-X15-NOT: str x15, [sp
 ; CHECK-X18-NOT: str x18, [sp
 
-  %val = load volatile [30 x i64], [30 x i64]* @var
-  store volatile [30 x i64] %val, [30 x i64]* @var
+  %val = load volatile [30 x i64], ptr @var
+  store volatile [30 x i64] %val, ptr @var
 
 ; CHECK-X9-NOT: ldr x9
 ; CHECK-X10-NOT: ldr x10

diff  --git a/llvm/test/CodeGen/AArch64/arm64-reserved-arg-reg-call-error.ll b/llvm/test/CodeGen/AArch64/arm64-reserved-arg-reg-call-error.ll
index b98b11e180e20..98160bb9300cf 100644
--- a/llvm/test/CodeGen/AArch64/arm64-reserved-arg-reg-call-error.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-reserved-arg-reg-call-error.ll
@@ -12,8 +12,8 @@ declare void @foo()
 
 ; CHECK: error:
 ; CHECK-SAME: AArch64 doesn't support function calls if any of the argument registers is reserved.
-define void @call_memcpy(i8* %out, i8* %in) {
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out, i8* %in, i64 800, i1 false)
+define void @call_memcpy(ptr %out, ptr %in) {
+  call void @llvm.memcpy.p0.p0.i64(ptr %out, ptr %in, i64 800, i1 false)
   ret void
 }
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)

diff  --git a/llvm/test/CodeGen/AArch64/arm64-return-vector.ll b/llvm/test/CodeGen/AArch64/arm64-return-vector.ll
index 2167c6664b9e6..c40a82982cc33 100644
--- a/llvm/test/CodeGen/AArch64/arm64-return-vector.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-return-vector.ll
@@ -2,10 +2,10 @@
 
 ; 2x64 vector should be returned in Q0.
 
-define <2 x double> @test(<2 x double>* %p) nounwind {
+define <2 x double> @test(ptr %p) nounwind {
 ; CHECK: test
 ; CHECK: ldr q0, [x0]
 ; CHECK: ret
-  %tmp1 = load <2 x double>, <2 x double>* %p, align 16
+  %tmp1 = load <2 x double>, ptr %p, align 16
   ret <2 x double> %tmp1
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-returnaddr.ll b/llvm/test/CodeGen/AArch64/arm64-returnaddr.ll
index 006d4a77d8415..db2a134d7a7fb 100644
--- a/llvm/test/CodeGen/AArch64/arm64-returnaddr.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-returnaddr.ll
@@ -1,16 +1,16 @@
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 
-define i8* @rt0(i32 %x) nounwind readnone {
+define ptr @rt0(i32 %x) nounwind readnone {
 entry:
 ; CHECK-LABEL: rt0:
 ; CHECK: hint #7
 ; CHECK: mov x0, x30
 ; CHECK: ret
-  %0 = tail call i8* @llvm.returnaddress(i32 0)
-  ret i8* %0
+  %0 = tail call ptr @llvm.returnaddress(i32 0)
+  ret ptr %0
 }
 
-define i8* @rt2() nounwind readnone {
+define ptr @rt2() nounwind readnone {
 entry:
 ; CHECK-LABEL: rt2:
 ; CHECK: stp x29, x30, [sp, #-16]!
@@ -22,8 +22,8 @@ entry:
 ; CHECK: mov x0, x30
 ; CHECK: ldp x29, x30, [sp], #16
 ; CHECK: ret
-  %0 = tail call i8* @llvm.returnaddress(i32 2)
-  ret i8* %0
+  %0 = tail call ptr @llvm.returnaddress(i32 2)
+  ret ptr %0
 }
 
-declare i8* @llvm.returnaddress(i32) nounwind readnone
+declare ptr @llvm.returnaddress(i32) nounwind readnone

diff  --git a/llvm/test/CodeGen/AArch64/arm64-rev.ll b/llvm/test/CodeGen/AArch64/arm64-rev.ll
index 2e2eb4ae84060..90f937afb5a63 100644
--- a/llvm/test/CodeGen/AArch64/arm64-rev.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-rev.ll
@@ -56,7 +56,7 @@ entry:
   ret i32 %2
 }
 
-define i32 @test_rev_w_srl16_load(i16 *%a) {
+define i32 @test_rev_w_srl16_load(ptr %a) {
 ; CHECK-LABEL: test_rev_w_srl16_load:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrh w8, [x0]
@@ -71,7 +71,7 @@ define i32 @test_rev_w_srl16_load(i16 *%a) {
 ; GISEL-NEXT:    lsr w0, w8, #16
 ; GISEL-NEXT:    ret
 entry:
-  %0 = load i16, i16 *%a
+  %0 = load i16, ptr %a
   %1 = zext i16 %0 to i32
   %2 = tail call i32 @llvm.bswap.i32(i32 %1)
   %3 = lshr i32 %2, 16
@@ -125,7 +125,7 @@ entry:
   ret i64 %2
 }
 
-define i64 @test_rev_x_srl32_load(i32 *%a) {
+define i64 @test_rev_x_srl32_load(ptr %a) {
 ; CHECK-LABEL: test_rev_x_srl32_load:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -140,7 +140,7 @@ define i64 @test_rev_x_srl32_load(i32 *%a) {
 ; GISEL-NEXT:    lsr x0, x8, #32
 ; GISEL-NEXT:    ret
 entry:
-  %0 = load i32, i32 *%a
+  %0 = load i32, ptr %a
   %1 = zext i32 %0 to i64
   %2 = tail call i64 @llvm.bswap.i64(i64 %1)
   %3 = lshr i64 %2, 32
@@ -244,7 +244,7 @@ entry:
   ret i64 %3
 }
 
-define <8 x i8> @test_vrev64D8(<8 x i8>* %A) nounwind {
+define <8 x i8> @test_vrev64D8(ptr %A) nounwind {
 ; CHECK-LABEL: test_vrev64D8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -256,12 +256,12 @@ define <8 x i8> @test_vrev64D8(<8 x i8>* %A) nounwind {
 ; GISEL-NEXT:    ldr d0, [x0]
 ; GISEL-NEXT:    rev64.8b v0, v0
 ; GISEL-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp1 = load <8 x i8>, ptr %A
 	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
 	ret <8 x i8> %tmp2
 }
 
-define <4 x i16> @test_vrev64D16(<4 x i16>* %A) nounwind {
+define <4 x i16> @test_vrev64D16(ptr %A) nounwind {
 ; CHECK-LABEL: test_vrev64D16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -273,12 +273,12 @@ define <4 x i16> @test_vrev64D16(<4 x i16>* %A) nounwind {
 ; GISEL-NEXT:    ldr d0, [x0]
 ; GISEL-NEXT:    rev64.4h v0, v0
 ; GISEL-NEXT:    ret
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp1 = load <4 x i16>, ptr %A
 	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
 	ret <4 x i16> %tmp2
 }
 
-define <2 x i32> @test_vrev64D32(<2 x i32>* %A) nounwind {
+define <2 x i32> @test_vrev64D32(ptr %A) nounwind {
 ; CHECK-LABEL: test_vrev64D32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -290,12 +290,12 @@ define <2 x i32> @test_vrev64D32(<2 x i32>* %A) nounwind {
 ; GISEL-NEXT:    ldr d0, [x0]
 ; GISEL-NEXT:    rev64.2s v0, v0
 ; GISEL-NEXT:    ret
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp1 = load <2 x i32>, ptr %A
 	%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
 	ret <2 x i32> %tmp2
 }
 
-define <2 x float> @test_vrev64Df(<2 x float>* %A) nounwind {
+define <2 x float> @test_vrev64Df(ptr %A) nounwind {
 ; CHECK-LABEL: test_vrev64Df:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -307,12 +307,12 @@ define <2 x float> @test_vrev64Df(<2 x float>* %A) nounwind {
 ; GISEL-NEXT:    ldr d0, [x0]
 ; GISEL-NEXT:    rev64.2s v0, v0
 ; GISEL-NEXT:    ret
-	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp1 = load <2 x float>, ptr %A
 	%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> <i32 1, i32 0>
 	ret <2 x float> %tmp2
 }
 
-define <16 x i8> @test_vrev64Q8(<16 x i8>* %A) nounwind {
+define <16 x i8> @test_vrev64Q8(ptr %A) nounwind {
 ; CHECK-LABEL: test_vrev64Q8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -324,12 +324,12 @@ define <16 x i8> @test_vrev64Q8(<16 x i8>* %A) nounwind {
 ; GISEL-NEXT:    ldr q0, [x0]
 ; GISEL-NEXT:    rev64.16b v0, v0
 ; GISEL-NEXT:    ret
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp1 = load <16 x i8>, ptr %A
 	%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
 	ret <16 x i8> %tmp2
 }
 
-define <8 x i16> @test_vrev64Q16(<8 x i16>* %A) nounwind {
+define <8 x i16> @test_vrev64Q16(ptr %A) nounwind {
 ; CHECK-LABEL: test_vrev64Q16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -341,12 +341,12 @@ define <8 x i16> @test_vrev64Q16(<8 x i16>* %A) nounwind {
 ; GISEL-NEXT:    ldr q0, [x0]
 ; GISEL-NEXT:    rev64.8h v0, v0
 ; GISEL-NEXT:    ret
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp1 = load <8 x i16>, ptr %A
 	%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
 	ret <8 x i16> %tmp2
 }
 
-define <4 x i32> @test_vrev64Q32(<4 x i32>* %A) nounwind {
+define <4 x i32> @test_vrev64Q32(ptr %A) nounwind {
 ; CHECK-LABEL: test_vrev64Q32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -358,12 +358,12 @@ define <4 x i32> @test_vrev64Q32(<4 x i32>* %A) nounwind {
 ; GISEL-NEXT:    ldr q0, [x0]
 ; GISEL-NEXT:    rev64.4s v0, v0
 ; GISEL-NEXT:    ret
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp1 = load <4 x i32>, ptr %A
 	%tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
 	ret <4 x i32> %tmp2
 }
 
-define <4 x float> @test_vrev64Qf(<4 x float>* %A) nounwind {
+define <4 x float> @test_vrev64Qf(ptr %A) nounwind {
 ; CHECK-LABEL: test_vrev64Qf:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -375,12 +375,12 @@ define <4 x float> @test_vrev64Qf(<4 x float>* %A) nounwind {
 ; GISEL-NEXT:    ldr q0, [x0]
 ; GISEL-NEXT:    rev64.4s v0, v0
 ; GISEL-NEXT:    ret
-	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp1 = load <4 x float>, ptr %A
 	%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
 	ret <4 x float> %tmp2
 }
 
-define <8 x i8> @test_vrev32D8(<8 x i8>* %A) nounwind {
+define <8 x i8> @test_vrev32D8(ptr %A) nounwind {
 ; CHECK-LABEL: test_vrev32D8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -392,12 +392,12 @@ define <8 x i8> @test_vrev32D8(<8 x i8>* %A) nounwind {
 ; GISEL-NEXT:    ldr d0, [x0]
 ; GISEL-NEXT:    rev32.8b v0, v0
 ; GISEL-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp1 = load <8 x i8>, ptr %A
 	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
 	ret <8 x i8> %tmp2
 }
 
-define <4 x i16> @test_vrev32D16(<4 x i16>* %A) nounwind {
+define <4 x i16> @test_vrev32D16(ptr %A) nounwind {
 ; CHECK-LABEL: test_vrev32D16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -409,12 +409,12 @@ define <4 x i16> @test_vrev32D16(<4 x i16>* %A) nounwind {
 ; GISEL-NEXT:    ldr d0, [x0]
 ; GISEL-NEXT:    rev32.4h v0, v0
 ; GISEL-NEXT:    ret
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp1 = load <4 x i16>, ptr %A
 	%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
 	ret <4 x i16> %tmp2
 }
 
-define <16 x i8> @test_vrev32Q8(<16 x i8>* %A) nounwind {
+define <16 x i8> @test_vrev32Q8(ptr %A) nounwind {
 ; CHECK-LABEL: test_vrev32Q8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -428,12 +428,12 @@ define <16 x i8> @test_vrev32Q8(<16 x i8>* %A) nounwind {
 ; GISEL-NEXT:    ldr q2, [x8, :lo12:.LCPI21_0]
 ; GISEL-NEXT:    tbl.16b v0, { v0, v1 }, v2
 ; GISEL-NEXT:    ret
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp1 = load <16 x i8>, ptr %A
 	%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
 	ret <16 x i8> %tmp2
 }
 
-define <8 x i16> @test_vrev32Q16(<8 x i16>* %A) nounwind {
+define <8 x i16> @test_vrev32Q16(ptr %A) nounwind {
 ; CHECK-LABEL: test_vrev32Q16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -447,12 +447,12 @@ define <8 x i16> @test_vrev32Q16(<8 x i16>* %A) nounwind {
 ; GISEL-NEXT:    ldr q2, [x8, :lo12:.LCPI22_0]
 ; GISEL-NEXT:    tbl.16b v0, { v0, v1 }, v2
 ; GISEL-NEXT:    ret
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp1 = load <8 x i16>, ptr %A
 	%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
 	ret <8 x i16> %tmp2
 }
 
-define <8 x i8> @test_vrev16D8(<8 x i8>* %A) nounwind {
+define <8 x i8> @test_vrev16D8(ptr %A) nounwind {
 ; CHECK-LABEL: test_vrev16D8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -464,12 +464,12 @@ define <8 x i8> @test_vrev16D8(<8 x i8>* %A) nounwind {
 ; GISEL-NEXT:    ldr d0, [x0]
 ; GISEL-NEXT:    rev16.8b v0, v0
 ; GISEL-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp1 = load <8 x i8>, ptr %A
 	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
 	ret <8 x i8> %tmp2
 }
 
-define <16 x i8> @test_vrev16Q8(<16 x i8>* %A) nounwind {
+define <16 x i8> @test_vrev16Q8(ptr %A) nounwind {
 ; CHECK-LABEL: test_vrev16Q8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -483,14 +483,14 @@ define <16 x i8> @test_vrev16Q8(<16 x i8>* %A) nounwind {
 ; GISEL-NEXT:    ldr q2, [x8, :lo12:.LCPI24_0]
 ; GISEL-NEXT:    tbl.16b v0, { v0, v1 }, v2
 ; GISEL-NEXT:    ret
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp1 = load <16 x i8>, ptr %A
 	%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
 	ret <16 x i8> %tmp2
 }
 
 ; Undef shuffle indices should not prevent matching to VREV:
 
-define <8 x i8> @test_vrev64D8_undef(<8 x i8>* %A) nounwind {
+define <8 x i8> @test_vrev64D8_undef(ptr %A) nounwind {
 ; CHECK-LABEL: test_vrev64D8_undef:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -502,12 +502,12 @@ define <8 x i8> @test_vrev64D8_undef(<8 x i8>* %A) nounwind {
 ; GISEL-NEXT:    ldr d0, [x0]
 ; GISEL-NEXT:    rev64.8b v0, v0
 ; GISEL-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp1 = load <8 x i8>, ptr %A
 	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 undef, i32 undef, i32 4, i32 3, i32 2, i32 1, i32 0>
 	ret <8 x i8> %tmp2
 }
 
-define <8 x i16> @test_vrev32Q16_undef(<8 x i16>* %A) nounwind {
+define <8 x i16> @test_vrev32Q16_undef(ptr %A) nounwind {
 ; CHECK-LABEL: test_vrev32Q16_undef:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -521,13 +521,13 @@ define <8 x i16> @test_vrev32Q16_undef(<8 x i16>* %A) nounwind {
 ; GISEL-NEXT:    ldr q2, [x8, :lo12:.LCPI26_0]
 ; GISEL-NEXT:    tbl.16b v0, { v0, v1 }, v2
 ; GISEL-NEXT:    ret
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp1 = load <8 x i16>, ptr %A
 	%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 5, i32 4, i32 7, i32 undef>
 	ret <8 x i16> %tmp2
 }
 
 ; vrev <4 x i16> should use REV32 and not REV64
-define void @test_vrev64(<4 x i16>* nocapture %source, <2 x i16>* nocapture %dst) nounwind ssp {
+define void @test_vrev64(ptr nocapture %source, ptr nocapture %dst) nounwind ssp {
 ; CHECK-LABEL: test_vrev64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    add x8, x1, #2
@@ -544,18 +544,17 @@ define void @test_vrev64(<4 x i16>* nocapture %source, <2 x i16>* nocapture %dst
 ; GISEL-NEXT:    st1.h { v0 }[5], [x8]
 ; GISEL-NEXT:    ret
 entry:
-  %0 = bitcast <4 x i16>* %source to <8 x i16>*
-  %tmp2 = load <8 x i16>, <8 x i16>* %0, align 4
+  %tmp2 = load <8 x i16>, ptr %source, align 4
   %tmp3 = extractelement <8 x i16> %tmp2, i32 6
   %tmp5 = insertelement <2 x i16> undef, i16 %tmp3, i32 0
   %tmp9 = extractelement <8 x i16> %tmp2, i32 5
   %tmp11 = insertelement <2 x i16> %tmp5, i16 %tmp9, i32 1
-  store <2 x i16> %tmp11, <2 x i16>* %dst, align 4
+  store <2 x i16> %tmp11, ptr %dst, align 4
   ret void
 }
 
 ; Test vrev of float4
-define void @float_vrev64(float* nocapture %source, <4 x float>* nocapture %dest) nounwind noinline ssp {
+define void @float_vrev64(ptr nocapture %source, ptr nocapture %dest) nounwind noinline ssp {
 ; CHECK-LABEL: float_vrev64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi.2d v0, #0000000000000000
@@ -575,11 +574,10 @@ define void @float_vrev64(float* nocapture %source, <4 x float>* nocapture %dest
 ; GISEL-NEXT:    str q0, [x1, #176]
 ; GISEL-NEXT:    ret
 entry:
-  %0 = bitcast float* %source to <4 x float>*
-  %tmp2 = load <4 x float>, <4 x float>* %0, align 4
+  %tmp2 = load <4 x float>, ptr %source, align 4
   %tmp5 = shufflevector <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <4 x float> %tmp2, <4 x i32> <i32 0, i32 7, i32 0, i32 0>
-  %arrayidx8 = getelementptr inbounds <4 x float>, <4 x float>* %dest, i32 11
-  store <4 x float> %tmp5, <4 x float>* %arrayidx8, align 4
+  %arrayidx8 = getelementptr inbounds <4 x float>, ptr %dest, i32 11
+  store <4 x float> %tmp5, ptr %arrayidx8, align 4
   ret void
 }
 
@@ -630,14 +628,14 @@ entry:
   br label %body
 
 body:
-  %out.6269.i = phi i16* [ undef, %cleanup ], [ undef, %entry ]
-  %0 = load i16, i16* undef, align 2
+  %out.6269.i = phi ptr [ undef, %cleanup ], [ undef, %entry ]
+  %0 = load i16, ptr undef, align 2
   %1 = icmp eq i16 undef, -10240
   br i1 %1, label %fail, label %cleanup
 
 cleanup:
   %or130.i = call i16 @llvm.bswap.i16(i16 %0)
-  store i16 %or130.i, i16* %out.6269.i, align 2
+  store i16 %or130.i, ptr %out.6269.i, align 2
   br label %body
 
 fail:
@@ -646,7 +644,7 @@ fail:
 declare i16 @llvm.bswap.i16(i16)
 
 ; Reduced regression from D120192
-define void @test_bswap32_narrow(i32* %p0, i16* %p1) nounwind {
+define void @test_bswap32_narrow(ptr %p0, ptr %p1) nounwind {
 ; CHECK-LABEL: test_bswap32_narrow:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
@@ -669,12 +667,12 @@ define void @test_bswap32_narrow(i32* %p0, i16* %p1) nounwind {
 ; GISEL-NEXT:    strh wzr, [x19]
 ; GISEL-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
 ; GISEL-NEXT:    ret
-  %ld = load i32, i32* %p0, align 4
+  %ld = load i32, ptr %p0, align 4
   %and = and i32 %ld, -65536
   %bswap = tail call i32 @llvm.bswap.i32(i32 %and)
   %and16 = zext i32 %bswap to i64
-  %call17 = tail call i32 bitcast (i32 (...)* @gid_tbl_len to i32 (i64)*)(i64 %and16)
-  store i16 0, i16* %p1, align 4
+  %call17 = tail call i32 @gid_tbl_len(i64 %and16)
+  store i16 0, ptr %p1, align 4
   ret void
 }
 declare i32 @gid_tbl_len(...)

diff  --git a/llvm/test/CodeGen/AArch64/arm64-scaled_iv.ll b/llvm/test/CodeGen/AArch64/arm64-scaled_iv.ll
index 24f04f44c3eab..c4da564434ee9 100644
--- a/llvm/test/CodeGen/AArch64/arm64-scaled_iv.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-scaled_iv.ll
@@ -6,7 +6,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 target triple = "arm64-apple-ios7.0.0"
 
 ; Function Attrs: nounwind ssp
-define void @mulDouble(double* nocapture %a, double* nocapture %b, double* nocapture %c) {
+define void @mulDouble(ptr nocapture %a, ptr nocapture %b, ptr nocapture %c) {
 ; CHECK: @mulDouble
 entry:
   br label %for.body
@@ -17,16 +17,16 @@ for.body:                                         ; preds = %for.body, %entry
 ; CHECK-NOT: phi
   %indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next, %for.body ]
   %tmp = add nsw i64 %indvars.iv, -1
-  %arrayidx = getelementptr inbounds double, double* %b, i64 %tmp
-  %tmp1 = load double, double* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds double, ptr %b, i64 %tmp
+  %tmp1 = load double, ptr %arrayidx, align 8
 ; The induction variable should carry the scaling factor: 1 * 8 = 8.
 ; CHECK: [[IVNEXT]] = add nuw nsw i64 [[IV]], 8
   %indvars.iv.next = add i64 %indvars.iv, 1
-  %arrayidx2 = getelementptr inbounds double, double* %c, i64 %indvars.iv.next
-  %tmp2 = load double, double* %arrayidx2, align 8
+  %arrayidx2 = getelementptr inbounds double, ptr %c, i64 %indvars.iv.next
+  %tmp2 = load double, ptr %arrayidx2, align 8
   %mul = fmul double %tmp1, %tmp2
-  %arrayidx4 = getelementptr inbounds double, double* %a, i64 %indvars.iv
-  store double %mul, double* %arrayidx4, align 8
+  %arrayidx4 = getelementptr inbounds double, ptr %a, i64 %indvars.iv
+  store double %mul, ptr %arrayidx4, align 8
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
 ; Comparison should be 19 * 8 = 152.
 ; CHECK: icmp eq i32 {{%[^,]+}}, 152

diff  --git a/llvm/test/CodeGen/AArch64/arm64-scvt.ll b/llvm/test/CodeGen/AArch64/arm64-scvt.ll
index 85b94c4c49ab7..5b06993b66161 100644
--- a/llvm/test/CodeGen/AArch64/arm64-scvt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-scvt.ll
@@ -2,63 +2,63 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cyclone | FileCheck --check-prefixes=CHECK,CHECK-CYC %s
 ; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cortex-a57 | FileCheck --check-prefixes=CHECK,CHECK-A57 %s
 
-define float @t1(i32* nocapture %src) nounwind ssp {
+define float @t1(ptr nocapture %src) nounwind ssp {
 ; CHECK-LABEL: t1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr s0, [x0]
 ; CHECK-NEXT:    scvtf s0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %tmp1 = load i32, i32* %src, align 4
+  %tmp1 = load i32, ptr %src, align 4
   %tmp2 = sitofp i32 %tmp1 to float
   ret float %tmp2
 }
 
-define float @t2(i32* nocapture %src) nounwind ssp {
+define float @t2(ptr nocapture %src) nounwind ssp {
 ; CHECK-LABEL: t2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr s0, [x0]
 ; CHECK-NEXT:    ucvtf s0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %tmp1 = load i32, i32* %src, align 4
+  %tmp1 = load i32, ptr %src, align 4
   %tmp2 = uitofp i32 %tmp1 to float
   ret float %tmp2
 }
 
-define double @t3(i64* nocapture %src) nounwind ssp {
+define double @t3(ptr nocapture %src) nounwind ssp {
 ; CHECK-LABEL: t3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    scvtf d0, d0
 ; CHECK-NEXT:    ret
 entry:
-  %tmp1 = load i64, i64* %src, align 4
+  %tmp1 = load i64, ptr %src, align 4
   %tmp2 = sitofp i64 %tmp1 to double
   ret double %tmp2
 }
 
-define double @t4(i64* nocapture %src) nounwind ssp {
+define double @t4(ptr nocapture %src) nounwind ssp {
 ; CHECK-LABEL: t4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ucvtf d0, d0
 ; CHECK-NEXT:    ret
 entry:
-  %tmp1 = load i64, i64* %src, align 4
+  %tmp1 = load i64, ptr %src, align 4
   %tmp2 = uitofp i64 %tmp1 to double
   ret double %tmp2
 }
 
 ; rdar://13136456
-define double @t5(i32* nocapture %src) nounwind ssp optsize {
+define double @t5(ptr nocapture %src) nounwind ssp optsize {
 ; CHECK-LABEL: t5:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    scvtf d0, w8
 ; CHECK-NEXT:    ret
 entry:
-  %tmp1 = load i32, i32* %src, align 4
+  %tmp1 = load i32, ptr %src, align 4
   %tmp2 = sitofp i32 %tmp1 to double
   ret double %tmp2
 }
@@ -79,7 +79,7 @@ entry:
 ; With loading size: 8, 16, 32, and 64-bits.
 
 ; ********* 1. load with scaled imm to float. *********
-define float @fct1(i8* nocapture %sp0) {
+define float @fct1(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr b0, [x0, #1]
@@ -87,14 +87,14 @@ define float @fct1(i8* nocapture %sp0) {
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i8, i8* %sp0, i64 1
-  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
+  %addr = getelementptr i8, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i8, ptr %addr, align 1
   %val = uitofp i8 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
-define float @fct2(i16* nocapture %sp0) {
+define float @fct2(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr h0, [x0, #2]
@@ -102,14 +102,14 @@ define float @fct2(i16* nocapture %sp0) {
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i16, i16* %sp0, i64 1
-  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
+  %addr = getelementptr i16, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i16, ptr %addr, align 1
   %val = uitofp i16 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
-define float @fct3(i32* nocapture %sp0) {
+define float @fct3(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr s0, [x0, #4]
@@ -117,15 +117,15 @@ define float @fct3(i32* nocapture %sp0) {
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i32, i32* %sp0, i64 1
-  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
+  %addr = getelementptr i32, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i32, ptr %addr, align 1
   %val = uitofp i32 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
 ; i64 -> f32 is not supported on floating point unit.
-define float @fct4(i64* nocapture %sp0) {
+define float @fct4(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x8, [x0, #8]
@@ -133,15 +133,15 @@ define float @fct4(i64* nocapture %sp0) {
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i64, i64* %sp0, i64 1
-  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
+  %addr = getelementptr i64, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i64, ptr %addr, align 1
   %val = uitofp i64 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
 ; ********* 2. load with scaled register to float. *********
-define float @fct5(i8* nocapture %sp0, i64 %offset) {
+define float @fct5(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: fct5:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr b0, [x0, x1]
@@ -149,14 +149,14 @@ define float @fct5(i8* nocapture %sp0, i64 %offset) {
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i8, i8* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
+  %addr = getelementptr i8, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i8, ptr %addr, align 1
   %val = uitofp i8 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
-define float @fct6(i16* nocapture %sp0, i64 %offset) {
+define float @fct6(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: fct6:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr h0, [x0, x1, lsl #1]
@@ -164,14 +164,14 @@ define float @fct6(i16* nocapture %sp0, i64 %offset) {
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i16, i16* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
+  %addr = getelementptr i16, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i16, ptr %addr, align 1
   %val = uitofp i16 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
-define float @fct7(i32* nocapture %sp0, i64 %offset) {
+define float @fct7(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: fct7:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr s0, [x0, x1, lsl #2]
@@ -179,15 +179,15 @@ define float @fct7(i32* nocapture %sp0, i64 %offset) {
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i32, i32* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
+  %addr = getelementptr i32, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i32, ptr %addr, align 1
   %val = uitofp i32 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
 ; i64 -> f32 is not supported on floating point unit.
-define float @fct8(i64* nocapture %sp0, i64 %offset) {
+define float @fct8(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: fct8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x8, [x0, x1, lsl #3]
@@ -195,8 +195,8 @@ define float @fct8(i64* nocapture %sp0, i64 %offset) {
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i64, i64* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
+  %addr = getelementptr i64, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i64, ptr %addr, align 1
   %val = uitofp i64 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
@@ -204,7 +204,7 @@ entry:
 
 
 ; ********* 3. load with scaled imm to double. *********
-define double @fct9(i8* nocapture %sp0) {
+define double @fct9(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct9:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr b0, [x0, #1]
@@ -212,14 +212,14 @@ define double @fct9(i8* nocapture %sp0) {
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i8, i8* %sp0, i64 1
-  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
+  %addr = getelementptr i8, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i8, ptr %addr, align 1
   %val = uitofp i8 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @fct10(i16* nocapture %sp0) {
+define double @fct10(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct10:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr h0, [x0, #2]
@@ -227,14 +227,14 @@ define double @fct10(i16* nocapture %sp0) {
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i16, i16* %sp0, i64 1
-  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
+  %addr = getelementptr i16, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i16, ptr %addr, align 1
   %val = uitofp i16 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @fct11(i32* nocapture %sp0) {
+define double @fct11(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct11:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr s0, [x0, #4]
@@ -242,14 +242,14 @@ define double @fct11(i32* nocapture %sp0) {
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i32, i32* %sp0, i64 1
-  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
+  %addr = getelementptr i32, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i32, ptr %addr, align 1
   %val = uitofp i32 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @fct12(i64* nocapture %sp0) {
+define double @fct12(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct12:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0, #8]
@@ -257,15 +257,15 @@ define double @fct12(i64* nocapture %sp0) {
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i64, i64* %sp0, i64 1
-  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
+  %addr = getelementptr i64, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i64, ptr %addr, align 1
   %val = uitofp i64 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
 ; ********* 4. load with scaled register to double. *********
-define double @fct13(i8* nocapture %sp0, i64 %offset) {
+define double @fct13(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: fct13:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr b0, [x0, x1]
@@ -273,14 +273,14 @@ define double @fct13(i8* nocapture %sp0, i64 %offset) {
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i8, i8* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
+  %addr = getelementptr i8, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i8, ptr %addr, align 1
   %val = uitofp i8 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @fct14(i16* nocapture %sp0, i64 %offset) {
+define double @fct14(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: fct14:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr h0, [x0, x1, lsl #1]
@@ -288,14 +288,14 @@ define double @fct14(i16* nocapture %sp0, i64 %offset) {
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i16, i16* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
+  %addr = getelementptr i16, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i16, ptr %addr, align 1
   %val = uitofp i16 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @fct15(i32* nocapture %sp0, i64 %offset) {
+define double @fct15(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: fct15:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr s0, [x0, x1, lsl #2]
@@ -303,14 +303,14 @@ define double @fct15(i32* nocapture %sp0, i64 %offset) {
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i32, i32* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
+  %addr = getelementptr i32, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i32, ptr %addr, align 1
   %val = uitofp i32 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @fct16(i64* nocapture %sp0, i64 %offset) {
+define double @fct16(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: fct16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0, x1, lsl #3]
@@ -318,15 +318,15 @@ define double @fct16(i64* nocapture %sp0, i64 %offset) {
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i64, i64* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
+  %addr = getelementptr i64, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i64, ptr %addr, align 1
   %val = uitofp i64 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
 ; ********* 5. load with unscaled imm to float. *********
-define float @fct17(i8* nocapture %sp0) {
+define float @fct17(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct17:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur b0, [x0, #-1]
@@ -334,59 +334,59 @@ define float @fct17(i8* nocapture %sp0) {
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %bitcast = ptrtoint i8* %sp0 to i64
+  %bitcast = ptrtoint ptr %sp0 to i64
   %add = add i64 %bitcast, -1
-  %addr = inttoptr i64 %add to i8*
-  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
+  %addr = inttoptr i64 %add to ptr
+  %pix_sp0.0.copyload = load i8, ptr %addr, align 1
   %val = uitofp i8 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
-define float @fct18(i16* nocapture %sp0) {
+define float @fct18(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct18:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur h0, [x0, #1]
 ; CHECK-NEXT:    ucvtf s0, s0
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
-  %bitcast = ptrtoint i16* %sp0 to i64
+  %bitcast = ptrtoint ptr %sp0 to i64
   %add = add i64 %bitcast, 1
-  %addr = inttoptr i64 %add to i16*
-  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
+  %addr = inttoptr i64 %add to ptr
+  %pix_sp0.0.copyload = load i16, ptr %addr, align 1
   %val = uitofp i16 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
-define float @fct19(i32* nocapture %sp0) {
+define float @fct19(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct19:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur s0, [x0, #1]
 ; CHECK-NEXT:    ucvtf s0, s0
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
-  %bitcast = ptrtoint i32* %sp0 to i64
+  %bitcast = ptrtoint ptr %sp0 to i64
   %add = add i64 %bitcast, 1
-  %addr = inttoptr i64 %add to i32*
-  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
+  %addr = inttoptr i64 %add to ptr
+  %pix_sp0.0.copyload = load i32, ptr %addr, align 1
   %val = uitofp i32 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
 ; i64 -> f32 is not supported on floating point unit.
-define float @fct20(i64* nocapture %sp0) {
+define float @fct20(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct20:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur x8, [x0, #1]
 ; CHECK-NEXT:    ucvtf s0, x8
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
-  %bitcast = ptrtoint i64* %sp0 to i64
+  %bitcast = ptrtoint ptr %sp0 to i64
   %add = add i64 %bitcast, 1
-  %addr = inttoptr i64 %add to i64*
-  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
+  %addr = inttoptr i64 %add to ptr
+  %pix_sp0.0.copyload = load i64, ptr %addr, align 1
   %val = uitofp i64 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
@@ -394,7 +394,7 @@ define float @fct20(i64* nocapture %sp0) {
 }
 
 ; ********* 6. load with unscaled imm to double. *********
-define double @fct21(i8* nocapture %sp0) {
+define double @fct21(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct21:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur b0, [x0, #-1]
@@ -402,58 +402,58 @@ define double @fct21(i8* nocapture %sp0) {
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
 entry:
-  %bitcast = ptrtoint i8* %sp0 to i64
+  %bitcast = ptrtoint ptr %sp0 to i64
   %add = add i64 %bitcast, -1
-  %addr = inttoptr i64 %add to i8*
-  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
+  %addr = inttoptr i64 %add to ptr
+  %pix_sp0.0.copyload = load i8, ptr %addr, align 1
   %val = uitofp i8 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @fct22(i16* nocapture %sp0) {
+define double @fct22(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct22:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur h0, [x0, #1]
 ; CHECK-NEXT:    ucvtf d0, d0
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
-  %bitcast = ptrtoint i16* %sp0 to i64
+  %bitcast = ptrtoint ptr %sp0 to i64
   %add = add i64 %bitcast, 1
-  %addr = inttoptr i64 %add to i16*
-  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
+  %addr = inttoptr i64 %add to ptr
+  %pix_sp0.0.copyload = load i16, ptr %addr, align 1
   %val = uitofp i16 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @fct23(i32* nocapture %sp0) {
+define double @fct23(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct23:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur s0, [x0, #1]
 ; CHECK-NEXT:    ucvtf d0, d0
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
-  %bitcast = ptrtoint i32* %sp0 to i64
+  %bitcast = ptrtoint ptr %sp0 to i64
   %add = add i64 %bitcast, 1
-  %addr = inttoptr i64 %add to i32*
-  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
+  %addr = inttoptr i64 %add to ptr
+  %pix_sp0.0.copyload = load i32, ptr %addr, align 1
   %val = uitofp i32 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @fct24(i64* nocapture %sp0) {
+define double @fct24(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct24:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur d0, [x0, #1]
 ; CHECK-NEXT:    ucvtf d0, d0
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
-  %bitcast = ptrtoint i64* %sp0 to i64
+  %bitcast = ptrtoint ptr %sp0 to i64
   %add = add i64 %bitcast, 1
-  %addr = inttoptr i64 %add to i64*
-  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
+  %addr = inttoptr i64 %add to ptr
+  %pix_sp0.0.copyload = load i64, ptr %addr, align 1
   %val = uitofp i64 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
@@ -461,7 +461,7 @@ define double @fct24(i64* nocapture %sp0) {
 }
 
 ; ********* 1s. load with scaled imm to float. *********
-define float @sfct1(i8* nocapture %sp0) {
+define float @sfct1(ptr nocapture %sp0) {
 ; CHECK-CYC-LABEL: sfct1:
 ; CHECK-CYC:       // %bb.0: // %entry
 ; CHECK-CYC-NEXT:    ldr b0, [x0, #1]
@@ -478,14 +478,14 @@ define float @sfct1(i8* nocapture %sp0) {
 ; CHECK-A57-NEXT:    fmul s0, s0, s0
 ; CHECK-A57-NEXT:    ret
 entry:
-  %addr = getelementptr i8, i8* %sp0, i64 1
-  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
+  %addr = getelementptr i8, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i8, ptr %addr, align 1
   %val = sitofp i8 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
-define float @sfct2(i16* nocapture %sp0) {
+define float @sfct2(ptr nocapture %sp0) {
 ; CHECK-CYC-LABEL: sfct2:
 ; CHECK-CYC:       // %bb.0: // %entry
 ; CHECK-CYC-NEXT:    ldr h0, [x0, #2]
@@ -501,14 +501,14 @@ define float @sfct2(i16* nocapture %sp0) {
 ; CHECK-A57-NEXT:    fmul s0, s0, s0
 ; CHECK-A57-NEXT:    ret
 entry:
-  %addr = getelementptr i16, i16* %sp0, i64 1
-  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
+  %addr = getelementptr i16, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i16, ptr %addr, align 1
   %val = sitofp i16 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
-define float @sfct3(i32* nocapture %sp0) {
+define float @sfct3(ptr nocapture %sp0) {
 ; CHECK-LABEL: sfct3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr s0, [x0, #4]
@@ -516,15 +516,15 @@ define float @sfct3(i32* nocapture %sp0) {
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i32, i32* %sp0, i64 1
-  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
+  %addr = getelementptr i32, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i32, ptr %addr, align 1
   %val = sitofp i32 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
 ; i64 -> f32 is not supported on floating point unit.
-define float @sfct4(i64* nocapture %sp0) {
+define float @sfct4(ptr nocapture %sp0) {
 ; CHECK-LABEL: sfct4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x8, [x0, #8]
@@ -532,15 +532,15 @@ define float @sfct4(i64* nocapture %sp0) {
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i64, i64* %sp0, i64 1
-  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
+  %addr = getelementptr i64, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i64, ptr %addr, align 1
   %val = sitofp i64 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
 ; ********* 2s. load with scaled register to float. *********
-define float @sfct5(i8* nocapture %sp0, i64 %offset) {
+define float @sfct5(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-CYC-LABEL: sfct5:
 ; CHECK-CYC:       // %bb.0: // %entry
 ; CHECK-CYC-NEXT:    ldr b0, [x0, x1]
@@ -557,14 +557,14 @@ define float @sfct5(i8* nocapture %sp0, i64 %offset) {
 ; CHECK-A57-NEXT:    fmul s0, s0, s0
 ; CHECK-A57-NEXT:    ret
 entry:
-  %addr = getelementptr i8, i8* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
+  %addr = getelementptr i8, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i8, ptr %addr, align 1
   %val = sitofp i8 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
-define float @sfct6(i16* nocapture %sp0, i64 %offset) {
+define float @sfct6(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-CYC-LABEL: sfct6:
 ; CHECK-CYC:       // %bb.0: // %entry
 ; CHECK-CYC-NEXT:    ldr h0, [x0, x1, lsl #1]
@@ -580,14 +580,14 @@ define float @sfct6(i16* nocapture %sp0, i64 %offset) {
 ; CHECK-A57-NEXT:    fmul s0, s0, s0
 ; CHECK-A57-NEXT:    ret
 entry:
-  %addr = getelementptr i16, i16* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
+  %addr = getelementptr i16, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i16, ptr %addr, align 1
   %val = sitofp i16 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
-define float @sfct7(i32* nocapture %sp0, i64 %offset) {
+define float @sfct7(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: sfct7:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr s0, [x0, x1, lsl #2]
@@ -595,15 +595,15 @@ define float @sfct7(i32* nocapture %sp0, i64 %offset) {
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i32, i32* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
+  %addr = getelementptr i32, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i32, ptr %addr, align 1
   %val = sitofp i32 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
 ; i64 -> f32 is not supported on floating point unit.
-define float @sfct8(i64* nocapture %sp0, i64 %offset) {
+define float @sfct8(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: sfct8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x8, [x0, x1, lsl #3]
@@ -611,15 +611,15 @@ define float @sfct8(i64* nocapture %sp0, i64 %offset) {
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i64, i64* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
+  %addr = getelementptr i64, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i64, ptr %addr, align 1
   %val = sitofp i64 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
 ; ********* 3s. load with scaled imm to double. *********
-define double @sfct9(i8* nocapture %sp0) {
+define double @sfct9(ptr nocapture %sp0) {
 ; CHECK-LABEL: sfct9:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsb w8, [x0, #1]
@@ -627,14 +627,14 @@ define double @sfct9(i8* nocapture %sp0) {
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i8, i8* %sp0, i64 1
-  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
+  %addr = getelementptr i8, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i8, ptr %addr, align 1
   %val = sitofp i8 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @sfct10(i16* nocapture %sp0) {
+define double @sfct10(ptr nocapture %sp0) {
 ; CHECK-CYC-LABEL: sfct10:
 ; CHECK-CYC:       // %bb.0: // %entry
 ; CHECK-CYC-NEXT:    ldr h0, [x0, #2]
@@ -651,14 +651,14 @@ define double @sfct10(i16* nocapture %sp0) {
 ; CHECK-A57-NEXT:    fmul d0, d0, d0
 ; CHECK-A57-NEXT:    ret
 entry:
-  %addr = getelementptr i16, i16* %sp0, i64 1
-  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
+  %addr = getelementptr i16, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i16, ptr %addr, align 1
   %val = sitofp i16 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @sfct11(i32* nocapture %sp0) {
+define double @sfct11(ptr nocapture %sp0) {
 ; CHECK-CYC-LABEL: sfct11:
 ; CHECK-CYC:       // %bb.0: // %entry
 ; CHECK-CYC-NEXT:    ldr s0, [x0, #4]
@@ -674,14 +674,14 @@ define double @sfct11(i32* nocapture %sp0) {
 ; CHECK-A57-NEXT:    fmul d0, d0, d0
 ; CHECK-A57-NEXT:    ret
 entry:
-  %addr = getelementptr i32, i32* %sp0, i64 1
-  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
+  %addr = getelementptr i32, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i32, ptr %addr, align 1
   %val = sitofp i32 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @sfct12(i64* nocapture %sp0) {
+define double @sfct12(ptr nocapture %sp0) {
 ; CHECK-LABEL: sfct12:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0, #8]
@@ -689,15 +689,15 @@ define double @sfct12(i64* nocapture %sp0) {
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i64, i64* %sp0, i64 1
-  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
+  %addr = getelementptr i64, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i64, ptr %addr, align 1
   %val = sitofp i64 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
 ; ********* 4s. load with scaled register to double. *********
-define double @sfct13(i8* nocapture %sp0, i64 %offset) {
+define double @sfct13(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: sfct13:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsb w8, [x0, x1]
@@ -705,14 +705,14 @@ define double @sfct13(i8* nocapture %sp0, i64 %offset) {
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i8, i8* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
+  %addr = getelementptr i8, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i8, ptr %addr, align 1
   %val = sitofp i8 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @sfct14(i16* nocapture %sp0, i64 %offset) {
+define double @sfct14(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-CYC-LABEL: sfct14:
 ; CHECK-CYC:       // %bb.0: // %entry
 ; CHECK-CYC-NEXT:    ldr h0, [x0, x1, lsl #1]
@@ -729,14 +729,14 @@ define double @sfct14(i16* nocapture %sp0, i64 %offset) {
 ; CHECK-A57-NEXT:    fmul d0, d0, d0
 ; CHECK-A57-NEXT:    ret
 entry:
-  %addr = getelementptr i16, i16* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
+  %addr = getelementptr i16, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i16, ptr %addr, align 1
   %val = sitofp i16 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @sfct15(i32* nocapture %sp0, i64 %offset) {
+define double @sfct15(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-CYC-LABEL: sfct15:
 ; CHECK-CYC:       // %bb.0: // %entry
 ; CHECK-CYC-NEXT:    ldr s0, [x0, x1, lsl #2]
@@ -752,14 +752,14 @@ define double @sfct15(i32* nocapture %sp0, i64 %offset) {
 ; CHECK-A57-NEXT:    fmul d0, d0, d0
 ; CHECK-A57-NEXT:    ret
 entry:
-  %addr = getelementptr i32, i32* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
+  %addr = getelementptr i32, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i32, ptr %addr, align 1
   %val = sitofp i32 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @sfct16(i64* nocapture %sp0, i64 %offset) {
+define double @sfct16(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: sfct16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0, x1, lsl #3]
@@ -767,15 +767,15 @@ define double @sfct16(i64* nocapture %sp0, i64 %offset) {
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i64, i64* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
+  %addr = getelementptr i64, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i64, ptr %addr, align 1
   %val = sitofp i64 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
 ; ********* 5s. load with unscaled imm to float. *********
-define float @sfct17(i8* nocapture %sp0) {
+define float @sfct17(ptr nocapture %sp0) {
 ; CHECK-CYC-LABEL: sfct17:
 ; CHECK-CYC:       // %bb.0: // %entry
 ; CHECK-CYC-NEXT:    ldur b0, [x0, #-1]
@@ -792,16 +792,16 @@ define float @sfct17(i8* nocapture %sp0) {
 ; CHECK-A57-NEXT:    fmul s0, s0, s0
 ; CHECK-A57-NEXT:    ret
 entry:
-  %bitcast = ptrtoint i8* %sp0 to i64
+  %bitcast = ptrtoint ptr %sp0 to i64
   %add = add i64 %bitcast, -1
-  %addr = inttoptr i64 %add to i8*
-  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
+  %addr = inttoptr i64 %add to ptr
+  %pix_sp0.0.copyload = load i8, ptr %addr, align 1
   %val = sitofp i8 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
-define float @sfct18(i16* nocapture %sp0) {
+define float @sfct18(ptr nocapture %sp0) {
 ; CHECK-CYC-LABEL: sfct18:
 ; CHECK-CYC:       // %bb.0:
 ; CHECK-CYC-NEXT:    ldur h0, [x0, #1]
@@ -816,43 +816,43 @@ define float @sfct18(i16* nocapture %sp0) {
 ; CHECK-A57-NEXT:    scvtf s0, w8
 ; CHECK-A57-NEXT:    fmul s0, s0, s0
 ; CHECK-A57-NEXT:    ret
-  %bitcast = ptrtoint i16* %sp0 to i64
+  %bitcast = ptrtoint ptr %sp0 to i64
   %add = add i64 %bitcast, 1
-  %addr = inttoptr i64 %add to i16*
-  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
+  %addr = inttoptr i64 %add to ptr
+  %pix_sp0.0.copyload = load i16, ptr %addr, align 1
   %val = sitofp i16 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
-define float @sfct19(i32* nocapture %sp0) {
+define float @sfct19(ptr nocapture %sp0) {
 ; CHECK-LABEL: sfct19:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur s0, [x0, #1]
 ; CHECK-NEXT:    scvtf s0, s0
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
-  %bitcast = ptrtoint i32* %sp0 to i64
+  %bitcast = ptrtoint ptr %sp0 to i64
   %add = add i64 %bitcast, 1
-  %addr = inttoptr i64 %add to i32*
-  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
+  %addr = inttoptr i64 %add to ptr
+  %pix_sp0.0.copyload = load i32, ptr %addr, align 1
   %val = sitofp i32 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
 ; i64 -> f32 is not supported on floating point unit.
-define float @sfct20(i64* nocapture %sp0) {
+define float @sfct20(ptr nocapture %sp0) {
 ; CHECK-LABEL: sfct20:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur x8, [x0, #1]
 ; CHECK-NEXT:    scvtf s0, x8
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
-  %bitcast = ptrtoint i64* %sp0 to i64
+  %bitcast = ptrtoint ptr %sp0 to i64
   %add = add i64 %bitcast, 1
-  %addr = inttoptr i64 %add to i64*
-  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
+  %addr = inttoptr i64 %add to ptr
+  %pix_sp0.0.copyload = load i64, ptr %addr, align 1
   %val = sitofp i64 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
@@ -860,7 +860,7 @@ define float @sfct20(i64* nocapture %sp0) {
 }
 
 ; ********* 6s. load with unscaled imm to double. *********
-define double @sfct21(i8* nocapture %sp0) {
+define double @sfct21(ptr nocapture %sp0) {
 ; CHECK-LABEL: sfct21:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldursb w8, [x0, #-1]
@@ -868,16 +868,16 @@ define double @sfct21(i8* nocapture %sp0) {
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
 entry:
-  %bitcast = ptrtoint i8* %sp0 to i64
+  %bitcast = ptrtoint ptr %sp0 to i64
   %add = add i64 %bitcast, -1
-  %addr = inttoptr i64 %add to i8*
-  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
+  %addr = inttoptr i64 %add to ptr
+  %pix_sp0.0.copyload = load i8, ptr %addr, align 1
   %val = sitofp i8 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @sfct22(i16* nocapture %sp0) {
+define double @sfct22(ptr nocapture %sp0) {
 ; CHECK-CYC-LABEL: sfct22:
 ; CHECK-CYC:       // %bb.0:
 ; CHECK-CYC-NEXT:    ldur h0, [x0, #1]
@@ -893,16 +893,16 @@ define double @sfct22(i16* nocapture %sp0) {
 ; CHECK-A57-NEXT:    scvtf d0, w8
 ; CHECK-A57-NEXT:    fmul d0, d0, d0
 ; CHECK-A57-NEXT:    ret
-  %bitcast = ptrtoint i16* %sp0 to i64
+  %bitcast = ptrtoint ptr %sp0 to i64
   %add = add i64 %bitcast, 1
-  %addr = inttoptr i64 %add to i16*
-  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
+  %addr = inttoptr i64 %add to ptr
+  %pix_sp0.0.copyload = load i16, ptr %addr, align 1
   %val = sitofp i16 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @sfct23(i32* nocapture %sp0) {
+define double @sfct23(ptr nocapture %sp0) {
 ; CHECK-CYC-LABEL: sfct23:
 ; CHECK-CYC:       // %bb.0:
 ; CHECK-CYC-NEXT:    ldur s0, [x0, #1]
@@ -917,26 +917,26 @@ define double @sfct23(i32* nocapture %sp0) {
 ; CHECK-A57-NEXT:    scvtf d0, w8
 ; CHECK-A57-NEXT:    fmul d0, d0, d0
 ; CHECK-A57-NEXT:    ret
-  %bitcast = ptrtoint i32* %sp0 to i64
+  %bitcast = ptrtoint ptr %sp0 to i64
   %add = add i64 %bitcast, 1
-  %addr = inttoptr i64 %add to i32*
-  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
+  %addr = inttoptr i64 %add to ptr
+  %pix_sp0.0.copyload = load i32, ptr %addr, align 1
   %val = sitofp i32 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
 }
 
-define double @sfct24(i64* nocapture %sp0) {
+define double @sfct24(ptr nocapture %sp0) {
 ; CHECK-LABEL: sfct24:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur d0, [x0, #1]
 ; CHECK-NEXT:    scvtf d0, d0
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
-  %bitcast = ptrtoint i64* %sp0 to i64
+  %bitcast = ptrtoint ptr %sp0 to i64
   %add = add i64 %bitcast, 1
-  %addr = inttoptr i64 %add to i64*
-  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
+  %addr = inttoptr i64 %add to ptr
+  %pix_sp0.0.copyload = load i64, ptr %addr, align 1
   %val = sitofp i64 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i
@@ -944,7 +944,7 @@ define double @sfct24(i64* nocapture %sp0) {
 }
 
 ; Check that we do not use SSHLL code sequence when code size is a concern.
-define float @codesize_sfct17(i8* nocapture %sp0) optsize {
+define float @codesize_sfct17(ptr nocapture %sp0) optsize {
 ; CHECK-LABEL: codesize_sfct17:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldursb w8, [x0, #-1]
@@ -952,16 +952,16 @@ define float @codesize_sfct17(i8* nocapture %sp0) optsize {
 ; CHECK-NEXT:    fmul s0, s0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %bitcast = ptrtoint i8* %sp0 to i64
+  %bitcast = ptrtoint ptr %sp0 to i64
   %add = add i64 %bitcast, -1
-  %addr = inttoptr i64 %add to i8*
-  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
+  %addr = inttoptr i64 %add to ptr
+  %pix_sp0.0.copyload = load i8, ptr %addr, align 1
   %val = sitofp i8 %pix_sp0.0.copyload to float
   %vmull.i = fmul float %val, %val
   ret float %vmull.i
 }
 
-define double @codesize_sfct11(i32* nocapture %sp0) minsize {
+define double @codesize_sfct11(ptr nocapture %sp0) minsize {
 ; CHECK-LABEL: codesize_sfct11:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0, #4]
@@ -969,8 +969,8 @@ define double @codesize_sfct11(i32* nocapture %sp0) minsize {
 ; CHECK-NEXT:    fmul d0, d0, d0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i32, i32* %sp0, i64 1
-  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
+  %addr = getelementptr i32, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i32, ptr %addr, align 1
   %val = sitofp i32 %pix_sp0.0.copyload to double
   %vmull.i = fmul double %val, %val
   ret double %vmull.i

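For reference, the hunks above all apply the same mechanical rewrite: the pointee type disappears from the pointer operands and survives only on the memory operation itself. A minimal standalone sketch of that pattern, with an illustrative function name that is not taken from the test:

  ; Typed-pointer spelling (pre-conversion):
  ;   %addr = getelementptr i16, i16* %sp0, i64 1
  ;   %val  = load i16, i16* %addr, align 1
  ;
  ; Opaque-pointer spelling (post-conversion):
  define float @opaque_ptr_sketch(ptr nocapture %sp0) {
  entry:
    ; The gep still names i16 as its element type; the pointer operand is just 'ptr'.
    %addr = getelementptr i16, ptr %sp0, i64 1
    ; The load likewise keeps its value type and alignment; only the pointer type changed.
    %val = load i16, ptr %addr, align 1
    %f = uitofp i16 %val to float
    ret float %f
  }
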
diff  --git a/llvm/test/CodeGen/AArch64/arm64-setcc-int-to-fp-combine.ll b/llvm/test/CodeGen/AArch64/arm64-setcc-int-to-fp-combine.ll
index b26608565cf34..b9cffbcbdbd67 100644
--- a/llvm/test/CodeGen/AArch64/arm64-setcc-int-to-fp-combine.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-setcc-int-to-fp-combine.ll
@@ -16,7 +16,7 @@ define <4 x float> @foo(<4 x float> %val, <4 x float> %test) nounwind {
 ; Make sure the operation doesn't try to get folded when the sizes don't match,
 ; as that ends up crashing later when trying to form a bitcast operation for
 ; the folded nodes.
-define void @foo1(<4 x float> %val, <4 x float> %test, <4 x double>* %p) nounwind {
+define void @foo1(<4 x float> %val, <4 x float> %test, ptr %p) nounwind {
 ; CHECK-LABEL: foo1:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    movi.4s v2, #1
@@ -31,7 +31,7 @@ define void @foo1(<4 x float> %val, <4 x float> %test, <4 x double>* %p) nounwin
   %cmp = fcmp oeq <4 x float> %val, %test
   %ext = zext <4 x i1> %cmp to <4 x i32>
   %result = sitofp <4 x i32> %ext to <4 x double>
-  store <4 x double> %result, <4 x double>* %p
+  store <4 x double> %result, ptr %p
   ret void
 }
 

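The same rule covers the vector store in the hunk above: the value operand keeps its full vector type and only the address operand collapses to ptr, so the store stays fully typed. A small sketch, with a made-up function name:

  define void @store_vec_sketch(<4 x double> %v, ptr %p) {
    ; The stored value still carries <4 x double>; the address is an untyped ptr.
    store <4 x double> %v, ptr %p, align 32
    ret void
  }
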
diff  --git a/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll b/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
index 46a357543acac..79d3291b2fa97 100644
--- a/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
@@ -56,8 +56,8 @@ define i32 @foo(i32 %a, i32 %b) {
   br i1 %tmp2, label %true, label %false
 
 true:
-  store i32 %a, i32* %tmp, align 4
-  %tmp4 = call i32 @doSomething(i32 0, i32* %tmp)
+  store i32 %a, ptr %tmp, align 4
+  %tmp4 = call i32 @doSomething(i32 0, ptr %tmp)
   br label %false
 
 false:
@@ -66,7 +66,7 @@ false:
 }
 
 ; Function Attrs: optsize
-declare i32 @doSomething(i32, i32*)
+declare i32 @doSomething(i32, ptr)
 
 
 ; Check that we do not perform the restore inside the loop whereas the save
@@ -144,7 +144,7 @@ entry:
 for.body:                                         ; preds = %entry, %for.body
   %i.05 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
   %sum.04 = phi i32 [ %add, %for.body ], [ 0, %entry ]
-  %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
+  %call = tail call i32 @something()
   %add = add nsw i32 %call, %sum.04
   %inc = add nuw nsw i32 %i.05, 1
   %exitcond = icmp eq i32 %inc, 10
@@ -223,7 +223,7 @@ entry:
 for.body:                                         ; preds = %for.body, %entry
   %i.04 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
   %sum.03 = phi i32 [ 0, %entry ], [ %add, %for.body ]
-  %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
+  %call = tail call i32 @something()
   %add = add nsw i32 %call, %sum.03
   %inc = add nuw nsw i32 %i.04, 1
   %exitcond = icmp eq i32 %inc, 10
@@ -310,14 +310,14 @@ entry:
 for.body:                                         ; preds = %entry, %for.body
   %i.05 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
   %sum.04 = phi i32 [ %add, %for.body ], [ 0, %entry ]
-  %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
+  %call = tail call i32 @something()
   %add = add nsw i32 %call, %sum.04
   %inc = add nuw nsw i32 %i.05, 1
   %exitcond = icmp eq i32 %inc, 10
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:                                          ; preds = %for.body
-  tail call void bitcast (void (...)* @somethingElse to void ()*)()
+  tail call void @somethingElse()
   %shl = shl i32 %add, 3
   br label %if.end
 
@@ -414,13 +414,13 @@ entry:
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  tail call void bitcast (void (...)* @somethingElse to void ()*)()
+  tail call void @somethingElse()
   br label %for.body
 
 for.body:                                         ; preds = %for.body, %if.then
   %i.05 = phi i32 [ 0, %if.then ], [ %inc, %for.body ]
   %sum.04 = phi i32 [ 0, %if.then ], [ %add, %for.body ]
-  %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
+  %call = tail call i32 @something()
   %add = add nsw i32 %call, %sum.04
   %inc = add nuw nsw i32 %i.05, 1
   %exitcond = icmp eq i32 %inc, 10
@@ -512,20 +512,19 @@ define i32 @variadicFunc(i32 %cond, i32 %count, ...) nounwind uwtable {
 ; DISABLE-NEXT:    .cfi_def_cfa_offset 0
 ; DISABLE-NEXT:    ret
 entry:
-  %ap = alloca i8*, align 8
+  %ap = alloca ptr, align 8
   %tobool = icmp eq i32 %cond, 0
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
+  call void @llvm.va_start(ptr %ap)
   %cmp6 = icmp sgt i32 %count, 0
   br i1 %cmp6, label %for.body, label %for.end
 
 for.body:                                         ; preds = %if.then, %for.body
   %i.08 = phi i32 [ %inc, %for.body ], [ 0, %if.then ]
   %sum.07 = phi i32 [ %add, %for.body ], [ 0, %if.then ]
-  %0 = va_arg i8** %ap, i32
+  %0 = va_arg ptr %ap, i32
   %add = add nsw i32 %sum.07, %0
   %inc = add nuw nsw i32 %i.08, 1
   %exitcond = icmp eq i32 %inc, %count
@@ -533,7 +532,7 @@ for.body:                                         ; preds = %if.then, %for.body
 
 for.end:                                          ; preds = %for.body, %if.then
   %sum.0.lcssa = phi i32 [ 0, %if.then ], [ %add, %for.body ]
-  call void @llvm.va_end(i8* %ap1)
+  call void @llvm.va_end(ptr %ap)
   br label %if.end
 
 if.else:                                          ; preds = %entry
@@ -545,9 +544,9 @@ if.end:                                           ; preds = %if.else, %for.end
   ret i32 %sum.1
 }
 
-declare void @llvm.va_start(i8*)
+declare void @llvm.va_start(ptr)
 
-declare void @llvm.va_end(i8*)
+declare void @llvm.va_end(ptr)
 
 ; Check that we handle inline asm correctly.
 define i32 @inlineAsm(i32 %cond, i32 %N) {
@@ -821,9 +820,9 @@ if.then:
 
 for.body:                                         ; preds = %for.body, %entry
   %sum.03 = phi i32 [ 0, %if.then ], [ %add, %for.body ]
-  %call = tail call i32 bitcast (i32 (...)* @something to i32 ()*)()
+  %call = tail call i32 @something()
   %add = add nsw i32 %call, %sum.03
-  store i32 %add, i32* %ptr
+  store i32 %add, ptr %ptr
   br label %for.body
 
 if.end:
@@ -910,7 +909,7 @@ for.body:                                         ; preds = %for.body, %entry
   %sum.03 = phi i32 [ 0, %if.then ], [ %add, %body1 ], [ 1, %body2]
   %call = tail call i32 asm "mov $0, #0", "=r,~{x19}"()
   %add = add nsw i32 %call, %sum.03
-  store i32 %add, i32* %ptr
+  store i32 %add, ptr %ptr
   br i1 undef, label %body1, label %body2
 
 body1:
@@ -981,21 +980,19 @@ body:                                             ; preds = %entry
   br i1 undef, label %loop2a, label %end
 
 loop1:                                            ; preds = %loop2a, %loop2b
-  %var.phi = phi i32* [ %next.phi, %loop2b ], [ %var, %loop2a ]
-  %next.phi = phi i32* [ %next.load, %loop2b ], [ %next.var, %loop2a ]
-  %0 = icmp eq i32* %var, null
-  %next.load = load i32*, i32** undef
+  %var.phi = phi ptr [ %next.phi, %loop2b ], [ %var, %loop2a ]
+  %next.phi = phi ptr [ %next.load, %loop2b ], [ %next.var, %loop2a ]
+  %0 = icmp eq ptr %var, null
+  %next.load = load ptr, ptr undef
   br i1 %0, label %loop2a, label %loop2b
 
 loop2a:                                           ; preds = %loop1, %body, %entry
-  %var = phi i32* [ null, %body ], [ null, %entry ], [ %next.phi, %loop1 ]
-  %next.var = phi i32* [ undef, %body ], [ null, %entry ], [ %next.load, %loop1 ]
+  %var = phi ptr [ null, %body ], [ null, %entry ], [ %next.phi, %loop1 ]
+  %next.var = phi ptr [ undef, %body ], [ null, %entry ], [ %next.load, %loop1 ]
   br label %loop1
 
 loop2b:                                           ; preds = %loop1
-  %gep1 = bitcast i32* %var.phi to i32*
-  %next.ptr = bitcast i32* %gep1 to i32**
-  store i32* %next.phi, i32** %next.ptr
+  store ptr %next.phi, ptr %var.phi
   br label %loop1
 
 end:
@@ -1004,7 +1001,7 @@ end:
 
 ; Re-aligned stack pointer.  See bug 26642.  Avoid clobbering live
 ; values in the prologue when re-aligning the stack pointer.
-define i32 @stack_realign(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2) {
+define i32 @stack_realign(i32 %a, i32 %b, ptr %ptr1, ptr %ptr2) {
 ; ENABLE-LABEL: stack_realign:
 ; ENABLE:       ; %bb.0:
 ; ENABLE-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
@@ -1057,14 +1054,14 @@ define i32 @stack_realign(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2) {
   br i1 %tmp2, label %true, label %false
 
 true:
-  store i32 %a, i32* %tmp, align 4
-  %tmp4 = load i32, i32* %tmp
+  store i32 %a, ptr %tmp, align 4
+  %tmp4 = load i32, ptr %tmp
   br label %false
 
 false:
   %tmp.0 = phi i32 [ %tmp4, %true ], [ %a, %0 ]
-  store i32 %shl1, i32* %ptr1
-  store i32 %shl2, i32* %ptr2
+  store i32 %shl1, ptr %ptr1
+  store i32 %shl2, ptr %ptr2
   ret i32 %tmp.0
 }
 
@@ -1073,7 +1070,7 @@ false:
 ; ensuring we have a scratch register to re-align the stack pointer is
 ; too complicated.  Output should be the same for both enabled and
 ; disabled shrink wrapping.
-define void @stack_realign2(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2, i32* %ptr3, i32* %ptr4, i32* %ptr5, i32* %ptr6) {
+define void @stack_realign2(i32 %a, i32 %b, ptr %ptr1, ptr %ptr2, ptr %ptr3, ptr %ptr4, ptr %ptr5, ptr %ptr6) {
 ; ENABLE-LABEL: stack_realign2:
 ; ENABLE:       ; %bb.0:
 ; ENABLE-NEXT:    stp x28, x27, [sp, #-96]! ; 16-byte Folded Spill
@@ -1208,29 +1205,29 @@ define void @stack_realign2(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2, i32* %ptr3,
   br i1 %cmp, label %true, label %false
 
 true:
-  store i32 %a, i32* %tmp, align 4
+  store i32 %a, ptr %tmp, align 4
   call void asm sideeffect "nop", "~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28}"() nounwind
   br label %false
 
 false:
-  store i32 %tmp1, i32* %ptr1, align 4
-  store i32 %tmp2, i32* %ptr2, align 4
-  store i32 %tmp3, i32* %ptr3, align 4
-  store i32 %tmp4, i32* %ptr4, align 4
-  store i32 %tmp5, i32* %ptr5, align 4
-  store i32 %tmp6, i32* %ptr6, align 4
-  %idx1 = getelementptr inbounds i32, i32* %ptr1, i64 1
-  store i32 %a, i32* %idx1, align 4
-  %idx2 = getelementptr inbounds i32, i32* %ptr1, i64 2
-  store i32 %b, i32* %idx2, align 4
-  %idx3 = getelementptr inbounds i32, i32* %ptr1, i64 3
-  store i32 %tmp7, i32* %idx3, align 4
-  %idx4 = getelementptr inbounds i32, i32* %ptr1, i64 4
-  store i32 %tmp8, i32* %idx4, align 4
-  %idx5 = getelementptr inbounds i32, i32* %ptr1, i64 5
-  store i32 %tmp9, i32* %idx5, align 4
-  %idx6 = getelementptr inbounds i32, i32* %ptr1, i64 6
-  store i32 %tmp10, i32* %idx6, align 4
+  store i32 %tmp1, ptr %ptr1, align 4
+  store i32 %tmp2, ptr %ptr2, align 4
+  store i32 %tmp3, ptr %ptr3, align 4
+  store i32 %tmp4, ptr %ptr4, align 4
+  store i32 %tmp5, ptr %ptr5, align 4
+  store i32 %tmp6, ptr %ptr6, align 4
+  %idx1 = getelementptr inbounds i32, ptr %ptr1, i64 1
+  store i32 %a, ptr %idx1, align 4
+  %idx2 = getelementptr inbounds i32, ptr %ptr1, i64 2
+  store i32 %b, ptr %idx2, align 4
+  %idx3 = getelementptr inbounds i32, ptr %ptr1, i64 3
+  store i32 %tmp7, ptr %idx3, align 4
+  %idx4 = getelementptr inbounds i32, ptr %ptr1, i64 4
+  store i32 %tmp8, ptr %idx4, align 4
+  %idx5 = getelementptr inbounds i32, ptr %ptr1, i64 5
+  store i32 %tmp9, ptr %idx5, align 4
+  %idx6 = getelementptr inbounds i32, ptr %ptr1, i64 6
+  store i32 %tmp10, ptr %idx6, align 4
 
   ret void
 }

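The shrink-wrapping hunks above also show the second effect of the conversion: pointer bitcasts become no-ops, so both the constant-expression bitcasts of callees and the explicit bitcast instruction feeding llvm.va_start drop out entirely. A standalone sketch of that simplification, reusing the test's varargs declaration of @something; the function @va_sketch itself is illustrative, not part of the patch:

  declare i32 @something(...)
  declare void @llvm.va_start(ptr)
  declare void @llvm.va_end(ptr)

  define i32 @va_sketch(i32 %cond, ...) {
  entry:
    %ap = alloca ptr, align 8
    ; No 'bitcast i8** %ap to i8*' is needed any more; %ap is already a plain ptr.
    call void @llvm.va_start(ptr %ap)
    ; The callee is used directly at the call's own function type; the old
    ; 'bitcast (i32 (...)* @something to i32 ()*)' constant expression is gone.
    %r = call i32 @something()
    call void @llvm.va_end(ptr %ap)
    ret i32 %r
  }
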
diff  --git a/llvm/test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll b/llvm/test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll
index 169ad4bf63361..239a04f7d2c7c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll
@@ -14,8 +14,8 @@ define dso_local float @foo() {
 ; CHECK: ldr [[SREG:s[0-9]+]], [x[[VARBASE]],
 ; CHECK: str wzr, [x[[VARBASE]],
 
-  %val = load i32, i32* @var, align 4
-  store i32 0, i32* @var, align 4
+  %val = load i32, ptr @var, align 4
+  store i32 0, ptr @var, align 4
 
   %fltval = sitofp i32 %val to float
   ret float %fltval

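Globals follow suit: @var is already a pointer constant, so the accesses above simply spell the operand as 'ptr @var' while the load and store keep their i32 value type. A minimal sketch that assumes an i32 global named @var as in the test; the defining initializer and the function name are illustrative:

  @var = global i32 0, align 4

  define float @global_sketch() {
    ; The load names the value type (i32); the global is referenced as a bare ptr.
    %val = load i32, ptr @var, align 4
    store i32 0, ptr @var, align 4
    %f = sitofp i32 %val to float
    ret float %f
  }
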
diff  --git a/llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll b/llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
index 0e3a29caba413..e180afa09d5ac 100644
--- a/llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define void @testLeftGood8x8(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) nounwind {
+define void @testLeftGood8x8(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftGood8x8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sli.8b v0, v1, #3
@@ -10,11 +10,11 @@ define void @testLeftGood8x8(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) no
   %and.i = and <8 x i8> %src1, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
   %vshl_n = shl <8 x i8> %src2, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   %result = or <8 x i8> %and.i, %vshl_n
-  store <8 x i8> %result, <8 x i8>* %dest, align 8
+  store <8 x i8> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLeftBad8x8(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) nounwind {
+define void @testLeftBad8x8(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftBad8x8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.8b v2, #165
@@ -26,11 +26,11 @@ define void @testLeftBad8x8(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) nou
   %and.i = and <8 x i8> %src1, <i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165>
   %vshl_n = shl <8 x i8> %src2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %result = or <8 x i8> %and.i, %vshl_n
-  store <8 x i8> %result, <8 x i8>* %dest, align 8
+  store <8 x i8> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testRightGood8x8(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) nounwind {
+define void @testRightGood8x8(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightGood8x8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sri.8b v0, v1, #3
@@ -39,11 +39,11 @@ define void @testRightGood8x8(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) n
   %and.i = and <8 x i8> %src1, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
   %vshl_n = lshr <8 x i8> %src2, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   %result = or <8 x i8> %and.i, %vshl_n
-  store <8 x i8> %result, <8 x i8>* %dest, align 8
+  store <8 x i8> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testRightBad8x8(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) nounwind {
+define void @testRightBad8x8(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightBad8x8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.8b v2, #165
@@ -55,11 +55,11 @@ define void @testRightBad8x8(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) no
   %and.i = and <8 x i8> %src1, <i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165>
   %vshl_n = lshr <8 x i8> %src2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %result = or <8 x i8> %and.i, %vshl_n
-  store <8 x i8> %result, <8 x i8>* %dest, align 8
+  store <8 x i8> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLeftGood16x8(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
+define void @testLeftGood16x8(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftGood16x8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sli.16b v0, v1, #3
@@ -68,11 +68,11 @@ define void @testLeftGood16x8(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest
   %and.i = and <16 x i8> %src1, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
   %vshl_n = shl <16 x i8> %src2, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   %result = or <16 x i8> %and.i, %vshl_n
-  store <16 x i8> %result, <16 x i8>* %dest, align 16
+  store <16 x i8> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLeftBad16x8(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
+define void @testLeftBad16x8(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftBad16x8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.16b v2, #165
@@ -84,11 +84,11 @@ define void @testLeftBad16x8(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest)
   %and.i = and <16 x i8> %src1, <i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165>
   %vshl_n = shl <16 x i8> %src2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %result = or <16 x i8> %and.i, %vshl_n
-  store <16 x i8> %result, <16 x i8>* %dest, align 16
+  store <16 x i8> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testRightGood16x8(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
+define void @testRightGood16x8(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightGood16x8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sri.16b v0, v1, #3
@@ -97,11 +97,11 @@ define void @testRightGood16x8(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %des
   %and.i = and <16 x i8> %src1, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
   %vshl_n = lshr <16 x i8> %src2, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   %result = or <16 x i8> %and.i, %vshl_n
-  store <16 x i8> %result, <16 x i8>* %dest, align 16
+  store <16 x i8> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testRightBad16x8(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
+define void @testRightBad16x8(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightBad16x8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.16b v2, #165
@@ -113,11 +113,11 @@ define void @testRightBad16x8(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest
   %and.i = and <16 x i8> %src1, <i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165, i8 165>
   %vshl_n = lshr <16 x i8> %src2, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %result = or <16 x i8> %and.i, %vshl_n
-  store <16 x i8> %result, <16 x i8>* %dest, align 16
+  store <16 x i8> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLeftGood4x16(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %dest) nounwind {
+define void @testLeftGood4x16(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftGood4x16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sli.4h v0, v1, #14
@@ -126,11 +126,11 @@ define void @testLeftGood4x16(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %dest
   %and.i = and <4 x i16> %src1, <i16 16383, i16 16383, i16 16383, i16 16383>
   %vshl_n = shl <4 x i16> %src2, <i16 14, i16 14, i16 14, i16 14>
   %result = or <4 x i16> %and.i, %vshl_n
-  store <4 x i16> %result, <4 x i16>* %dest, align 8
+  store <4 x i16> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLeftBad4x16(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %dest) nounwind {
+define void @testLeftBad4x16(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftBad4x16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #16500
@@ -143,11 +143,11 @@ define void @testLeftBad4x16(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %dest)
   %and.i = and <4 x i16> %src1, <i16 16500, i16 16500, i16 16500, i16 16500>
   %vshl_n = shl <4 x i16> %src2, <i16 14, i16 14, i16 14, i16 14>
   %result = or <4 x i16> %and.i, %vshl_n
-  store <4 x i16> %result, <4 x i16>* %dest, align 8
+  store <4 x i16> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testRightGood4x16(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %dest) nounwind {
+define void @testRightGood4x16(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightGood4x16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sri.4h v0, v1, #14
@@ -156,11 +156,11 @@ define void @testRightGood4x16(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %des
   %and.i = and <4 x i16> %src1, <i16 65532, i16 65532, i16 65532, i16 65532>
   %vshl_n = lshr <4 x i16> %src2, <i16 14, i16 14, i16 14, i16 14>
   %result = or <4 x i16> %and.i, %vshl_n
-  store <4 x i16> %result, <4 x i16>* %dest, align 8
+  store <4 x i16> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testRightBad4x16(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %dest) nounwind {
+define void @testRightBad4x16(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightBad4x16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #16500
@@ -172,11 +172,11 @@ define void @testRightBad4x16(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %dest
   %and.i = and <4 x i16> %src1, <i16 16500, i16 16500, i16 16500, i16 16500>
   %vshl_n = lshr <4 x i16> %src2, <i16 14, i16 14, i16 14, i16 14>
   %result = or <4 x i16> %and.i, %vshl_n
-  store <4 x i16> %result, <4 x i16>* %dest, align 8
+  store <4 x i16> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLeftGood8x16(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %dest) nounwind {
+define void @testLeftGood8x16(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftGood8x16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sli.8h v0, v1, #14
@@ -185,11 +185,11 @@ define void @testLeftGood8x16(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %dest
   %and.i = and <8 x i16> %src1, <i16 16383, i16 16383, i16 16383, i16 16383, i16 16383, i16 16383, i16 16383, i16 16383>
   %vshl_n = shl <8 x i16> %src2, <i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14>
   %result = or <8 x i16> %and.i, %vshl_n
-  store <8 x i16> %result, <8 x i16>* %dest, align 16
+  store <8 x i16> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLeftBad8x16(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %dest) nounwind {
+define void @testLeftBad8x16(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftBad8x16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #16500
@@ -202,11 +202,11 @@ define void @testLeftBad8x16(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %dest)
   %and.i = and <8 x i16> %src1, <i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500>
   %vshl_n = shl <8 x i16> %src2, <i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14>
   %result = or <8 x i16> %and.i, %vshl_n
-  store <8 x i16> %result, <8 x i16>* %dest, align 16
+  store <8 x i16> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testRightGood8x16(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %dest) nounwind {
+define void @testRightGood8x16(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightGood8x16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sri.8h v0, v1, #14
@@ -215,11 +215,11 @@ define void @testRightGood8x16(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %des
   %and.i = and <8 x i16> %src1, <i16 65532, i16 65532, i16 65532, i16 65532, i16 65532, i16 65532, i16 65532, i16 65532>
   %vshl_n = lshr <8 x i16> %src2, <i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14>
   %result = or <8 x i16> %and.i, %vshl_n
-  store <8 x i16> %result, <8 x i16>* %dest, align 16
+  store <8 x i16> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testRightBad8x16(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %dest) nounwind {
+define void @testRightBad8x16(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightBad8x16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #16500
@@ -231,11 +231,11 @@ define void @testRightBad8x16(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %dest
   %and.i = and <8 x i16> %src1, <i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500, i16 16500>
   %vshl_n = lshr <8 x i16> %src2, <i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14, i16 14>
   %result = or <8 x i16> %and.i, %vshl_n
-  store <8 x i16> %result, <8 x i16>* %dest, align 16
+  store <8 x i16> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLeftGood2x32(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %dest) nounwind {
+define void @testLeftGood2x32(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftGood2x32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sli.2s v0, v1, #22
@@ -244,11 +244,11 @@ define void @testLeftGood2x32(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %dest
   %and.i = and <2 x i32> %src1, <i32 4194303, i32 4194303>
   %vshl_n = shl <2 x i32> %src2, <i32 22, i32 22>
   %result = or <2 x i32> %and.i, %vshl_n
-  store <2 x i32> %result, <2 x i32>* %dest, align 8
+  store <2 x i32> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLeftBad2x32(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %dest) nounwind {
+define void @testLeftBad2x32(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftBad2x32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #4194300
@@ -261,11 +261,11 @@ define void @testLeftBad2x32(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %dest)
   %and.i = and <2 x i32> %src1, <i32 4194300, i32 4194300>
   %vshl_n = shl <2 x i32> %src2, <i32 22, i32 22>
   %result = or <2 x i32> %and.i, %vshl_n
-  store <2 x i32> %result, <2 x i32>* %dest, align 8
+  store <2 x i32> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testRightGood2x32(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %dest) nounwind {
+define void @testRightGood2x32(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightGood2x32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sri.2s v0, v1, #22
@@ -274,11 +274,11 @@ define void @testRightGood2x32(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %des
   %and.i = and <2 x i32> %src1, <i32 4294966272, i32 4294966272>
   %vshl_n = lshr <2 x i32> %src2, <i32 22, i32 22>
   %result = or <2 x i32> %and.i, %vshl_n
-  store <2 x i32> %result, <2 x i32>* %dest, align 8
+  store <2 x i32> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testRightBad2x32(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %dest) nounwind {
+define void @testRightBad2x32(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightBad2x32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #4194300
@@ -291,11 +291,11 @@ define void @testRightBad2x32(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %dest
   %and.i = and <2 x i32> %src1, <i32 4194300, i32 4194300>
   %vshl_n = lshr <2 x i32> %src2, <i32 22, i32 22>
   %result = or <2 x i32> %and.i, %vshl_n
-  store <2 x i32> %result, <2 x i32>* %dest, align 8
+  store <2 x i32> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLeftGood4x32(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %dest) nounwind {
+define void @testLeftGood4x32(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftGood4x32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sli.4s v0, v1, #22
@@ -304,11 +304,11 @@ define void @testLeftGood4x32(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %dest
   %and.i = and <4 x i32> %src1, <i32 4194303, i32 4194303, i32 4194303, i32 4194303>
   %vshl_n = shl <4 x i32> %src2, <i32 22, i32 22, i32 22, i32 22>
   %result = or <4 x i32> %and.i, %vshl_n
-  store <4 x i32> %result, <4 x i32>* %dest, align 16
+  store <4 x i32> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLeftBad4x32(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %dest) nounwind {
+define void @testLeftBad4x32(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftBad4x32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #4194300
@@ -321,11 +321,11 @@ define void @testLeftBad4x32(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %dest)
   %and.i = and <4 x i32> %src1, <i32 4194300, i32 4194300, i32 4194300, i32 4194300>
   %vshl_n = shl <4 x i32> %src2, <i32 22, i32 22, i32 22, i32 22>
   %result = or <4 x i32> %and.i, %vshl_n
-  store <4 x i32> %result, <4 x i32>* %dest, align 16
+  store <4 x i32> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testRightGood4x32(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %dest) nounwind {
+define void @testRightGood4x32(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightGood4x32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sri.4s v0, v1, #22
@@ -334,11 +334,11 @@ define void @testRightGood4x32(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %des
   %and.i = and <4 x i32> %src1, <i32 4294966272, i32 4294966272, i32 4294966272, i32 4294966272>
   %vshl_n = lshr <4 x i32> %src2, <i32 22, i32 22, i32 22, i32 22>
   %result = or <4 x i32> %and.i, %vshl_n
-  store <4 x i32> %result, <4 x i32>* %dest, align 16
+  store <4 x i32> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testRightBad4x32(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %dest) nounwind {
+define void @testRightBad4x32(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightBad4x32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #4194300
@@ -351,11 +351,11 @@ define void @testRightBad4x32(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %dest
   %and.i = and <4 x i32> %src1, <i32 4194300, i32 4194300, i32 4194300, i32 4194300>
   %vshl_n = lshr <4 x i32> %src2, <i32 22, i32 22, i32 22, i32 22>
   %result = or <4 x i32> %and.i, %vshl_n
-  store <4 x i32> %result, <4 x i32>* %dest, align 16
+  store <4 x i32> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLeftGood2x64(<2 x i64> %src1, <2 x i64> %src2, <2 x i64>* %dest) nounwind {
+define void @testLeftGood2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftGood2x64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sli.2d v0, v1, #48
@@ -364,11 +364,11 @@ define void @testLeftGood2x64(<2 x i64> %src1, <2 x i64> %src2, <2 x i64>* %dest
   %and.i = and <2 x i64> %src1, <i64 281474976710655, i64 281474976710655>
   %vshl_n = shl <2 x i64> %src2, <i64 48, i64 48>
   %result = or <2 x i64> %and.i, %vshl_n
-  store <2 x i64> %result, <2 x i64>* %dest, align 16
+  store <2 x i64> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLeftBad2x64(<2 x i64> %src1, <2 x i64> %src2, <2 x i64>* %dest) nounwind {
+define void @testLeftBad2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftBad2x64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #10
@@ -382,11 +382,11 @@ define void @testLeftBad2x64(<2 x i64> %src1, <2 x i64> %src2, <2 x i64>* %dest)
   %and.i = and <2 x i64> %src1, <i64 281474976710666, i64 281474976710666>
   %vshl_n = shl <2 x i64> %src2, <i64 48, i64 48>
   %result = or <2 x i64> %and.i, %vshl_n
-  store <2 x i64> %result, <2 x i64>* %dest, align 16
+  store <2 x i64> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testRightGood2x64(<2 x i64> %src1, <2 x i64> %src2, <2 x i64>* %dest) nounwind {
+define void @testRightGood2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightGood2x64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sri.2d v0, v1, #48
@@ -395,11 +395,11 @@ define void @testRightGood2x64(<2 x i64> %src1, <2 x i64> %src2, <2 x i64>* %des
   %and.i = and <2 x i64> %src1, <i64 18446744073709486080, i64 18446744073709486080>
   %vshl_n = lshr <2 x i64> %src2, <i64 48, i64 48>
   %result = or <2 x i64> %and.i, %vshl_n
-  store <2 x i64> %result, <2 x i64>* %dest, align 16
+  store <2 x i64> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testRightBad2x64(<2 x i64> %src1, <2 x i64> %src2, <2 x i64>* %dest) nounwind {
+define void @testRightBad2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightBad2x64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #10
@@ -413,11 +413,11 @@ define void @testRightBad2x64(<2 x i64> %src1, <2 x i64> %src2, <2 x i64>* %dest
   %and.i = and <2 x i64> %src1, <i64 281474976710666, i64 281474976710666>
   %vshl_n = lshr <2 x i64> %src2, <i64 48, i64 48>
   %result = or <2 x i64> %and.i, %vshl_n
-  store <2 x i64> %result, <2 x i64>* %dest, align 16
+  store <2 x i64> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLeftShouldNotCreateSLI1x128(<1 x i128> %src1, <1 x i128> %src2, <1 x i128>* %dest) nounwind {
+define void @testLeftShouldNotCreateSLI1x128(<1 x i128> %src1, <1 x i128> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftShouldNotCreateSLI1x128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    bfi x1, x2, #6, #58
@@ -426,11 +426,11 @@ define void @testLeftShouldNotCreateSLI1x128(<1 x i128> %src1, <1 x i128> %src2,
   %and.i = and <1 x i128> %src1, <i128 1180591620717411303423>
   %vshl_n = shl <1 x i128> %src2, <i128 70>
   %result = or <1 x i128> %and.i, %vshl_n
-  store <1 x i128> %result, <1 x i128>* %dest, align 16
+  store <1 x i128> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLeftNotAllConstantBuildVec8x8(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) nounwind {
+define void @testLeftNotAllConstantBuildVec8x8(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftNotAllConstantBuildVec8x8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x8, .LCPI29_0
@@ -443,6 +443,6 @@ define void @testLeftNotAllConstantBuildVec8x8(<8 x i8> %src1, <8 x i8> %src2, <
   %and.i = and <8 x i8> %src1, <i8 7, i8 7, i8 255, i8 7, i8 7, i8 7, i8 255, i8 7>
   %vshl_n = shl <8 x i8> %src2, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   %result = or <8 x i8> %and.i, %vshl_n
-  store <8 x i8> %result, <8 x i8>* %dest, align 8
+  store <8 x i8> %result, ptr %dest, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-spill-lr.ll b/llvm/test/CodeGen/AArch64/arm64-spill-lr.ll
index 2ea5d7810a146..24bc04fa0a252 100644
--- a/llvm/test/CodeGen/AArch64/arm64-spill-lr.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-spill-lr.ll
@@ -9,33 +9,32 @@
 define i32 @foo(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) nounwind {
 entry:
   %stack = alloca [128 x i32], align 4
-  %0 = bitcast [128 x i32]* %stack to i8*
   %idxprom = sext i32 %a to i64
-  %arrayidx = getelementptr inbounds [128 x i32], [128 x i32]* %stack, i64 0, i64 %idxprom
-  store i32 %b, i32* %arrayidx, align 4
-  %1 = load volatile i32, i32* @bar, align 4
-  %2 = load volatile i32, i32* @bar, align 4
-  %3 = load volatile i32, i32* @bar, align 4
-  %4 = load volatile i32, i32* @bar, align 4
-  %5 = load volatile i32, i32* @bar, align 4
-  %6 = load volatile i32, i32* @bar, align 4
-  %7 = load volatile i32, i32* @bar, align 4
-  %8 = load volatile i32, i32* @bar, align 4
-  %9 = load volatile i32, i32* @bar, align 4
-  %10 = load volatile i32, i32* @bar, align 4
-  %11 = load volatile i32, i32* @bar, align 4
-  %12 = load volatile i32, i32* @bar, align 4
-  %13 = load volatile i32, i32* @bar, align 4
-  %14 = load volatile i32, i32* @bar, align 4
-  %15 = load volatile i32, i32* @bar, align 4
-  %16 = load volatile i32, i32* @bar, align 4
-  %17 = load volatile i32, i32* @bar, align 4
-  %18 = load volatile i32, i32* @bar, align 4
-  %19 = load volatile i32, i32* @bar, align 4
-  %20 = load volatile i32, i32* @bar, align 4
+  %arrayidx = getelementptr inbounds [128 x i32], ptr %stack, i64 0, i64 %idxprom
+  store i32 %b, ptr %arrayidx, align 4
+  %0 = load volatile i32, ptr @bar, align 4
+  %1 = load volatile i32, ptr @bar, align 4
+  %2 = load volatile i32, ptr @bar, align 4
+  %3 = load volatile i32, ptr @bar, align 4
+  %4 = load volatile i32, ptr @bar, align 4
+  %5 = load volatile i32, ptr @bar, align 4
+  %6 = load volatile i32, ptr @bar, align 4
+  %7 = load volatile i32, ptr @bar, align 4
+  %8 = load volatile i32, ptr @bar, align 4
+  %9 = load volatile i32, ptr @bar, align 4
+  %10 = load volatile i32, ptr @bar, align 4
+  %11 = load volatile i32, ptr @bar, align 4
+  %12 = load volatile i32, ptr @bar, align 4
+  %13 = load volatile i32, ptr @bar, align 4
+  %14 = load volatile i32, ptr @bar, align 4
+  %15 = load volatile i32, ptr @bar, align 4
+  %16 = load volatile i32, ptr @bar, align 4
+  %17 = load volatile i32, ptr @bar, align 4
+  %18 = load volatile i32, ptr @bar, align 4
+  %19 = load volatile i32, ptr @bar, align 4
   %idxprom1 = sext i32 %c to i64
-  %arrayidx2 = getelementptr inbounds [128 x i32], [128 x i32]* %stack, i64 0, i64 %idxprom1
-  %21 = load i32, i32* %arrayidx2, align 4
+  %arrayidx2 = getelementptr inbounds [128 x i32], ptr %stack, i64 0, i64 %idxprom1
+  %20 = load i32, ptr %arrayidx2, align 4
   %factor = mul i32 %h, -2
   %factor67 = mul i32 %g, -2
   %factor68 = mul i32 %f, -2
@@ -43,26 +42,26 @@ entry:
   %factor70 = mul i32 %d, -2
   %factor71 = mul i32 %c, -2
   %factor72 = mul i32 %b, -2
-  %sum = add i32 %2, %1
-  %sum73 = add i32 %sum, %3
-  %sum74 = add i32 %sum73, %4
-  %sum75 = add i32 %sum74, %5
-  %sum76 = add i32 %sum75, %6
-  %sum77 = add i32 %sum76, %7
-  %sum78 = add i32 %sum77, %8
-  %sum79 = add i32 %sum78, %9
-  %sum80 = add i32 %sum79, %10
-  %sum81 = add i32 %sum80, %11
-  %sum82 = add i32 %sum81, %12
-  %sum83 = add i32 %sum82, %13
-  %sum84 = add i32 %sum83, %14
-  %sum85 = add i32 %sum84, %15
-  %sum86 = add i32 %sum85, %16
-  %sum87 = add i32 %sum86, %17
-  %sum88 = add i32 %sum87, %18
-  %sum89 = add i32 %sum88, %19
-  %sum90 = add i32 %sum89, %20
-  %sub15 = sub i32 %21, %sum90
+  %sum = add i32 %1, %0
+  %sum73 = add i32 %sum, %2
+  %sum74 = add i32 %sum73, %3
+  %sum75 = add i32 %sum74, %4
+  %sum76 = add i32 %sum75, %5
+  %sum77 = add i32 %sum76, %6
+  %sum78 = add i32 %sum77, %7
+  %sum79 = add i32 %sum78, %8
+  %sum80 = add i32 %sum79, %9
+  %sum81 = add i32 %sum80, %10
+  %sum82 = add i32 %sum81, %11
+  %sum83 = add i32 %sum82, %12
+  %sum84 = add i32 %sum83, %13
+  %sum85 = add i32 %sum84, %14
+  %sum86 = add i32 %sum85, %15
+  %sum87 = add i32 %sum86, %16
+  %sum88 = add i32 %sum87, %17
+  %sum89 = add i32 %sum88, %18
+  %sum90 = add i32 %sum89, %19
+  %sub15 = sub i32 %20, %sum90
   %sub16 = add i32 %sub15, %factor
   %sub17 = add i32 %sub16, %factor67
   %sub18 = add i32 %sub17, %factor68

diff  --git a/llvm/test/CodeGen/AArch64/arm64-spill-remarks-treshold-hotness.ll b/llvm/test/CodeGen/AArch64/arm64-spill-remarks-treshold-hotness.ll
index e51cdbfac73cd..0578ab585402a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-spill-remarks-treshold-hotness.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-spill-remarks-treshold-hotness.ll
@@ -8,7 +8,7 @@
 ; CHECK: remark: /tmp/kk.c:3:20: 1 spills 3.187500e+01 total spills cost 1 reloads 3.187500e+01 total reloads cost generated in loop{{$}}
 ; THRESHOLD-NOT: remark
 
-define void @fpr128(<4 x float>* %p) nounwind ssp {
+define void @fpr128(ptr %p) nounwind ssp {
 entry:
   br label %loop, !dbg !8
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-spill-remarks.ll b/llvm/test/CodeGen/AArch64/arm64-spill-remarks.ll
index 647bffa69a08a..3600bb7fcd188 100644
--- a/llvm/test/CodeGen/AArch64/arm64-spill-remarks.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-spill-remarks.ll
@@ -133,7 +133,7 @@
 ; THRESHOLD_YAML:   - String:          generated in loop
 ; THRESHOLD_YAML: ...
 
-define void @fpr128(<4 x float>* %p) nounwind ssp !prof !11 !dbg !6 {
+define void @fpr128(ptr %p) nounwind ssp !prof !11 !dbg !6 {
 entry:
   br label %loop, !dbg !8
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-spill.ll b/llvm/test/CodeGen/AArch64/arm64-spill.ll
index 9c0cf38f9a2e5..cbaca482343bd 100644
--- a/llvm/test/CodeGen/AArch64/arm64-spill.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-spill.ll
@@ -6,10 +6,10 @@
 ; CHECK: inlineasm
 ; CHECK: ldr q
 ; CHECK: st1.2d
-define void @fpr128(<4 x float>* %p) nounwind ssp {
+define void @fpr128(ptr %p) nounwind ssp {
 entry:
-  %x = load <4 x float>, <4 x float>* %p, align 16
+  %x = load <4 x float>, ptr %p, align 16
   call void asm sideeffect "; inlineasm", "~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{q16},~{q17},~{q18},~{q19},~{q20},~{q21},~{q22},~{q23},~{q24},~{q25},~{q26},~{q27},~{q28},~{q29},~{q30},~{q31},~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{fp},~{lr},~{sp},~{memory}"() nounwind
-  store <4 x float> %x, <4 x float>* %p, align 16
+  store <4 x float> %x, ptr %p, align 16
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-srl-and.ll b/llvm/test/CodeGen/AArch64/arm64-srl-and.ll
index 51607f9676757..b58f6ba96a5b8 100644
--- a/llvm/test/CodeGen/AArch64/arm64-srl-and.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-srl-and.ll
@@ -18,7 +18,7 @@ define i32 @srl_and()  {
 ; CHECK-NEXT:    and w0, w8, w8, lsr #16
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i16, i16* @g, align 4
+  %0 = load i16, ptr @g, align 4
   %1 = xor i16 %0, 50
   %tobool = icmp ne i16 %1, 0
   %lor.ext = zext i1 %tobool to i32

diff  --git a/llvm/test/CodeGen/AArch64/arm64-st1.ll b/llvm/test/CodeGen/AArch64/arm64-st1.ll
index 28ddd94a71cf8..16b3f47be24a0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-st1.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-st1.ll
@@ -2,623 +2,623 @@
 ; The instruction latencies of Exynos-M3 trigger the transform we see under the Exynos check.
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -verify-machineinstrs -mcpu=exynos-m3 | FileCheck --check-prefix=EXYNOS %s
 
-define void @st1lane_16b(<16 x i8> %A, i8* %D) {
+define void @st1lane_16b(<16 x i8> %A, ptr %D) {
 ; CHECK-LABEL: st1lane_16b
 ; CHECK: st1.b { v0 }[1], [x{{[0-9]+}}]
-  %ptr = getelementptr i8, i8* %D, i64 1
+  %ptr = getelementptr i8, ptr %D, i64 1
   %tmp = extractelement <16 x i8> %A, i32 1
-  store i8 %tmp, i8* %ptr
+  store i8 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_16b(<16 x i8> %A, i8* %D) {
+define void @st1lane0_16b(<16 x i8> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0_16b
 ; CHECK: st1.b { v0 }[0], [x{{[0-9]+}}]
-  %ptr = getelementptr i8, i8* %D, i64 1
+  %ptr = getelementptr i8, ptr %D, i64 1
   %tmp = extractelement <16 x i8> %A, i32 0
-  store i8 %tmp, i8* %ptr
+  store i8 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0u_16b(<16 x i8> %A, i8* %D) {
+define void @st1lane0u_16b(<16 x i8> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0u_16b
 ; CHECK: st1.b { v0 }[0], [x{{[0-9]+}}]
-  %ptr = getelementptr i8, i8* %D, i64 -1
+  %ptr = getelementptr i8, ptr %D, i64 -1
   %tmp = extractelement <16 x i8> %A, i32 0
-  store i8 %tmp, i8* %ptr
+  store i8 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_ro_16b(<16 x i8> %A, i8* %D, i64 %offset) {
+define void @st1lane_ro_16b(<16 x i8> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane_ro_16b
 ; CHECK: add x[[XREG:[0-9]+]], x0, x1
 ; CHECK: st1.b { v0 }[1], [x[[XREG]]]
-  %ptr = getelementptr i8, i8* %D, i64 %offset
+  %ptr = getelementptr i8, ptr %D, i64 %offset
   %tmp = extractelement <16 x i8> %A, i32 1
-  store i8 %tmp, i8* %ptr
+  store i8 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_ro_16b(<16 x i8> %A, i8* %D, i64 %offset) {
+define void @st1lane0_ro_16b(<16 x i8> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane0_ro_16b
 ; CHECK: add x[[XREG:[0-9]+]], x0, x1
 ; CHECK: st1.b { v0 }[0], [x[[XREG]]]
-  %ptr = getelementptr i8, i8* %D, i64 %offset
+  %ptr = getelementptr i8, ptr %D, i64 %offset
   %tmp = extractelement <16 x i8> %A, i32 0
-  store i8 %tmp, i8* %ptr
+  store i8 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_8h(<8 x i16> %A, i16* %D) {
+define void @st1lane_8h(<8 x i16> %A, ptr %D) {
 ; CHECK-LABEL: st1lane_8h
 ; CHECK: st1.h { v0 }[1], [x{{[0-9]+}}]
-  %ptr = getelementptr i16, i16* %D, i64 1
+  %ptr = getelementptr i16, ptr %D, i64 1
   %tmp = extractelement <8 x i16> %A, i32 1
-  store i16 %tmp, i16* %ptr
+  store i16 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_8h(<8 x i16> %A, i16* %D) {
+define void @st1lane0_8h(<8 x i16> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0_8h
 ; CHECK: str h0, [x0, #2]
-  %ptr = getelementptr i16, i16* %D, i64 1
+  %ptr = getelementptr i16, ptr %D, i64 1
   %tmp = extractelement <8 x i16> %A, i32 0
-  store i16 %tmp, i16* %ptr
+  store i16 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0u_8h(<8 x i16> %A, i16* %D) {
+define void @st1lane0u_8h(<8 x i16> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0u_8h
 ; CHECK: stur h0, [x0, #-2]
-  %ptr = getelementptr i16, i16* %D, i64 -1
+  %ptr = getelementptr i16, ptr %D, i64 -1
   %tmp = extractelement <8 x i16> %A, i32 0
-  store i16 %tmp, i16* %ptr
+  store i16 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_ro_8h(<8 x i16> %A, i16* %D, i64 %offset) {
+define void @st1lane_ro_8h(<8 x i16> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane_ro_8h
 ; CHECK: add x[[XREG:[0-9]+]], x0, x1
 ; CHECK: st1.h { v0 }[1], [x[[XREG]]]
-  %ptr = getelementptr i16, i16* %D, i64 %offset
+  %ptr = getelementptr i16, ptr %D, i64 %offset
   %tmp = extractelement <8 x i16> %A, i32 1
-  store i16 %tmp, i16* %ptr
+  store i16 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_ro_8h(<8 x i16> %A, i16* %D, i64 %offset) {
+define void @st1lane0_ro_8h(<8 x i16> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane0_ro_8h
 ; CHECK: str h0, [x0, x1, lsl #1]
-  %ptr = getelementptr i16, i16* %D, i64 %offset
+  %ptr = getelementptr i16, ptr %D, i64 %offset
   %tmp = extractelement <8 x i16> %A, i32 0
-  store i16 %tmp, i16* %ptr
+  store i16 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_4s(<4 x i32> %A, i32* %D) {
+define void @st1lane_4s(<4 x i32> %A, ptr %D) {
 ; CHECK-LABEL: st1lane_4s
 ; CHECK: st1.s { v0 }[1], [x{{[0-9]+}}]
-  %ptr = getelementptr i32, i32* %D, i64 1
+  %ptr = getelementptr i32, ptr %D, i64 1
   %tmp = extractelement <4 x i32> %A, i32 1
-  store i32 %tmp, i32* %ptr
+  store i32 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_4s(<4 x i32> %A, i32* %D) {
+define void @st1lane0_4s(<4 x i32> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0_4s
 ; CHECK: str s0, [x0, #4]
-  %ptr = getelementptr i32, i32* %D, i64 1
+  %ptr = getelementptr i32, ptr %D, i64 1
   %tmp = extractelement <4 x i32> %A, i32 0
-  store i32 %tmp, i32* %ptr
+  store i32 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0u_4s(<4 x i32> %A, i32* %D) {
+define void @st1lane0u_4s(<4 x i32> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0u_4s
 ; CHECK: stur s0, [x0, #-4]
-  %ptr = getelementptr i32, i32* %D, i64 -1
+  %ptr = getelementptr i32, ptr %D, i64 -1
   %tmp = extractelement <4 x i32> %A, i32 0
-  store i32 %tmp, i32* %ptr
+  store i32 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_ro_4s(<4 x i32> %A, i32* %D, i64 %offset) {
+define void @st1lane_ro_4s(<4 x i32> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane_ro_4s
 ; CHECK: add x[[XREG:[0-9]+]], x0, x1
 ; CHECK: st1.s { v0 }[1], [x[[XREG]]]
-  %ptr = getelementptr i32, i32* %D, i64 %offset
+  %ptr = getelementptr i32, ptr %D, i64 %offset
   %tmp = extractelement <4 x i32> %A, i32 1
-  store i32 %tmp, i32* %ptr
+  store i32 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_ro_4s(<4 x i32> %A, i32* %D, i64 %offset) {
+define void @st1lane0_ro_4s(<4 x i32> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane0_ro_4s
 ; CHECK: str s0, [x0, x1, lsl #2]
-  %ptr = getelementptr i32, i32* %D, i64 %offset
+  %ptr = getelementptr i32, ptr %D, i64 %offset
   %tmp = extractelement <4 x i32> %A, i32 0
-  store i32 %tmp, i32* %ptr
+  store i32 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_4s_float(<4 x float> %A, float* %D) {
+define void @st1lane_4s_float(<4 x float> %A, ptr %D) {
 ; CHECK-LABEL: st1lane_4s_float
 ; CHECK: st1.s { v0 }[1], [x{{[0-9]+}}]
-  %ptr = getelementptr float, float* %D, i64 1
+  %ptr = getelementptr float, ptr %D, i64 1
   %tmp = extractelement <4 x float> %A, i32 1
-  store float %tmp, float* %ptr
+  store float %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_4s_float(<4 x float> %A, float* %D) {
+define void @st1lane0_4s_float(<4 x float> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0_4s_float
 ; CHECK: str s0, [x0, #4]
-  %ptr = getelementptr float, float* %D, i64 1
+  %ptr = getelementptr float, ptr %D, i64 1
   %tmp = extractelement <4 x float> %A, i32 0
-  store float %tmp, float* %ptr
+  store float %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0u_4s_float(<4 x float> %A, float* %D) {
+define void @st1lane0u_4s_float(<4 x float> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0u_4s_float
 ; CHECK: stur s0, [x0, #-4]
-  %ptr = getelementptr float, float* %D, i64 -1
+  %ptr = getelementptr float, ptr %D, i64 -1
   %tmp = extractelement <4 x float> %A, i32 0
-  store float %tmp, float* %ptr
+  store float %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_ro_4s_float(<4 x float> %A, float* %D, i64 %offset) {
+define void @st1lane_ro_4s_float(<4 x float> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane_ro_4s_float
 ; CHECK: add x[[XREG:[0-9]+]], x0, x1
 ; CHECK: st1.s { v0 }[1], [x[[XREG]]]
-  %ptr = getelementptr float, float* %D, i64 %offset
+  %ptr = getelementptr float, ptr %D, i64 %offset
   %tmp = extractelement <4 x float> %A, i32 1
-  store float %tmp, float* %ptr
+  store float %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_ro_4s_float(<4 x float> %A, float* %D, i64 %offset) {
+define void @st1lane0_ro_4s_float(<4 x float> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane0_ro_4s_float
 ; CHECK: str s0, [x0, x1, lsl #2]
-  %ptr = getelementptr float, float* %D, i64 %offset
+  %ptr = getelementptr float, ptr %D, i64 %offset
   %tmp = extractelement <4 x float> %A, i32 0
-  store float %tmp, float* %ptr
+  store float %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_2d(<2 x i64> %A, i64* %D) {
+define void @st1lane_2d(<2 x i64> %A, ptr %D) {
 ; CHECK-LABEL: st1lane_2d
 ; CHECK: st1.d { v0 }[1], [x{{[0-9]+}}]
-  %ptr = getelementptr i64, i64* %D, i64 1
+  %ptr = getelementptr i64, ptr %D, i64 1
   %tmp = extractelement <2 x i64> %A, i32 1
-  store i64 %tmp, i64* %ptr
+  store i64 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_2d(<2 x i64> %A, i64* %D) {
+define void @st1lane0_2d(<2 x i64> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0_2d
 ; CHECK: str d0, [x0, #8]
-  %ptr = getelementptr i64, i64* %D, i64 1
+  %ptr = getelementptr i64, ptr %D, i64 1
   %tmp = extractelement <2 x i64> %A, i32 0
-  store i64 %tmp, i64* %ptr
+  store i64 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0u_2d(<2 x i64> %A, i64* %D) {
+define void @st1lane0u_2d(<2 x i64> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0u_2d
 ; CHECK: stur d0, [x0, #-8]
-  %ptr = getelementptr i64, i64* %D, i64 -1
+  %ptr = getelementptr i64, ptr %D, i64 -1
   %tmp = extractelement <2 x i64> %A, i32 0
-  store i64 %tmp, i64* %ptr
+  store i64 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_ro_2d(<2 x i64> %A, i64* %D, i64 %offset) {
+define void @st1lane_ro_2d(<2 x i64> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane_ro_2d
 ; CHECK: add x[[XREG:[0-9]+]], x0, x1
 ; CHECK: st1.d { v0 }[1], [x[[XREG]]]
-  %ptr = getelementptr i64, i64* %D, i64 %offset
+  %ptr = getelementptr i64, ptr %D, i64 %offset
   %tmp = extractelement <2 x i64> %A, i32 1
-  store i64 %tmp, i64* %ptr
+  store i64 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_ro_2d(<2 x i64> %A, i64* %D, i64 %offset) {
+define void @st1lane0_ro_2d(<2 x i64> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane0_ro_2d
 ; CHECK: str d0, [x0, x1, lsl #3]
-  %ptr = getelementptr i64, i64* %D, i64 %offset
+  %ptr = getelementptr i64, ptr %D, i64 %offset
   %tmp = extractelement <2 x i64> %A, i32 0
-  store i64 %tmp, i64* %ptr
+  store i64 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_2d_double(<2 x double> %A, double* %D) {
+define void @st1lane_2d_double(<2 x double> %A, ptr %D) {
 ; CHECK-LABEL: st1lane_2d_double
 ; CHECK: st1.d { v0 }[1], [x{{[0-9]+}}]
-  %ptr = getelementptr double, double* %D, i64 1
+  %ptr = getelementptr double, ptr %D, i64 1
   %tmp = extractelement <2 x double> %A, i32 1
-  store double %tmp, double* %ptr
+  store double %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_2d_double(<2 x double> %A, double* %D) {
+define void @st1lane0_2d_double(<2 x double> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0_2d_double
 ; CHECK: str d0, [x0, #8]
-  %ptr = getelementptr double, double* %D, i64 1
+  %ptr = getelementptr double, ptr %D, i64 1
   %tmp = extractelement <2 x double> %A, i32 0
-  store double %tmp, double* %ptr
+  store double %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0u_2d_double(<2 x double> %A, double* %D) {
+define void @st1lane0u_2d_double(<2 x double> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0u_2d_double
 ; CHECK: stur d0, [x0, #-8]
-  %ptr = getelementptr double, double* %D, i64 -1
+  %ptr = getelementptr double, ptr %D, i64 -1
   %tmp = extractelement <2 x double> %A, i32 0
-  store double %tmp, double* %ptr
+  store double %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_ro_2d_double(<2 x double> %A, double* %D, i64 %offset) {
+define void @st1lane_ro_2d_double(<2 x double> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane_ro_2d_double
 ; CHECK: add x[[XREG:[0-9]+]], x0, x1
 ; CHECK: st1.d { v0 }[1], [x[[XREG]]]
-  %ptr = getelementptr double, double* %D, i64 %offset
+  %ptr = getelementptr double, ptr %D, i64 %offset
   %tmp = extractelement <2 x double> %A, i32 1
-  store double %tmp, double* %ptr
+  store double %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_ro_2d_double(<2 x double> %A, double* %D, i64 %offset) {
+define void @st1lane0_ro_2d_double(<2 x double> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane0_ro_2d_double
 ; CHECK: str d0, [x0, x1, lsl #3]
-  %ptr = getelementptr double, double* %D, i64 %offset
+  %ptr = getelementptr double, ptr %D, i64 %offset
   %tmp = extractelement <2 x double> %A, i32 0
-  store double %tmp, double* %ptr
+  store double %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_8b(<8 x i8> %A, i8* %D) {
+define void @st1lane_8b(<8 x i8> %A, ptr %D) {
 ; CHECK-LABEL: st1lane_8b
 ; CHECK: st1.b { v0 }[1], [x{{[0-9]+}}]
-  %ptr = getelementptr i8, i8* %D, i64 1
+  %ptr = getelementptr i8, ptr %D, i64 1
   %tmp = extractelement <8 x i8> %A, i32 1
-  store i8 %tmp, i8* %ptr
+  store i8 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_ro_8b(<8 x i8> %A, i8* %D, i64 %offset) {
+define void @st1lane_ro_8b(<8 x i8> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane_ro_8b
 ; CHECK: add x[[XREG:[0-9]+]], x0, x1
 ; CHECK: st1.b { v0 }[1], [x[[XREG]]]
-  %ptr = getelementptr i8, i8* %D, i64 %offset
+  %ptr = getelementptr i8, ptr %D, i64 %offset
   %tmp = extractelement <8 x i8> %A, i32 1
-  store i8 %tmp, i8* %ptr
+  store i8 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_ro_8b(<8 x i8> %A, i8* %D, i64 %offset) {
+define void @st1lane0_ro_8b(<8 x i8> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane0_ro_8b
 ; CHECK: add x[[XREG:[0-9]+]], x0, x1
 ; CHECK: st1.b { v0 }[0], [x[[XREG]]]
-  %ptr = getelementptr i8, i8* %D, i64 %offset
+  %ptr = getelementptr i8, ptr %D, i64 %offset
   %tmp = extractelement <8 x i8> %A, i32 0
-  store i8 %tmp, i8* %ptr
+  store i8 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_4h(<4 x i16> %A, i16* %D) {
+define void @st1lane_4h(<4 x i16> %A, ptr %D) {
 ; CHECK-LABEL: st1lane_4h
 ; CHECK: st1.h { v0 }[1], [x{{[0-9]+}}]
-  %ptr = getelementptr i16, i16* %D, i64 1
+  %ptr = getelementptr i16, ptr %D, i64 1
   %tmp = extractelement <4 x i16> %A, i32 1
-  store i16 %tmp, i16* %ptr
+  store i16 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_4h(<4 x i16> %A, i16* %D) {
+define void @st1lane0_4h(<4 x i16> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0_4h
 ; CHECK: str h0, [x0, #2]
-  %ptr = getelementptr i16, i16* %D, i64 1
+  %ptr = getelementptr i16, ptr %D, i64 1
   %tmp = extractelement <4 x i16> %A, i32 0
-  store i16 %tmp, i16* %ptr
+  store i16 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0u_4h(<4 x i16> %A, i16* %D) {
+define void @st1lane0u_4h(<4 x i16> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0u_4h
 ; CHECK: stur h0, [x0, #-2]
-  %ptr = getelementptr i16, i16* %D, i64 -1
+  %ptr = getelementptr i16, ptr %D, i64 -1
   %tmp = extractelement <4 x i16> %A, i32 0
-  store i16 %tmp, i16* %ptr
+  store i16 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_ro_4h(<4 x i16> %A, i16* %D, i64 %offset) {
+define void @st1lane_ro_4h(<4 x i16> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane_ro_4h
 ; CHECK: add x[[XREG:[0-9]+]], x0, x1
 ; CHECK: st1.h { v0 }[1], [x[[XREG]]]
-  %ptr = getelementptr i16, i16* %D, i64 %offset
+  %ptr = getelementptr i16, ptr %D, i64 %offset
   %tmp = extractelement <4 x i16> %A, i32 1
-  store i16 %tmp, i16* %ptr
+  store i16 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_ro_4h(<4 x i16> %A, i16* %D, i64 %offset) {
+define void @st1lane0_ro_4h(<4 x i16> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane0_ro_4h
 ; CHECK: str h0, [x0, x1, lsl #1]
-  %ptr = getelementptr i16, i16* %D, i64 %offset
+  %ptr = getelementptr i16, ptr %D, i64 %offset
   %tmp = extractelement <4 x i16> %A, i32 0
-  store i16 %tmp, i16* %ptr
+  store i16 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_2s(<2 x i32> %A, i32* %D) {
+define void @st1lane_2s(<2 x i32> %A, ptr %D) {
 ; CHECK-LABEL: st1lane_2s
 ; CHECK: st1.s { v0 }[1], [x{{[0-9]+}}]
-  %ptr = getelementptr i32, i32* %D, i64 1
+  %ptr = getelementptr i32, ptr %D, i64 1
   %tmp = extractelement <2 x i32> %A, i32 1
-  store i32 %tmp, i32* %ptr
+  store i32 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_2s(<2 x i32> %A, i32* %D) {
+define void @st1lane0_2s(<2 x i32> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0_2s
 ; CHECK: str s0, [x0, #4]
-  %ptr = getelementptr i32, i32* %D, i64 1
+  %ptr = getelementptr i32, ptr %D, i64 1
   %tmp = extractelement <2 x i32> %A, i32 0
-  store i32 %tmp, i32* %ptr
+  store i32 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0u_2s(<2 x i32> %A, i32* %D) {
+define void @st1lane0u_2s(<2 x i32> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0u_2s
 ; CHECK: stur s0, [x0, #-4]
-  %ptr = getelementptr i32, i32* %D, i64 -1
+  %ptr = getelementptr i32, ptr %D, i64 -1
   %tmp = extractelement <2 x i32> %A, i32 0
-  store i32 %tmp, i32* %ptr
+  store i32 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_ro_2s(<2 x i32> %A, i32* %D, i64 %offset) {
+define void @st1lane_ro_2s(<2 x i32> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane_ro_2s
 ; CHECK: add x[[XREG:[0-9]+]], x0, x1
 ; CHECK: st1.s { v0 }[1], [x[[XREG]]]
-  %ptr = getelementptr i32, i32* %D, i64 %offset
+  %ptr = getelementptr i32, ptr %D, i64 %offset
   %tmp = extractelement <2 x i32> %A, i32 1
-  store i32 %tmp, i32* %ptr
+  store i32 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_ro_2s(<2 x i32> %A, i32* %D, i64 %offset) {
+define void @st1lane0_ro_2s(<2 x i32> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane0_ro_2s
 ; CHECK: str s0, [x0, x1, lsl #2]
-  %ptr = getelementptr i32, i32* %D, i64 %offset
+  %ptr = getelementptr i32, ptr %D, i64 %offset
   %tmp = extractelement <2 x i32> %A, i32 0
-  store i32 %tmp, i32* %ptr
+  store i32 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_2s_float(<2 x float> %A, float* %D) {
+define void @st1lane_2s_float(<2 x float> %A, ptr %D) {
 ; CHECK-LABEL: st1lane_2s_float
 ; CHECK: st1.s { v0 }[1], [x{{[0-9]+}}]
-  %ptr = getelementptr float, float* %D, i64 1
+  %ptr = getelementptr float, ptr %D, i64 1
   %tmp = extractelement <2 x float> %A, i32 1
-  store float %tmp, float* %ptr
+  store float %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_2s_float(<2 x float> %A, float* %D) {
+define void @st1lane0_2s_float(<2 x float> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0_2s_float
 ; CHECK: str s0, [x0, #4]
-  %ptr = getelementptr float, float* %D, i64 1
+  %ptr = getelementptr float, ptr %D, i64 1
   %tmp = extractelement <2 x float> %A, i32 0
-  store float %tmp, float* %ptr
+  store float %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0u_2s_float(<2 x float> %A, float* %D) {
+define void @st1lane0u_2s_float(<2 x float> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0u_2s_float
 ; CHECK: stur s0, [x0, #-4]
-  %ptr = getelementptr float, float* %D, i64 -1
+  %ptr = getelementptr float, ptr %D, i64 -1
   %tmp = extractelement <2 x float> %A, i32 0
-  store float %tmp, float* %ptr
+  store float %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane_ro_2s_float(<2 x float> %A, float* %D, i64 %offset) {
+define void @st1lane_ro_2s_float(<2 x float> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane_ro_2s_float
 ; CHECK: add x[[XREG:[0-9]+]], x0, x1
 ; CHECK: st1.s { v0 }[1], [x[[XREG]]]
-  %ptr = getelementptr float, float* %D, i64 %offset
+  %ptr = getelementptr float, ptr %D, i64 %offset
   %tmp = extractelement <2 x float> %A, i32 1
-  store float %tmp, float* %ptr
+  store float %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_ro_2s_float(<2 x float> %A, float* %D, i64 %offset) {
+define void @st1lane0_ro_2s_float(<2 x float> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane0_ro_2s_float
 ; CHECK: str s0, [x0, x1, lsl #2]
-  %ptr = getelementptr float, float* %D, i64 %offset
+  %ptr = getelementptr float, ptr %D, i64 %offset
   %tmp = extractelement <2 x float> %A, i32 0
-  store float %tmp, float* %ptr
+  store float %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_1d(<1 x i64> %A, i64* %D) {
+define void @st1lane0_1d(<1 x i64> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0_1d
 ; CHECK: str d0, [x0, #8]
-  %ptr = getelementptr i64, i64* %D, i64 1
+  %ptr = getelementptr i64, ptr %D, i64 1
   %tmp = extractelement <1 x i64> %A, i32 0
-  store i64 %tmp, i64* %ptr
+  store i64 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0u_1d(<1 x i64> %A, i64* %D) {
+define void @st1lane0u_1d(<1 x i64> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0u_1d
 ; CHECK: stur d0, [x0, #-8]
-  %ptr = getelementptr i64, i64* %D, i64 -1
+  %ptr = getelementptr i64, ptr %D, i64 -1
   %tmp = extractelement <1 x i64> %A, i32 0
-  store i64 %tmp, i64* %ptr
+  store i64 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_ro_1d(<1 x i64> %A, i64* %D, i64 %offset) {
+define void @st1lane0_ro_1d(<1 x i64> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane0_ro_1d
 ; CHECK: str d0, [x0, x1, lsl #3]
-  %ptr = getelementptr i64, i64* %D, i64 %offset
+  %ptr = getelementptr i64, ptr %D, i64 %offset
   %tmp = extractelement <1 x i64> %A, i32 0
-  store i64 %tmp, i64* %ptr
+  store i64 %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_1d_double(<1 x double> %A, double* %D) {
+define void @st1lane0_1d_double(<1 x double> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0_1d_double
 ; CHECK: str d0, [x0, #8]
-  %ptr = getelementptr double, double* %D, i64 1
+  %ptr = getelementptr double, ptr %D, i64 1
   %tmp = extractelement <1 x double> %A, i32 0
-  store double %tmp, double* %ptr
+  store double %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0u_1d_double(<1 x double> %A, double* %D) {
+define void @st1lane0u_1d_double(<1 x double> %A, ptr %D) {
 ; CHECK-LABEL: st1lane0u_1d_double
 ; CHECK: stur d0, [x0, #-8]
-  %ptr = getelementptr double, double* %D, i64 -1
+  %ptr = getelementptr double, ptr %D, i64 -1
   %tmp = extractelement <1 x double> %A, i32 0
-  store double %tmp, double* %ptr
+  store double %tmp, ptr %ptr
   ret void
 }
 
-define void @st1lane0_ro_1d_double(<1 x double> %A, double* %D, i64 %offset) {
+define void @st1lane0_ro_1d_double(<1 x double> %A, ptr %D, i64 %offset) {
 ; CHECK-LABEL: st1lane0_ro_1d_double
 ; CHECK: str d0, [x0, x1, lsl #3]
-  %ptr = getelementptr double, double* %D, i64 %offset
+  %ptr = getelementptr double, ptr %D, i64 %offset
   %tmp = extractelement <1 x double> %A, i32 0
-  store double %tmp, double* %ptr
+  store double %tmp, ptr %ptr
   ret void
 }
 
-define void @st2lane_16b(<16 x i8> %A, <16 x i8> %B, i8* %D) {
+define void @st2lane_16b(<16 x i8> %A, <16 x i8> %B, ptr %D) {
 ; CHECK-LABEL: st2lane_16b
 ; CHECK: st2.b
-  call void @llvm.aarch64.neon.st2lane.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, i64 1, i8* %D)
+  call void @llvm.aarch64.neon.st2lane.v16i8.p0(<16 x i8> %A, <16 x i8> %B, i64 1, ptr %D)
   ret void
 }
 
-define void @st2lane_8h(<8 x i16> %A, <8 x i16> %B, i16* %D) {
+define void @st2lane_8h(<8 x i16> %A, <8 x i16> %B, ptr %D) {
 ; CHECK-LABEL: st2lane_8h
 ; CHECK: st2.h
-  call void @llvm.aarch64.neon.st2lane.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, i64 1, i16* %D)
+  call void @llvm.aarch64.neon.st2lane.v8i16.p0(<8 x i16> %A, <8 x i16> %B, i64 1, ptr %D)
   ret void
 }
 
-define void @st2lane_4s(<4 x i32> %A, <4 x i32> %B, i32* %D) {
+define void @st2lane_4s(<4 x i32> %A, <4 x i32> %B, ptr %D) {
 ; CHECK-LABEL: st2lane_4s
 ; CHECK: st2.s
-  call void @llvm.aarch64.neon.st2lane.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, i64 1, i32* %D)
+  call void @llvm.aarch64.neon.st2lane.v4i32.p0(<4 x i32> %A, <4 x i32> %B, i64 1, ptr %D)
   ret void
 }
 
-define void @st2lane_2d(<2 x i64> %A, <2 x i64> %B, i64* %D) {
+define void @st2lane_2d(<2 x i64> %A, <2 x i64> %B, ptr %D) {
 ; CHECK-LABEL: st2lane_2d
 ; CHECK: st2.d
-  call void @llvm.aarch64.neon.st2lane.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, i64 1, i64* %D)
+  call void @llvm.aarch64.neon.st2lane.v2i64.p0(<2 x i64> %A, <2 x i64> %B, i64 1, ptr %D)
   ret void
 }
 
-declare void @llvm.aarch64.neon.st2lane.v16i8.p0i8(<16 x i8>, <16 x i8>, i64, i8*) nounwind readnone
-declare void @llvm.aarch64.neon.st2lane.v8i16.p0i16(<8 x i16>, <8 x i16>, i64, i16*) nounwind readnone
-declare void @llvm.aarch64.neon.st2lane.v4i32.p0i32(<4 x i32>, <4 x i32>, i64, i32*) nounwind readnone
-declare void @llvm.aarch64.neon.st2lane.v2i64.p0i64(<2 x i64>, <2 x i64>, i64, i64*) nounwind readnone
+declare void @llvm.aarch64.neon.st2lane.v16i8.p0(<16 x i8>, <16 x i8>, i64, ptr) nounwind readnone
+declare void @llvm.aarch64.neon.st2lane.v8i16.p0(<8 x i16>, <8 x i16>, i64, ptr) nounwind readnone
+declare void @llvm.aarch64.neon.st2lane.v4i32.p0(<4 x i32>, <4 x i32>, i64, ptr) nounwind readnone
+declare void @llvm.aarch64.neon.st2lane.v2i64.p0(<2 x i64>, <2 x i64>, i64, ptr) nounwind readnone
 
-define void @st3lane_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, i8* %D) {
+define void @st3lane_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %D) {
 ; CHECK-LABEL: st3lane_16b
 ; CHECK: st3.b
-  call void @llvm.aarch64.neon.st3lane.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, i64 1, i8* %D)
+  call void @llvm.aarch64.neon.st3lane.v16i8.p0(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, i64 1, ptr %D)
   ret void
 }
 
-define void @st3lane_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, i16* %D) {
+define void @st3lane_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %D) {
 ; CHECK-LABEL: st3lane_8h
 ; CHECK: st3.h
-  call void @llvm.aarch64.neon.st3lane.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, i64 1, i16* %D)
+  call void @llvm.aarch64.neon.st3lane.v8i16.p0(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, i64 1, ptr %D)
   ret void
 }
 
-define void @st3lane_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, i32* %D) {
+define void @st3lane_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %D) {
 ; CHECK-LABEL: st3lane_4s
 ; CHECK: st3.s
-  call void @llvm.aarch64.neon.st3lane.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, i64 1, i32* %D)
+  call void @llvm.aarch64.neon.st3lane.v4i32.p0(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, i64 1, ptr %D)
   ret void
 }
 
-define void @st3lane_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, i64* %D) {
+define void @st3lane_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %D) {
 ; CHECK-LABEL: st3lane_2d
 ; CHECK: st3.d
-  call void @llvm.aarch64.neon.st3lane.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, i64 1, i64* %D)
+  call void @llvm.aarch64.neon.st3lane.v2i64.p0(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, i64 1, ptr %D)
   ret void
 }
 
-declare void @llvm.aarch64.neon.st3lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i64, i8*) nounwind readnone
-declare void @llvm.aarch64.neon.st3lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, i64, i16*) nounwind readnone
-declare void @llvm.aarch64.neon.st3lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i64, i32*) nounwind readnone
-declare void @llvm.aarch64.neon.st3lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64, i64*) nounwind readnone
+declare void @llvm.aarch64.neon.st3lane.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, i64, ptr) nounwind readnone
+declare void @llvm.aarch64.neon.st3lane.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, i64, ptr) nounwind readnone
+declare void @llvm.aarch64.neon.st3lane.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, i64, ptr) nounwind readnone
+declare void @llvm.aarch64.neon.st3lane.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, i64, ptr) nounwind readnone
 
-define void @st4lane_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %E) {
+define void @st4lane_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr %E) {
 ; CHECK-LABEL: st4lane_16b
 ; CHECK: st4.b
-  call void @llvm.aarch64.neon.st4lane.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 1, i8* %E)
+  call void @llvm.aarch64.neon.st4lane.v16i8.p0(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 1, ptr %E)
   ret void
 }
 
-define void @st4lane_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %E) {
+define void @st4lane_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr %E) {
 ; CHECK-LABEL: st4lane_8h
 ; CHECK: st4.h
-  call void @llvm.aarch64.neon.st4lane.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 1, i16* %E)
+  call void @llvm.aarch64.neon.st4lane.v8i16.p0(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 1, ptr %E)
   ret void
 }
 
-define void @st4lane_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %E) {
+define void @st4lane_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr %E) {
 ; CHECK-LABEL: st4lane_4s
 ; CHECK: st4.s
-  call void @llvm.aarch64.neon.st4lane.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 1, i32* %E)
+  call void @llvm.aarch64.neon.st4lane.v4i32.p0(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 1, ptr %E)
   ret void
 }
 
-define void @st4lane_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %E) {
+define void @st4lane_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %E) {
 ; CHECK-LABEL: st4lane_2d
 ; CHECK: st4.d
-  call void @llvm.aarch64.neon.st4lane.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 1, i64* %E)
+  call void @llvm.aarch64.neon.st4lane.v2i64.p0(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 1, ptr %E)
   ret void
 }
 
-declare void @llvm.aarch64.neon.st4lane.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i64, i8*) nounwind readnone
-declare void @llvm.aarch64.neon.st4lane.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i64, i16*) nounwind readnone
-declare void @llvm.aarch64.neon.st4lane.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, i32*) nounwind readnone
-declare void @llvm.aarch64.neon.st4lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64, i64*) nounwind readnone
+declare void @llvm.aarch64.neon.st4lane.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i64, ptr) nounwind readnone
+declare void @llvm.aarch64.neon.st4lane.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i64, ptr) nounwind readnone
+declare void @llvm.aarch64.neon.st4lane.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i64, ptr) nounwind readnone
+declare void @llvm.aarch64.neon.st4lane.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64, ptr) nounwind readnone
 
 
-define void @st2_8b(<8 x i8> %A, <8 x i8> %B, i8* %P) nounwind {
+define void @st2_8b(<8 x i8> %A, <8 x i8> %B, ptr %P) nounwind {
 ; CHECK-LABEL: st2_8b
 ; CHECK: st2.8b
 ; EXYNOS-LABEL: st2_8b
 ; EXYNOS: zip1.8b
 ; EXYNOS: zip2.8b
 ; EXYNOS: stp
-	call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %A, <8 x i8> %B, i8* %P)
+	call void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8> %A, <8 x i8> %B, ptr %P)
 	ret void
 }
 
-define void @st3_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, i8* %P) nounwind {
+define void @st3_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P) nounwind {
 ; CHECK-LABEL: st3_8b
 ; CHECK: st3.8b
-	call void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, i8* %P)
+	call void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %P)
 	ret void
 }
 
-define void @st4_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %P) nounwind {
+define void @st4_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P) nounwind {
 ; CHECK-LABEL: st4_8b
 ; CHECK: st4.8b
 ; EXYNOS-LABEL: st4_8b
@@ -632,33 +632,33 @@ define void @st4_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %P)
 ; EXYNOS: zip1.8b
 ; EXYNOS: zip2.8b
 ; EXYNOS: stp
-	call void @llvm.aarch64.neon.st4.v8i8.p0i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %P)
+	call void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %P)
 	ret void
 }
 
-declare void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*) nounwind readonly
-declare void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, i8*) nounwind readonly
-declare void @llvm.aarch64.neon.st4.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i8*) nounwind readonly
+declare void @llvm.aarch64.neon.st2.v8i8.p0(<8 x i8>, <8 x i8>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st3.v8i8.p0(<8 x i8>, <8 x i8>, <8 x i8>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st4.v8i8.p0(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, ptr) nounwind readonly
 
-define void @st2_16b(<16 x i8> %A, <16 x i8> %B, i8* %P) nounwind {
+define void @st2_16b(<16 x i8> %A, <16 x i8> %B, ptr %P) nounwind {
 ; CHECK-LABEL: st2_16b
 ; CHECK: st2.16b
 ; EXYNOS-LABEL: st2_16b
 ; EXYNOS: zip1.16b
 ; EXYNOS: zip2.16b
 ; EXYNOS: stp
-	call void @llvm.aarch64.neon.st2.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, i8* %P)
+	call void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8> %A, <16 x i8> %B, ptr %P)
 	ret void
 }
 
-define void @st3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, i8* %P) nounwind {
+define void @st3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %P) nounwind {
 ; CHECK-LABEL: st3_16b
 ; CHECK: st3.16b
-	call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, i8* %P)
+	call void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %P)
 	ret void
 }
 
-define void @st4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %P) nounwind {
+define void @st4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr %P) nounwind {
 ; CHECK-LABEL: st4_16b
 ; CHECK: st4.16b
 ; EXYNOS-LABEL: st4_16b
@@ -672,33 +672,33 @@ define void @st4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8*
 ; EXYNOS: zip1.16b
 ; EXYNOS: zip2.16b
 ; EXYNOS: stp
-	call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %P)
+	call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr %P)
 	ret void
 }
 
-declare void @llvm.aarch64.neon.st2.v16i8.p0i8(<16 x i8>, <16 x i8>, i8*) nounwind readonly
-declare void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i8*) nounwind readonly
-declare void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i8*) nounwind readonly
+declare void @llvm.aarch64.neon.st2.v16i8.p0(<16 x i8>, <16 x i8>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st3.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, ptr) nounwind readonly
 
-define void @st2_4h(<4 x i16> %A, <4 x i16> %B, i16* %P) nounwind {
+define void @st2_4h(<4 x i16> %A, <4 x i16> %B, ptr %P) nounwind {
 ; CHECK-LABEL: st2_4h
 ; CHECK: st2.4h
 ; EXYNOS-LABEL: st2_4h
 ; EXYNOS: zip1.4h
 ; EXYNOS: zip2.4h
 ; EXYNOS: stp
-	call void @llvm.aarch64.neon.st2.v4i16.p0i16(<4 x i16> %A, <4 x i16> %B, i16* %P)
+	call void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16> %A, <4 x i16> %B, ptr %P)
 	ret void
 }
 
-define void @st3_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, i16* %P) nounwind {
+define void @st3_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %P) nounwind {
 ; CHECK-LABEL: st3_4h
 ; CHECK: st3.4h
-	call void @llvm.aarch64.neon.st3.v4i16.p0i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, i16* %P)
+	call void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %P)
 	ret void
 }
 
-define void @st4_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %P) nounwind {
+define void @st4_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr %P) nounwind {
 ; CHECK-LABEL: st4_4h
 ; CHECK: st4.4h
 ; EXYNOS-LABEL: st4_4h
@@ -712,33 +712,33 @@ define void @st4_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16*
 ; EXYNOS: zip1.4h
 ; EXYNOS: zip2.4h
 ; EXYNOS: stp
-	call void @llvm.aarch64.neon.st4.v4i16.p0i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %P)
+	call void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr %P)
 	ret void
 }
 
-declare void @llvm.aarch64.neon.st2.v4i16.p0i16(<4 x i16>, <4 x i16>, i16*) nounwind readonly
-declare void @llvm.aarch64.neon.st3.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, i16*) nounwind readonly
-declare void @llvm.aarch64.neon.st4.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i16*) nounwind readonly
+declare void @llvm.aarch64.neon.st2.v4i16.p0(<4 x i16>, <4 x i16>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st3.v4i16.p0(<4 x i16>, <4 x i16>, <4 x i16>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st4.v4i16.p0(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, ptr) nounwind readonly
 
-define void @st2_8h(<8 x i16> %A, <8 x i16> %B, i16* %P) nounwind {
+define void @st2_8h(<8 x i16> %A, <8 x i16> %B, ptr %P) nounwind {
 ; CHECK-LABEL: st2_8h
 ; CHECK: st2.8h
 ; EXYNOS-LABEL: st2_8h
 ; EXYNOS: zip1.8h
 ; EXYNOS: zip2.8h
 ; EXYNOS: stp
-	call void @llvm.aarch64.neon.st2.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, i16* %P)
+	call void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16> %A, <8 x i16> %B, ptr %P)
 	ret void
 }
 
-define void @st3_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, i16* %P) nounwind {
+define void @st3_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %P) nounwind {
 ; CHECK-LABEL: st3_8h
 ; CHECK: st3.8h
-	call void @llvm.aarch64.neon.st3.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, i16* %P)
+	call void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %P)
 	ret void
 }
 
-define void @st4_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %P) nounwind {
+define void @st4_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr %P) nounwind {
 ; CHECK-LABEL: st4_8h
 ; CHECK: st4.8h
 ; EXYNOS-LABEL: st4_8h
@@ -752,33 +752,33 @@ define void @st4_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16*
 ; EXYNOS: zip1.8h
 ; EXYNOS: zip2.8h
 ; EXYNOS: stp
-	call void @llvm.aarch64.neon.st4.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %P)
+	call void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr %P)
 	ret void
 }
 
-declare void @llvm.aarch64.neon.st2.v8i16.p0i16(<8 x i16>, <8 x i16>, i16*) nounwind readonly
-declare void @llvm.aarch64.neon.st3.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, i16*) nounwind readonly
-declare void @llvm.aarch64.neon.st4.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i16*) nounwind readonly
+declare void @llvm.aarch64.neon.st2.v8i16.p0(<8 x i16>, <8 x i16>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st3.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st4.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, ptr) nounwind readonly
 
-define void @st2_2s(<2 x i32> %A, <2 x i32> %B, i32* %P) nounwind {
+define void @st2_2s(<2 x i32> %A, <2 x i32> %B, ptr %P) nounwind {
 ; CHECK-LABEL: st2_2s
 ; CHECK: st2.2s
 ; EXYNOS-LABEL: st2_2s
 ; EXYNOS: zip1.2s
 ; EXYNOS: zip2.2s
 ; EXYNOS: stp
-	call void @llvm.aarch64.neon.st2.v2i32.p0i32(<2 x i32> %A, <2 x i32> %B, i32* %P)
+	call void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32> %A, <2 x i32> %B, ptr %P)
 	ret void
 }
 
-define void @st3_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, i32* %P) nounwind {
+define void @st3_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr %P) nounwind {
 ; CHECK-LABEL: st3_2s
 ; CHECK: st3.2s
-	call void @llvm.aarch64.neon.st3.v2i32.p0i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, i32* %P)
+	call void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr %P)
 	ret void
 }
 
-define void @st4_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %P) nounwind {
+define void @st4_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr %P) nounwind {
 ; CHECK-LABEL: st4_2s
 ; CHECK: st4.2s
 ; EXYNOS-LABEL: st4_2s
@@ -792,33 +792,33 @@ define void @st4_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32*
 ; EXYNOS: zip1.2s
 ; EXYNOS: zip2.2s
 ; EXYNOS: stp
-	call void @llvm.aarch64.neon.st4.v2i32.p0i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %P)
+	call void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr %P)
 	ret void
 }
 
-declare void @llvm.aarch64.neon.st2.v2i32.p0i32(<2 x i32>, <2 x i32>, i32*) nounwind readonly
-declare void @llvm.aarch64.neon.st3.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, i32*) nounwind readonly
-declare void @llvm.aarch64.neon.st4.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32*) nounwind readonly
+declare void @llvm.aarch64.neon.st2.v2i32.p0(<2 x i32>, <2 x i32>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st3.v2i32.p0(<2 x i32>, <2 x i32>, <2 x i32>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st4.v2i32.p0(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, ptr) nounwind readonly
 
-define void @st2_4s(<4 x i32> %A, <4 x i32> %B, i32* %P) nounwind {
+define void @st2_4s(<4 x i32> %A, <4 x i32> %B, ptr %P) nounwind {
 ; CHECK-LABEL: st2_4s
 ; CHECK: st2.4s
 ; EXYNOS-LABEL: st2_4s
 ; EXYNOS: zip1.4s
 ; EXYNOS: zip2.4s
 ; EXYNOS: stp
-	call void @llvm.aarch64.neon.st2.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, i32* %P)
+	call void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32> %A, <4 x i32> %B, ptr %P)
 	ret void
 }
 
-define void @st3_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, i32* %P) nounwind {
+define void @st3_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %P) nounwind {
 ; CHECK-LABEL: st3_4s
 ; CHECK: st3.4s
-	call void @llvm.aarch64.neon.st3.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, i32* %P)
+	call void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %P)
 	ret void
 }
 
-define void @st4_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %P) nounwind {
+define void @st4_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr %P) nounwind {
 ; CHECK-LABEL: st4_4s
 ; CHECK: st4.4s
 ; EXYNOS-LABEL: st4_4s
@@ -832,59 +832,59 @@ define void @st4_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32*
 ; EXYNOS: zip1.4s
 ; EXYNOS: zip2.4s
 ; EXYNOS: stp
-	call void @llvm.aarch64.neon.st4.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %P)
+	call void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr %P)
 	ret void
 }
 
-declare void @llvm.aarch64.neon.st2.v4i32.p0i32(<4 x i32>, <4 x i32>, i32*) nounwind readonly
-declare void @llvm.aarch64.neon.st3.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i32*) nounwind readonly
-declare void @llvm.aarch64.neon.st4.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32*) nounwind readonly
+declare void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32>, <4 x i32>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st3.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st4.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, ptr) nounwind readonly
 
 ; If there's only one element, st2/3/4 don't make much sense, stick to st1.
-define void @st2_1d(<1 x i64> %A, <1 x i64> %B, i64* %P) nounwind {
+define void @st2_1d(<1 x i64> %A, <1 x i64> %B, ptr %P) nounwind {
 ; CHECK-LABEL: st2_1d
 ; CHECK: st1.1d
-	call void @llvm.aarch64.neon.st2.v1i64.p0i64(<1 x i64> %A, <1 x i64> %B, i64* %P)
+	call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> %A, <1 x i64> %B, ptr %P)
 	ret void
 }
 
-define void @st3_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, i64* %P) nounwind {
+define void @st3_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %P) nounwind {
 ; CHECK-LABEL: st3_1d
 ; CHECK: st1.1d
-	call void @llvm.aarch64.neon.st3.v1i64.p0i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, i64* %P)
+	call void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %P)
 	ret void
 }
 
-define void @st4_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %P) nounwind {
+define void @st4_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr %P) nounwind {
 ; CHECK-LABEL: st4_1d
 ; CHECK: st1.1d
-	call void @llvm.aarch64.neon.st4.v1i64.p0i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %P)
+	call void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr %P)
 	ret void
 }
 
-declare void @llvm.aarch64.neon.st2.v1i64.p0i64(<1 x i64>, <1 x i64>, i64*) nounwind readonly
-declare void @llvm.aarch64.neon.st3.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, i64*) nounwind readonly
-declare void @llvm.aarch64.neon.st4.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64*) nounwind readonly
+declare void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64>, <1 x i64>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st3.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st4.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, ptr) nounwind readonly
 
-define void @st2_2d(<2 x i64> %A, <2 x i64> %B, i64* %P) nounwind {
+define void @st2_2d(<2 x i64> %A, <2 x i64> %B, ptr %P) nounwind {
 ; CHECK-LABEL: st2_2d
 ; CHECK: st2.2d
 ; EXYNOS-LABEL: st2_2d
 ; EXYNOS: zip1.2d
 ; EXYNOS: zip2.2d
 ; EXYNOS: stp
-	call void @llvm.aarch64.neon.st2.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, i64* %P)
+	call void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64> %A, <2 x i64> %B, ptr %P)
 	ret void
 }
 
-define void @st3_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, i64* %P) nounwind {
+define void @st3_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P) nounwind {
 ; CHECK-LABEL: st3_2d
 ; CHECK: st3.2d
-	call void @llvm.aarch64.neon.st3.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, i64* %P)
+	call void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %P)
 	ret void
 }
 
-define void @st4_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %P) nounwind {
+define void @st4_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P) nounwind {
 ; CHECK-LABEL: st4_2d
 ; CHECK: st4.2d
 ; EXYNOS-LABEL: st4_2d
@@ -898,305 +898,305 @@ define void @st4_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64*
 ; EXYNOS: zip1.2d
 ; EXYNOS: zip2.2d
 ; EXYNOS: stp
-	call void @llvm.aarch64.neon.st4.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %P)
+	call void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %P)
 	ret void
 }
 
-declare void @llvm.aarch64.neon.st2.v2i64.p0i64(<2 x i64>, <2 x i64>, i64*) nounwind readonly
-declare void @llvm.aarch64.neon.st3.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64*) nounwind readonly
-declare void @llvm.aarch64.neon.st4.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64*) nounwind readonly
+declare void @llvm.aarch64.neon.st2.v2i64.p0(<2 x i64>, <2 x i64>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st3.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st4.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, ptr) nounwind readonly
 
-declare void @llvm.aarch64.neon.st1x2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x2.v4i16.p0i16(<4 x i16>, <4 x i16>, i16*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x2.v2i32.p0i32(<2 x i32>, <2 x i32>, i32*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x2.v2f32.p0f32(<2 x float>, <2 x float>, float*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x2.v1i64.p0i64(<1 x i64>, <1 x i64>, i64*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x2.v1f64.p0f64(<1 x double>, <1 x double>, double*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v8i8.p0(<8 x i8>, <8 x i8>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v4i16.p0(<4 x i16>, <4 x i16>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v2i32.p0(<2 x i32>, <2 x i32>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v2f32.p0(<2 x float>, <2 x float>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v1i64.p0(<1 x i64>, <1 x i64>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v1f64.p0(<1 x double>, <1 x double>, ptr) nounwind readonly
 
-define void @st1_x2_v8i8(<8 x i8> %A, <8 x i8> %B, i8* %addr) {
+define void @st1_x2_v8i8(<8 x i8> %A, <8 x i8> %B, ptr %addr) {
 ; CHECK-LABEL: st1_x2_v8i8:
 ; CHECK: st1.8b { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x2.v8i8.p0i8(<8 x i8> %A, <8 x i8> %B, i8* %addr)
+  call void @llvm.aarch64.neon.st1x2.v8i8.p0(<8 x i8> %A, <8 x i8> %B, ptr %addr)
   ret void
 }
 
-define void @st1_x2_v4i16(<4 x i16> %A, <4 x i16> %B, i16* %addr) {
+define void @st1_x2_v4i16(<4 x i16> %A, <4 x i16> %B, ptr %addr) {
 ; CHECK-LABEL: st1_x2_v4i16:
 ; CHECK: st1.4h { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x2.v4i16.p0i16(<4 x i16> %A, <4 x i16> %B, i16* %addr)
+  call void @llvm.aarch64.neon.st1x2.v4i16.p0(<4 x i16> %A, <4 x i16> %B, ptr %addr)
   ret void
 }
 
-define void @st1_x2_v2i32(<2 x i32> %A, <2 x i32> %B, i32* %addr) {
+define void @st1_x2_v2i32(<2 x i32> %A, <2 x i32> %B, ptr %addr) {
 ; CHECK-LABEL: st1_x2_v2i32:
 ; CHECK: st1.2s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x2.v2i32.p0i32(<2 x i32> %A, <2 x i32> %B, i32* %addr)
+  call void @llvm.aarch64.neon.st1x2.v2i32.p0(<2 x i32> %A, <2 x i32> %B, ptr %addr)
   ret void
 }
 
-define void @st1_x2_v2f32(<2 x float> %A, <2 x float> %B, float* %addr) {
+define void @st1_x2_v2f32(<2 x float> %A, <2 x float> %B, ptr %addr) {
 ; CHECK-LABEL: st1_x2_v2f32:
 ; CHECK: st1.2s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x2.v2f32.p0f32(<2 x float> %A, <2 x float> %B, float* %addr)
+  call void @llvm.aarch64.neon.st1x2.v2f32.p0(<2 x float> %A, <2 x float> %B, ptr %addr)
   ret void
 }
 
-define void @st1_x2_v1i64(<1 x i64> %A, <1 x i64> %B, i64* %addr) {
+define void @st1_x2_v1i64(<1 x i64> %A, <1 x i64> %B, ptr %addr) {
 ; CHECK-LABEL: st1_x2_v1i64:
 ; CHECK: st1.1d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x2.v1i64.p0i64(<1 x i64> %A, <1 x i64> %B, i64* %addr)
+  call void @llvm.aarch64.neon.st1x2.v1i64.p0(<1 x i64> %A, <1 x i64> %B, ptr %addr)
   ret void
 }
 
-define void @st1_x2_v1f64(<1 x double> %A, <1 x double> %B, double* %addr) {
+define void @st1_x2_v1f64(<1 x double> %A, <1 x double> %B, ptr %addr) {
 ; CHECK-LABEL: st1_x2_v1f64:
 ; CHECK: st1.1d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x2.v1f64.p0f64(<1 x double> %A, <1 x double> %B, double* %addr)
+  call void @llvm.aarch64.neon.st1x2.v1f64.p0(<1 x double> %A, <1 x double> %B, ptr %addr)
   ret void
 }
 
-declare void @llvm.aarch64.neon.st1x2.v16i8.p0i8(<16 x i8>, <16 x i8>, i8*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x2.v8i16.p0i16(<8 x i16>, <8 x i16>, i16*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x2.v4i32.p0i32(<4 x i32>, <4 x i32>, i32*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x2.v4f32.p0f32(<4 x float>, <4 x float>, float*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x2.v2i64.p0i64(<2 x i64>, <2 x i64>, i64*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x2.v2f64.p0f64(<2 x double>, <2 x double>, double*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v16i8.p0(<16 x i8>, <16 x i8>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v8i16.p0(<8 x i16>, <8 x i16>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v4i32.p0(<4 x i32>, <4 x i32>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v4f32.p0(<4 x float>, <4 x float>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v2i64.p0(<2 x i64>, <2 x i64>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x2.v2f64.p0(<2 x double>, <2 x double>, ptr) nounwind readonly
 
-define void @st1_x2_v16i8(<16 x i8> %A, <16 x i8> %B, i8* %addr) {
+define void @st1_x2_v16i8(<16 x i8> %A, <16 x i8> %B, ptr %addr) {
 ; CHECK-LABEL: st1_x2_v16i8:
 ; CHECK: st1.16b { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x2.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, i8* %addr)
+  call void @llvm.aarch64.neon.st1x2.v16i8.p0(<16 x i8> %A, <16 x i8> %B, ptr %addr)
   ret void
 }
 
-define void @st1_x2_v8i16(<8 x i16> %A, <8 x i16> %B, i16* %addr) {
+define void @st1_x2_v8i16(<8 x i16> %A, <8 x i16> %B, ptr %addr) {
 ; CHECK-LABEL: st1_x2_v8i16:
 ; CHECK: st1.8h { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x2.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, i16* %addr)
+  call void @llvm.aarch64.neon.st1x2.v8i16.p0(<8 x i16> %A, <8 x i16> %B, ptr %addr)
   ret void
 }
 
-define void @st1_x2_v4i32(<4 x i32> %A, <4 x i32> %B, i32* %addr) {
+define void @st1_x2_v4i32(<4 x i32> %A, <4 x i32> %B, ptr %addr) {
 ; CHECK-LABEL: st1_x2_v4i32:
 ; CHECK: st1.4s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x2.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, i32* %addr)
+  call void @llvm.aarch64.neon.st1x2.v4i32.p0(<4 x i32> %A, <4 x i32> %B, ptr %addr)
   ret void
 }
 
-define void @st1_x2_v4f32(<4 x float> %A, <4 x float> %B, float* %addr) {
+define void @st1_x2_v4f32(<4 x float> %A, <4 x float> %B, ptr %addr) {
 ; CHECK-LABEL: st1_x2_v4f32:
 ; CHECK: st1.4s { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x2.v4f32.p0f32(<4 x float> %A, <4 x float> %B, float* %addr)
+  call void @llvm.aarch64.neon.st1x2.v4f32.p0(<4 x float> %A, <4 x float> %B, ptr %addr)
   ret void
 }
 
-define void @st1_x2_v2i64(<2 x i64> %A, <2 x i64> %B, i64* %addr) {
+define void @st1_x2_v2i64(<2 x i64> %A, <2 x i64> %B, ptr %addr) {
 ; CHECK-LABEL: st1_x2_v2i64:
 ; CHECK: st1.2d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x2.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, i64* %addr)
+  call void @llvm.aarch64.neon.st1x2.v2i64.p0(<2 x i64> %A, <2 x i64> %B, ptr %addr)
   ret void
 }
 
-define void @st1_x2_v2f64(<2 x double> %A, <2 x double> %B, double* %addr) {
+define void @st1_x2_v2f64(<2 x double> %A, <2 x double> %B, ptr %addr) {
 ; CHECK-LABEL: st1_x2_v2f64:
 ; CHECK: st1.2d { {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x2.v2f64.p0f64(<2 x double> %A, <2 x double> %B, double* %addr)
+  call void @llvm.aarch64.neon.st1x2.v2f64.p0(<2 x double> %A, <2 x double> %B, ptr %addr)
   ret void
 }
 
-declare void @llvm.aarch64.neon.st1x3.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, i8*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x3.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, i16*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x3.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, i32*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x3.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, float*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x3.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, i64*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x3.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, double*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v8i8.p0(<8 x i8>, <8 x i8>, <8 x i8>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v4i16.p0(<4 x i16>, <4 x i16>, <4 x i16>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v2i32.p0(<2 x i32>, <2 x i32>, <2 x i32>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v2f32.p0(<2 x float>, <2 x float>, <2 x float>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v1f64.p0(<1 x double>, <1 x double>, <1 x double>, ptr) nounwind readonly
 
-define void @st1_x3_v8i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, i8* %addr) {
+define void @st1_x3_v8i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %addr) {
 ; CHECK-LABEL: st1_x3_v8i8:
 ; CHECK: st1.8b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x3.v8i8.p0i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, i8* %addr)
+  call void @llvm.aarch64.neon.st1x3.v8i8.p0(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, ptr %addr)
   ret void
 }
 
-define void @st1_x3_v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, i16* %addr) {
+define void @st1_x3_v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %addr) {
 ; CHECK-LABEL: st1_x3_v4i16:
 ; CHECK: st1.4h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x3.v4i16.p0i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, i16* %addr)
+  call void @llvm.aarch64.neon.st1x3.v4i16.p0(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr %addr)
   ret void
 }
 
-define void @st1_x3_v2i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, i32* %addr) {
+define void @st1_x3_v2i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr %addr) {
 ; CHECK-LABEL: st1_x3_v2i32:
 ; CHECK: st1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x3.v2i32.p0i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, i32* %addr)
+  call void @llvm.aarch64.neon.st1x3.v2i32.p0(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, ptr %addr)
   ret void
 }
 
-define void @st1_x3_v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, float* %addr) {
+define void @st1_x3_v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, ptr %addr) {
 ; CHECK-LABEL: st1_x3_v2f32:
 ; CHECK: st1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x3.v2f32.p0f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, float* %addr)
+  call void @llvm.aarch64.neon.st1x3.v2f32.p0(<2 x float> %A, <2 x float> %B, <2 x float> %C, ptr %addr)
   ret void
 }
 
-define void @st1_x3_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, i64* %addr) {
+define void @st1_x3_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %addr) {
 ; CHECK-LABEL: st1_x3_v1i64:
 ; CHECK: st1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x3.v1i64.p0i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, i64* %addr)
+  call void @llvm.aarch64.neon.st1x3.v1i64.p0(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, ptr %addr)
   ret void
 }
 
-define void @st1_x3_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, double* %addr) {
+define void @st1_x3_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, ptr %addr) {
 ; CHECK-LABEL: st1_x3_v1f64:
 ; CHECK: st1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x3.v1f64.p0f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, double* %addr)
+  call void @llvm.aarch64.neon.st1x3.v1f64.p0(<1 x double> %A, <1 x double> %B, <1 x double> %C, ptr %addr)
   ret void
 }
 
-declare void @llvm.aarch64.neon.st1x3.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, i8*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x3.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, i16*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x3.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i32*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x3.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, float*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x3.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, i64*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x3.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, double*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x3.v2f64.p0(<2 x double>, <2 x double>, <2 x double>, ptr) nounwind readonly
 
-define void @st1_x3_v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, i8* %addr) {
+define void @st1_x3_v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %addr) {
 ; CHECK-LABEL: st1_x3_v16i8:
 ; CHECK: st1.16b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x3.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, i8* %addr)
+  call void @llvm.aarch64.neon.st1x3.v16i8.p0(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, ptr %addr)
   ret void
 }
 
-define void @st1_x3_v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, i16* %addr) {
+define void @st1_x3_v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %addr) {
 ; CHECK-LABEL: st1_x3_v8i16:
 ; CHECK: st1.8h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x3.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, i16* %addr)
+  call void @llvm.aarch64.neon.st1x3.v8i16.p0(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr %addr)
   ret void
 }
 
-define void @st1_x3_v4i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, i32* %addr) {
+define void @st1_x3_v4i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %addr) {
 ; CHECK-LABEL: st1_x3_v4i32:
 ; CHECK: st1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x3.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, i32* %addr)
+  call void @llvm.aarch64.neon.st1x3.v4i32.p0(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, ptr %addr)
   ret void
 }
 
-define void @st1_x3_v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, float* %addr) {
+define void @st1_x3_v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, ptr %addr) {
 ; CHECK-LABEL: st1_x3_v4f32:
 ; CHECK: st1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x3.v4f32.p0f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, float* %addr)
+  call void @llvm.aarch64.neon.st1x3.v4f32.p0(<4 x float> %A, <4 x float> %B, <4 x float> %C, ptr %addr)
   ret void
 }
 
-define void @st1_x3_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, i64* %addr) {
+define void @st1_x3_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %addr) {
 ; CHECK-LABEL: st1_x3_v2i64:
 ; CHECK: st1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x3.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, i64* %addr)
+  call void @llvm.aarch64.neon.st1x3.v2i64.p0(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, ptr %addr)
   ret void
 }
 
-define void @st1_x3_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, double* %addr) {
+define void @st1_x3_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, ptr %addr) {
 ; CHECK-LABEL: st1_x3_v2f64:
 ; CHECK: st1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x3.v2f64.p0f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, double* %addr)
+  call void @llvm.aarch64.neon.st1x3.v2f64.p0(<2 x double> %A, <2 x double> %B, <2 x double> %C, ptr %addr)
   ret void
 }
 
 
-declare void @llvm.aarch64.neon.st1x4.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i8*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x4.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i16*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x4.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x4.v2f32.p0f32(<2 x float>, <2 x float>, <2 x float>, <2 x float>, float*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x4.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x4.v1f64.p0f64(<1 x double>, <1 x double>, <1 x double>, <1 x double>, double*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v8i8.p0(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v4i16.p0(<4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v2i32.p0(<2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v2f32.p0(<2 x float>, <2 x float>, <2 x float>, <2 x float>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v1i64.p0(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v1f64.p0(<1 x double>, <1 x double>, <1 x double>, <1 x double>, ptr) nounwind readonly
 
-define void @st1_x4_v8i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %addr) {
+define void @st1_x4_v8i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %addr) {
 ; CHECK-LABEL: st1_x4_v8i8:
 ; CHECK: st1.8b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x4.v8i8.p0i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %addr)
+  call void @llvm.aarch64.neon.st1x4.v8i8.p0(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, ptr %addr)
   ret void
 }
 
-define void @st1_x4_v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %addr) {
+define void @st1_x4_v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr %addr) {
 ; CHECK-LABEL: st1_x4_v4i16:
 ; CHECK: st1.4h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x4.v4i16.p0i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %addr)
+  call void @llvm.aarch64.neon.st1x4.v4i16.p0(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, ptr %addr)
   ret void
 }
 
-define void @st1_x4_v2i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %addr) {
+define void @st1_x4_v2i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr %addr) {
 ; CHECK-LABEL: st1_x4_v2i32:
 ; CHECK: st1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x4.v2i32.p0i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %addr)
+  call void @llvm.aarch64.neon.st1x4.v2i32.p0(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, ptr %addr)
   ret void
 }
 
-define void @st1_x4_v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, <2 x float> %D, float* %addr) {
+define void @st1_x4_v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, <2 x float> %D, ptr %addr) {
 ; CHECK-LABEL: st1_x4_v2f32:
 ; CHECK: st1.2s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x4.v2f32.p0f32(<2 x float> %A, <2 x float> %B, <2 x float> %C, <2 x float> %D, float* %addr)
+  call void @llvm.aarch64.neon.st1x4.v2f32.p0(<2 x float> %A, <2 x float> %B, <2 x float> %C, <2 x float> %D, ptr %addr)
   ret void
 }
 
-define void @st1_x4_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %addr) {
+define void @st1_x4_v1i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr %addr) {
 ; CHECK-LABEL: st1_x4_v1i64:
 ; CHECK: st1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x4.v1i64.p0i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %addr)
+  call void @llvm.aarch64.neon.st1x4.v1i64.p0(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, ptr %addr)
   ret void
 }
 
-define void @st1_x4_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x double> %D, double* %addr) {
+define void @st1_x4_v1f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x double> %D, ptr %addr) {
 ; CHECK-LABEL: st1_x4_v1f64:
 ; CHECK: st1.1d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x4.v1f64.p0f64(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x double> %D, double* %addr)
+  call void @llvm.aarch64.neon.st1x4.v1f64.p0(<1 x double> %A, <1 x double> %B, <1 x double> %C, <1 x double> %D, ptr %addr)
   ret void
 }
 
-declare void @llvm.aarch64.neon.st1x4.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i8*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x4.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i16*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x4.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x4.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, <4 x float>, float*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x4.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i64*) nounwind readonly
-declare void @llvm.aarch64.neon.st1x4.v2f64.p0f64(<2 x double>, <2 x double>, <2 x double>, <2 x double>, double*) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v16i8.p0(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v8i16.p0(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v4i32.p0(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, <4 x float>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v2i64.p0(<2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, ptr) nounwind readonly
+declare void @llvm.aarch64.neon.st1x4.v2f64.p0(<2 x double>, <2 x double>, <2 x double>, <2 x double>, ptr) nounwind readonly
 
-define void @st1_x4_v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %addr) {
+define void @st1_x4_v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr %addr) {
 ; CHECK-LABEL: st1_x4_v16i8:
 ; CHECK: st1.16b { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x4.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %addr)
+  call void @llvm.aarch64.neon.st1x4.v16i8.p0(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, ptr %addr)
   ret void
 }
 
-define void @st1_x4_v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %addr) {
+define void @st1_x4_v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr %addr) {
 ; CHECK-LABEL: st1_x4_v8i16:
 ; CHECK: st1.8h { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x4.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %addr)
+  call void @llvm.aarch64.neon.st1x4.v8i16.p0(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, ptr %addr)
   ret void
 }
 
-define void @st1_x4_v4i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %addr) {
+define void @st1_x4_v4i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr %addr) {
 ; CHECK-LABEL: st1_x4_v4i32:
 ; CHECK: st1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x4.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %addr)
+  call void @llvm.aarch64.neon.st1x4.v4i32.p0(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, ptr %addr)
   ret void
 }
 
-define void @st1_x4_v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, <4 x float> %D, float* %addr) {
+define void @st1_x4_v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, <4 x float> %D, ptr %addr) {
 ; CHECK-LABEL: st1_x4_v4f32:
 ; CHECK: st1.4s { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x4.v4f32.p0f32(<4 x float> %A, <4 x float> %B, <4 x float> %C, <4 x float> %D, float* %addr)
+  call void @llvm.aarch64.neon.st1x4.v4f32.p0(<4 x float> %A, <4 x float> %B, <4 x float> %C, <4 x float> %D, ptr %addr)
   ret void
 }
 
-define void @st1_x4_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %addr) {
+define void @st1_x4_v2i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %addr) {
 ; CHECK-LABEL: st1_x4_v2i64:
 ; CHECK: st1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x4.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %addr)
+  call void @llvm.aarch64.neon.st1x4.v2i64.p0(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, ptr %addr)
   ret void
 }
 
-define void @st1_x4_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x double> %D, double* %addr) {
+define void @st1_x4_v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x double> %D, ptr %addr) {
 ; CHECK-LABEL: st1_x4_v2f64:
 ; CHECK: st1.2d { {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} }, [x0]
-  call void @llvm.aarch64.neon.st1x4.v2f64.p0f64(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x double> %D, double* %addr)
+  call void @llvm.aarch64.neon.st1x4.v2f64.p0(<2 x double> %A, <2 x double> %B, <2 x double> %C, <2 x double> %D, ptr %addr)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-stack-no-frame.ll b/llvm/test/CodeGen/AArch64/arm64-stack-no-frame.ll
index 22a67070a1294..95dfd52bbc80c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-stack-no-frame.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-stack-no-frame.ll
@@ -9,11 +9,11 @@ define void @test_stack_no_frame() {
 ; CHECK: test_stack_no_frame
 ; CHECK: sub sp, sp, #[[STACKSIZE:[0-9]+]]
   %local = alloca [20 x i64]
-  %val = load volatile [20 x i64], [20 x i64]* @global, align 8
-  store volatile [20 x i64] %val, [20 x i64]* %local, align 8
+  %val = load volatile [20 x i64], ptr @global, align 8
+  store volatile [20 x i64] %val, ptr %local, align 8
 
-  %val2 = load volatile [20 x i64], [20 x i64]* %local, align 8
-  store volatile [20 x i64] %val2, [20 x i64]* @global, align 8
+  %val2 = load volatile [20 x i64], ptr %local, align 8
+  store volatile [20 x i64] %val2, ptr @global, align 8
 
 ; CHECK: add sp, sp, #[[STACKSIZE]]
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/arm64-stacksave.ll b/llvm/test/CodeGen/AArch64/arm64-stacksave.ll
index 13d4ae23db698..e471b1989f61b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-stacksave.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-stacksave.ll
@@ -10,11 +10,11 @@
 ; CHECK: inlineasm
 define void @f() nounwind ssp {
 entry:
-  %savedstack = call i8* @llvm.stacksave() nounwind
+  %savedstack = call ptr @llvm.stacksave() nounwind
   call void asm sideeffect "; inlineasm", "~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{fp},~{lr},~{sp},~{memory}"() nounwind
-  call void @llvm.stackrestore(i8* %savedstack) nounwind
+  call void @llvm.stackrestore(ptr %savedstack) nounwind
   ret void
 }
 
-declare i8* @llvm.stacksave() nounwind
-declare void @llvm.stackrestore(i8*) nounwind
+declare ptr @llvm.stacksave() nounwind
+declare void @llvm.stackrestore(ptr) nounwind

diff  --git a/llvm/test/CodeGen/AArch64/arm64-storebytesmerge.ll b/llvm/test/CodeGen/AArch64/arm64-storebytesmerge.ll
index bb65e73c49437..188a4f07a33dc 100644
--- a/llvm/test/CodeGen/AArch64/arm64-storebytesmerge.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-storebytesmerge.ll
@@ -11,7 +11,7 @@
 ; CHECK: stp     xzr, xzr, [x8] 
 ; CHECK: bl f
 
-@q = external dso_local unnamed_addr global i16*, align 8
+@q = external dso_local unnamed_addr global ptr, align 8
 
 ; Function Attrs: nounwind
 define void @test() local_unnamed_addr #0 {
@@ -22,19 +22,14 @@ for.body453.i:                                    ; preds = %for.body453.i, %ent
   br i1 undef, label %for.body453.i, label %for.end705.i
 
 for.end705.i:                                     ; preds = %for.body453.i
-  %0 = load i16*, i16** @q, align 8
-  %1 = getelementptr inbounds i16, i16* %0, i64 0
-  %2 = bitcast i16* %1 to <2 x i16>*
-  store <2 x i16> zeroinitializer, <2 x i16>* %2, align 2
-  %3 = getelementptr i16, i16* %1, i64 2
-  %4 = bitcast i16* %3 to <2 x i16>*
-  store <2 x i16> zeroinitializer, <2 x i16>* %4, align 2
-  %5 = getelementptr i16, i16* %1, i64 4
-  %6 = bitcast i16* %5 to <2 x i16>*
-  store <2 x i16> zeroinitializer, <2 x i16>* %6, align 2
-  %7 = getelementptr i16, i16* %1, i64 6
-  %8 = bitcast i16* %7 to <2 x i16>*
-  store <2 x i16> zeroinitializer, <2 x i16>* %8, align 2
+  %0 = load ptr, ptr @q, align 8
+  store <2 x i16> zeroinitializer, ptr %0, align 2
+  %1 = getelementptr i16, ptr %0, i64 2
+  store <2 x i16> zeroinitializer, ptr %1, align 2
+  %2 = getelementptr i16, ptr %0, i64 4
+  store <2 x i16> zeroinitializer, ptr %2, align 2
+  %3 = getelementptr i16, ptr %0, i64 6
+  store <2 x i16> zeroinitializer, ptr %3, align 2
   call void @f() #2
   unreachable
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-stp-aa.ll b/llvm/test/CodeGen/AArch64/arm64-stp-aa.ll
index 79c8ec70fcddc..d1aa88e1d7041 100644
--- a/llvm/test/CodeGen/AArch64/arm64-stp-aa.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-stp-aa.ll
@@ -7,12 +7,12 @@
 ; CHECK: stp w0, w1, [x2]
 ; CHECK: ldr w0, [x2, #8]
 ; CHECK: ret
-define i32 @stp_int_aa(i32 %a, i32 %b, i32* nocapture %p) nounwind {
-  store i32 %a, i32* %p, align 4
-  %ld.ptr = getelementptr inbounds i32, i32* %p, i64 2
-  %tmp = load i32, i32* %ld.ptr, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
-  store i32 %b, i32* %add.ptr, align 4
+define i32 @stp_int_aa(i32 %a, i32 %b, ptr nocapture %p) nounwind {
+  store i32 %a, ptr %p, align 4
+  %ld.ptr = getelementptr inbounds i32, ptr %p, i64 2
+  %tmp = load i32, ptr %ld.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
+  store i32 %b, ptr %add.ptr, align 4
   ret i32 %tmp
 }
 
@@ -20,12 +20,12 @@ define i32 @stp_int_aa(i32 %a, i32 %b, i32* nocapture %p) nounwind {
 ; CHECK: stp x0, x1, [x2]
 ; CHECK: ldr x0, [x2, #16]
 ; CHECK: ret
-define i64 @stp_long_aa(i64 %a, i64 %b, i64* nocapture %p) nounwind {
-  store i64 %a, i64* %p, align 8
-  %ld.ptr = getelementptr inbounds i64, i64* %p, i64 2
-  %tmp = load i64, i64* %ld.ptr, align 4
-  %add.ptr = getelementptr inbounds i64, i64* %p, i64 1
-  store i64 %b, i64* %add.ptr, align 8
+define i64 @stp_long_aa(i64 %a, i64 %b, ptr nocapture %p) nounwind {
+  store i64 %a, ptr %p, align 8
+  %ld.ptr = getelementptr inbounds i64, ptr %p, i64 2
+  %tmp = load i64, ptr %ld.ptr, align 4
+  %add.ptr = getelementptr inbounds i64, ptr %p, i64 1
+  store i64 %b, ptr %add.ptr, align 8
   ret i64 %tmp
 }
 
@@ -33,12 +33,12 @@ define i64 @stp_long_aa(i64 %a, i64 %b, i64* nocapture %p) nounwind {
 ; CHECK: stp s0, s1, [x0]
 ; CHECK: ldr s0, [x0, #8]
 ; CHECK: ret
-define float @stp_float_aa(float %a, float %b, float* nocapture %p) nounwind {
-  store float %a, float* %p, align 4
-  %ld.ptr = getelementptr inbounds float, float* %p, i64 2
-  %tmp = load float, float* %ld.ptr, align 4
-  %add.ptr = getelementptr inbounds float, float* %p, i64 1
-  store float %b, float* %add.ptr, align 4
+define float @stp_float_aa(float %a, float %b, ptr nocapture %p) nounwind {
+  store float %a, ptr %p, align 4
+  %ld.ptr = getelementptr inbounds float, ptr %p, i64 2
+  %tmp = load float, ptr %ld.ptr, align 4
+  %add.ptr = getelementptr inbounds float, ptr %p, i64 1
+  store float %b, ptr %add.ptr, align 4
   ret float %tmp
 }
 
@@ -46,12 +46,12 @@ define float @stp_float_aa(float %a, float %b, float* nocapture %p) nounwind {
 ; CHECK: stp d0, d1, [x0]
 ; CHECK: ldr d0, [x0, #16]
 ; CHECK: ret
-define double @stp_double_aa(double %a, double %b, double* nocapture %p) nounwind {
-  store double %a, double* %p, align 8
-  %ld.ptr = getelementptr inbounds double, double* %p, i64 2
-  %tmp = load double, double* %ld.ptr, align 4
-  %add.ptr = getelementptr inbounds double, double* %p, i64 1
-  store double %b, double* %add.ptr, align 8
+define double @stp_double_aa(double %a, double %b, ptr nocapture %p) nounwind {
+  store double %a, ptr %p, align 8
+  %ld.ptr = getelementptr inbounds double, ptr %p, i64 2
+  %tmp = load double, ptr %ld.ptr, align 4
+  %add.ptr = getelementptr inbounds double, ptr %p, i64 1
+  store double %b, ptr %add.ptr, align 8
   ret double %tmp
 }
 
@@ -62,12 +62,12 @@ define double @stp_double_aa(double %a, double %b, double* nocapture %p) nounwin
 ; CHECK: ldr w0, [x3, #4]
 ; CHECK: stp w1, w2, [x3]
 ; CHECK: ret
-define i32 @stp_int_aa_after(i32 %w0, i32 %a, i32 %b, i32* nocapture %p) nounwind {
-  store i32 %a, i32* %p, align 4
-  %ld.ptr = getelementptr inbounds i32, i32* %p, i64 1
-  %tmp = load i32, i32* %ld.ptr, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
-  store i32 %b, i32* %add.ptr, align 4
+define i32 @stp_int_aa_after(i32 %w0, i32 %a, i32 %b, ptr nocapture %p) nounwind {
+  store i32 %a, ptr %p, align 4
+  %ld.ptr = getelementptr inbounds i32, ptr %p, i64 1
+  %tmp = load i32, ptr %ld.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
+  store i32 %b, ptr %add.ptr, align 4
   ret i32 %tmp
 }
 
@@ -75,12 +75,12 @@ define i32 @stp_int_aa_after(i32 %w0, i32 %a, i32 %b, i32* nocapture %p) nounwin
 ; CHECK: ldr x0, [x3, #8]
 ; CHECK: stp x1, x2, [x3]
 ; CHECK: ret
-define i64 @stp_long_aa_after(i64 %x0, i64 %a, i64 %b, i64* nocapture %p) nounwind {
-  store i64 %a, i64* %p, align 8
-  %ld.ptr = getelementptr inbounds i64, i64* %p, i64 1
-  %tmp = load i64, i64* %ld.ptr, align 4
-  %add.ptr = getelementptr inbounds i64, i64* %p, i64 1
-  store i64 %b, i64* %add.ptr, align 8
+define i64 @stp_long_aa_after(i64 %x0, i64 %a, i64 %b, ptr nocapture %p) nounwind {
+  store i64 %a, ptr %p, align 8
+  %ld.ptr = getelementptr inbounds i64, ptr %p, i64 1
+  %tmp = load i64, ptr %ld.ptr, align 4
+  %add.ptr = getelementptr inbounds i64, ptr %p, i64 1
+  store i64 %b, ptr %add.ptr, align 8
   ret i64 %tmp
 }
 
@@ -88,12 +88,12 @@ define i64 @stp_long_aa_after(i64 %x0, i64 %a, i64 %b, i64* nocapture %p) nounwi
 ; CHECK: ldr s0, [x0, #4]
 ; CHECK: stp s1, s2, [x0]
 ; CHECK: ret
-define float @stp_float_aa_after(float %s0, float %a, float %b, float* nocapture %p) nounwind {
-  store float %a, float* %p, align 4
-  %ld.ptr = getelementptr inbounds float, float* %p, i64 1
-  %tmp = load float, float* %ld.ptr, align 4
-  %add.ptr = getelementptr inbounds float, float* %p, i64 1
-  store float %b, float* %add.ptr, align 4
+define float @stp_float_aa_after(float %s0, float %a, float %b, ptr nocapture %p) nounwind {
+  store float %a, ptr %p, align 4
+  %ld.ptr = getelementptr inbounds float, ptr %p, i64 1
+  %tmp = load float, ptr %ld.ptr, align 4
+  %add.ptr = getelementptr inbounds float, ptr %p, i64 1
+  store float %b, ptr %add.ptr, align 4
   ret float %tmp
 }
 
@@ -101,12 +101,12 @@ define float @stp_float_aa_after(float %s0, float %a, float %b, float* nocapture
 ; CHECK: ldr d0, [x0, #8]
 ; CHECK: stp d1, d2, [x0]
 ; CHECK: ret
-define double @stp_double_aa_after(double %d0, double %a, double %b, double* nocapture %p) nounwind {
-  store double %a, double* %p, align 8
-  %ld.ptr = getelementptr inbounds double, double* %p, i64 1
-  %tmp = load double, double* %ld.ptr, align 4
-  %add.ptr = getelementptr inbounds double, double* %p, i64 1
-  store double %b, double* %add.ptr, align 8
+define double @stp_double_aa_after(double %d0, double %a, double %b, ptr nocapture %p) nounwind {
+  store double %a, ptr %p, align 8
+  %ld.ptr = getelementptr inbounds double, ptr %p, i64 1
+  %tmp = load double, ptr %ld.ptr, align 4
+  %add.ptr = getelementptr inbounds double, ptr %p, i64 1
+  store double %b, ptr %add.ptr, align 8
   ret double %tmp
 }
 
@@ -118,28 +118,24 @@ define double @stp_double_aa_after(double %d0, double %a, double %b, double* noc
 ; CHECK: stp q0, q1, [x{{[0-9]+}}]
 ; CHECK: fadd
 ; CHECK: stp q2, q0, [x{{[0-9]+}}, #32]
-define void @st1(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d, float* %base, i64 %index) {
+define void @st1(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d, ptr %base, i64 %index) {
 entry:
-  %a0 = getelementptr inbounds float, float* %base, i64 %index
-  %b0 = getelementptr float, float* %a0, i64 4
-  %c0 = getelementptr float, float* %a0, i64 8
-  %d0 = getelementptr float, float* %a0, i64 12
+  %a0 = getelementptr inbounds float, ptr %base, i64 %index
+  %b0 = getelementptr float, ptr %a0, i64 4
+  %c0 = getelementptr float, ptr %a0, i64 8
+  %d0 = getelementptr float, ptr %a0, i64 12
 
-  %a1 = bitcast float* %a0 to <4 x float>*
-  %b1 = bitcast float* %b0 to <4 x float>*
-  %c1 = bitcast float* %c0 to <4 x float>*
-  %d1 = bitcast float* %d0 to <4 x float>*
 
-  store <4 x float> %c, <4 x float> * %c1, align 4
-  store <4 x float> %a, <4 x float> * %a1, align 4
+  store <4 x float> %c, ptr %c0, align 4
+  store <4 x float> %a, ptr %a0, align 4
 
   ; This fadd forces the compiler to pair %c and %e after fadd, and leave the
   ; stores %a and %b separated by a stp. The dependence analysis needs then to
   ; prove that it is safe to move %b past the stp to be paired with %a.
   %e = fadd fast <4 x float> %d, %a
 
-  store <4 x float> %e, <4 x float>* %d1, align 4
-  store <4 x float> %b, <4 x float>* %b1, align 4
+  store <4 x float> %e, ptr %d0, align 4
+  store <4 x float> %b, ptr %b0, align 4
 
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-stp.ll b/llvm/test/CodeGen/AArch64/arm64-stp.ll
index d3fd87e5a898c..94777924d64d3 100644
--- a/llvm/test/CodeGen/AArch64/arm64-stp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-stp.ll
@@ -2,106 +2,106 @@
 
 ; CHECK-LABEL: stp_int
 ; CHECK: stp w0, w1, [x2]
-define void @stp_int(i32 %a, i32 %b, i32* nocapture %p) nounwind {
-  store i32 %a, i32* %p, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
-  store i32 %b, i32* %add.ptr, align 4
+define void @stp_int(i32 %a, i32 %b, ptr nocapture %p) nounwind {
+  store i32 %a, ptr %p, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
+  store i32 %b, ptr %add.ptr, align 4
   ret void
 }
 
 ; CHECK-LABEL: stp_long
 ; CHECK: stp x0, x1, [x2]
-define void @stp_long(i64 %a, i64 %b, i64* nocapture %p) nounwind {
-  store i64 %a, i64* %p, align 8
-  %add.ptr = getelementptr inbounds i64, i64* %p, i64 1
-  store i64 %b, i64* %add.ptr, align 8
+define void @stp_long(i64 %a, i64 %b, ptr nocapture %p) nounwind {
+  store i64 %a, ptr %p, align 8
+  %add.ptr = getelementptr inbounds i64, ptr %p, i64 1
+  store i64 %b, ptr %add.ptr, align 8
   ret void
 }
 
 ; CHECK-LABEL: stp_float
 ; CHECK: stp s0, s1, [x0]
-define void @stp_float(float %a, float %b, float* nocapture %p) nounwind {
-  store float %a, float* %p, align 4
-  %add.ptr = getelementptr inbounds float, float* %p, i64 1
-  store float %b, float* %add.ptr, align 4
+define void @stp_float(float %a, float %b, ptr nocapture %p) nounwind {
+  store float %a, ptr %p, align 4
+  %add.ptr = getelementptr inbounds float, ptr %p, i64 1
+  store float %b, ptr %add.ptr, align 4
   ret void
 }
 
 ; CHECK-LABEL: stp_double
 ; CHECK: stp d0, d1, [x0]
-define void @stp_double(double %a, double %b, double* nocapture %p) nounwind {
-  store double %a, double* %p, align 8
-  %add.ptr = getelementptr inbounds double, double* %p, i64 1
-  store double %b, double* %add.ptr, align 8
+define void @stp_double(double %a, double %b, ptr nocapture %p) nounwind {
+  store double %a, ptr %p, align 8
+  %add.ptr = getelementptr inbounds double, ptr %p, i64 1
+  store double %b, ptr %add.ptr, align 8
   ret void
 }
 
 ; CHECK-LABEL: stp_doublex2
 ; CHECK: stp q0, q1, [x0]
-define void @stp_doublex2(<2 x double> %a, <2 x double> %b, <2 x double>* nocapture %p) nounwind {
-  store <2 x double> %a, <2 x double>* %p, align 16
-  %add.ptr = getelementptr inbounds <2 x double>, <2 x double>* %p, i64 1
-  store <2 x double> %b, <2 x double>* %add.ptr, align 16
+define void @stp_doublex2(<2 x double> %a, <2 x double> %b, ptr nocapture %p) nounwind {
+  store <2 x double> %a, ptr %p, align 16
+  %add.ptr = getelementptr inbounds <2 x double>, ptr %p, i64 1
+  store <2 x double> %b, ptr %add.ptr, align 16
   ret void
 }
 
 ; Test the load/store optimizer---combine ldurs into a ldp, if appropriate
-define void @stur_int(i32 %a, i32 %b, i32* nocapture %p) nounwind {
+define void @stur_int(i32 %a, i32 %b, ptr nocapture %p) nounwind {
 ; CHECK-LABEL: stur_int
 ; CHECK: stp w{{[0-9]+}}, {{w[0-9]+}}, [x{{[0-9]+}}, #-8]
 ; CHECK-NEXT: ret
-  %p1 = getelementptr inbounds i32, i32* %p, i32 -1
-  store i32 %a, i32* %p1, align 2
-  %p2 = getelementptr inbounds i32, i32* %p, i32 -2
-  store i32 %b, i32* %p2, align 2
+  %p1 = getelementptr inbounds i32, ptr %p, i32 -1
+  store i32 %a, ptr %p1, align 2
+  %p2 = getelementptr inbounds i32, ptr %p, i32 -2
+  store i32 %b, ptr %p2, align 2
   ret void
 }
 
-define void @stur_long(i64 %a, i64 %b, i64* nocapture %p) nounwind {
+define void @stur_long(i64 %a, i64 %b, ptr nocapture %p) nounwind {
 ; CHECK-LABEL: stur_long
 ; CHECK: stp x{{[0-9]+}}, {{x[0-9]+}}, [x{{[0-9]+}}, #-16]
 ; CHECK-NEXT: ret
-  %p1 = getelementptr inbounds i64, i64* %p, i32 -1
-  store i64 %a, i64* %p1, align 2
-  %p2 = getelementptr inbounds i64, i64* %p, i32 -2
-  store i64 %b, i64* %p2, align 2
+  %p1 = getelementptr inbounds i64, ptr %p, i32 -1
+  store i64 %a, ptr %p1, align 2
+  %p2 = getelementptr inbounds i64, ptr %p, i32 -2
+  store i64 %b, ptr %p2, align 2
   ret void
 }
 
-define void @stur_float(float %a, float %b, float* nocapture %p) nounwind {
+define void @stur_float(float %a, float %b, ptr nocapture %p) nounwind {
 ; CHECK-LABEL: stur_float
 ; CHECK: stp s{{[0-9]+}}, {{s[0-9]+}}, [x{{[0-9]+}}, #-8]
 ; CHECK-NEXT: ret
-  %p1 = getelementptr inbounds float, float* %p, i32 -1
-  store float %a, float* %p1, align 2
-  %p2 = getelementptr inbounds float, float* %p, i32 -2
-  store float %b, float* %p2, align 2
+  %p1 = getelementptr inbounds float, ptr %p, i32 -1
+  store float %a, ptr %p1, align 2
+  %p2 = getelementptr inbounds float, ptr %p, i32 -2
+  store float %b, ptr %p2, align 2
   ret void
 }
 
-define void @stur_double(double %a, double %b, double* nocapture %p) nounwind {
+define void @stur_double(double %a, double %b, ptr nocapture %p) nounwind {
 ; CHECK-LABEL: stur_double
 ; CHECK: stp d{{[0-9]+}}, {{d[0-9]+}}, [x{{[0-9]+}}, #-16]
 ; CHECK-NEXT: ret
-  %p1 = getelementptr inbounds double, double* %p, i32 -1
-  store double %a, double* %p1, align 2
-  %p2 = getelementptr inbounds double, double* %p, i32 -2
-  store double %b, double* %p2, align 2
+  %p1 = getelementptr inbounds double, ptr %p, i32 -1
+  store double %a, ptr %p1, align 2
+  %p2 = getelementptr inbounds double, ptr %p, i32 -2
+  store double %b, ptr %p2, align 2
   ret void
 }
 
-define void @stur_doublex2(<2 x double> %a, <2 x double> %b, <2 x double>* nocapture %p) nounwind {
+define void @stur_doublex2(<2 x double> %a, <2 x double> %b, ptr nocapture %p) nounwind {
 ; CHECK-LABEL: stur_doublex2
 ; CHECK: stp q{{[0-9]+}}, q{{[0-9]+}}, [x{{[0-9]+}}, #-32]
 ; CHECK-NEXT: ret
-  %p1 = getelementptr inbounds <2 x double>, <2 x double>* %p, i32 -1
-  store <2 x double> %a, <2 x double>* %p1, align 2
-  %p2 = getelementptr inbounds <2 x double>, <2 x double>* %p, i32 -2
-  store <2 x double> %b, <2 x double>* %p2, align 2
+  %p1 = getelementptr inbounds <2 x double>, ptr %p, i32 -1
+  store <2 x double> %a, ptr %p1, align 2
+  %p2 = getelementptr inbounds <2 x double>, ptr %p, i32 -2
+  store <2 x double> %b, ptr %p2, align 2
   ret void
 }
 
-define void @splat_v4i32(i32 %v, i32 *%p) {
+define void @splat_v4i32(i32 %v, ptr %p) {
 entry:
 
 ; CHECK-LABEL: splat_v4i32
@@ -113,14 +113,13 @@ entry:
   %p18 = insertelement <4 x i32> %p17, i32 %v, i32 1
   %p19 = insertelement <4 x i32> %p18, i32 %v, i32 2
   %p20 = insertelement <4 x i32> %p19, i32 %v, i32 3
-  %p21 = bitcast i32* %p to <4 x i32>*
-  store <4 x i32> %p20, <4 x i32>* %p21, align 4
+  store <4 x i32> %p20, ptr %p, align 4
   ret void
 }
 
 ; Check that a non-splat store that is storing a vector created by 4
 ; insertelements that is not a splat vector does not get split.
-define void @nosplat_v4i32(i32 %v, i32 *%p) {
+define void @nosplat_v4i32(i32 %v, ptr %p) {
 entry:
 
 ; CHECK-LABEL: nosplat_v4i32:
@@ -136,14 +135,13 @@ entry:
   %p18 = insertelement <4 x i32> %p17, i32 %v, i32 1
   %p19 = insertelement <4 x i32> %p18, i32 %v, i32 2
   %p20 = insertelement <4 x i32> %p19, i32 %v, i32 3
-  %p21 = bitcast i32* %p to <4 x i32>*
-  store <4 x i32> %p20, <4 x i32>* %p21, align 4
+  store <4 x i32> %p20, ptr %p, align 4
   ret void
 }
 
 ; Check that a non-splat store that is storing a vector created by 4
 ; insertelements that is not a splat vector does not get split.
-define void @nosplat2_v4i32(i32 %v, i32 *%p, <4 x i32> %vin) {
+define void @nosplat2_v4i32(i32 %v, ptr %p, <4 x i32> %vin) {
 entry:
 
 ; CHECK-LABEL: nosplat2_v4i32:
@@ -156,8 +154,7 @@ entry:
   %p18 = insertelement <4 x i32> %vin, i32 %v, i32 1
   %p19 = insertelement <4 x i32> %p18, i32 %v, i32 2
   %p20 = insertelement <4 x i32> %p19, i32 %v, i32 3
-  %p21 = bitcast i32* %p to <4 x i32>*
-  store <4 x i32> %p20, <4 x i32>* %p21, align 4
+  store <4 x i32> %p20, ptr %p, align 4
   ret void
 }
 
@@ -167,13 +164,13 @@ entry:
 ; CHECK: add w8, [[REG]], w1
 ; CHECK: stp w0, w1, [x2]
 ; CHECK: ret
-define i32 @stp_int_rar_hazard(i32 %a, i32 %b, i32* nocapture %p) nounwind {
-  store i32 %a, i32* %p, align 4
-  %ld.ptr = getelementptr inbounds i32, i32* %p, i64 2
-  %tmp = load i32, i32* %ld.ptr, align 4
+define i32 @stp_int_rar_hazard(i32 %a, i32 %b, ptr nocapture %p) nounwind {
+  store i32 %a, ptr %p, align 4
+  %ld.ptr = getelementptr inbounds i32, ptr %p, i64 2
+  %tmp = load i32, ptr %ld.ptr, align 4
   %tmp2 = add i32 %tmp, %b
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
-  store i32 %b, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
+  store i32 %b, ptr %add.ptr, align 4
   ret i32 %tmp2
 }
 
@@ -183,12 +180,12 @@ define i32 @stp_int_rar_hazard(i32 %a, i32 %b, i32* nocapture %p) nounwind {
 ; CHECK: add w0, [[REG]], w2
 ; CHECK: stp w1, w2, [x3]
 ; CHECK: ret
-define i32 @stp_int_rar_hazard_after(i32 %w0, i32 %a, i32 %b, i32* nocapture %p) nounwind {
-  store i32 %a, i32* %p, align 4
-  %ld.ptr = getelementptr inbounds i32, i32* %p, i64 1
-  %tmp = load i32, i32* %ld.ptr, align 4
+define i32 @stp_int_rar_hazard_after(i32 %w0, i32 %a, i32 %b, ptr nocapture %p) nounwind {
+  store i32 %a, ptr %p, align 4
+  %ld.ptr = getelementptr inbounds i32, ptr %p, i64 1
+  %tmp = load i32, ptr %ld.ptr, align 4
   %tmp2 = add i32 %tmp, %b
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
-  store i32 %b, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
+  store i32 %b, ptr %add.ptr, align 4
   ret i32 %tmp2
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-strict-align.ll b/llvm/test/CodeGen/AArch64/arm64-strict-align.ll
index a7450349766fe..d2335e4121bf0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-strict-align.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-strict-align.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=arm64-apple-darwin -mattr=+strict-align | FileCheck %s --check-prefix=CHECK-STRICT
 ; RUN: llc < %s -mtriple=arm64-apple-darwin -mattr=+strict-align -fast-isel | FileCheck %s --check-prefix=CHECK-STRICT
 
-define i32 @f0(i32* nocapture %p) nounwind {
+define i32 @f0(ptr nocapture %p) nounwind {
 ; CHECK-STRICT: ldrh [[HIGH:w[0-9]+]], [x0, #2]
 ; CHECK-STRICT: ldrh [[LOW:w[0-9]+]], [x0]
 ; CHECK-STRICT: orr w0, [[LOW]], [[HIGH]], lsl #16
@@ -10,17 +10,17 @@ define i32 @f0(i32* nocapture %p) nounwind {
 
 ; CHECK: ldr w0, [x0]
 ; CHECK: ret
-  %tmp = load i32, i32* %p, align 2
+  %tmp = load i32, ptr %p, align 2
   ret i32 %tmp
 }
 
-define i64 @f1(i64* nocapture %p) nounwind {
+define i64 @f1(ptr nocapture %p) nounwind {
 ; CHECK-STRICT:	ldp	w[[LOW:[0-9]+]], w[[HIGH:[0-9]+]], [x0]
 ; CHECK-STRICT: orr x0, x[[LOW]], x[[HIGH]], lsl #32
 ; CHECK-STRICT:	ret
 
 ; CHECK: ldr x0, [x0]
 ; CHECK: ret
-  %tmp = load i64, i64* %p, align 4
+  %tmp = load i64, ptr %p, align 4
   ret i64 %tmp
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-stur.ll b/llvm/test/CodeGen/AArch64/arm64-stur.ll
index d4ac3630bc1a8..2a74abb10226d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-stur.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-stur.ll
@@ -1,65 +1,64 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mcpu=cyclone -mattr=+slow-misaligned-128store | FileCheck %s
 %struct.X = type <{ i32, i64, i64 }>
 
-define void @foo1(i32* %p, i64 %val) nounwind {
+define void @foo1(ptr %p, i64 %val) nounwind {
 ; CHECK-LABEL: foo1:
 ; CHECK: 	stur	w1, [x0, #-4]
 ; CHECK-NEXT: 	ret
   %tmp1 = trunc i64 %val to i32
-  %ptr = getelementptr inbounds i32, i32* %p, i64 -1
-  store i32 %tmp1, i32* %ptr, align 4
+  %ptr = getelementptr inbounds i32, ptr %p, i64 -1
+  store i32 %tmp1, ptr %ptr, align 4
   ret void
 }
-define void @foo2(i16* %p, i64 %val) nounwind {
+define void @foo2(ptr %p, i64 %val) nounwind {
 ; CHECK-LABEL: foo2:
 ; CHECK: 	sturh	w1, [x0, #-2]
 ; CHECK-NEXT: 	ret
   %tmp1 = trunc i64 %val to i16
-  %ptr = getelementptr inbounds i16, i16* %p, i64 -1
-  store i16 %tmp1, i16* %ptr, align 2
+  %ptr = getelementptr inbounds i16, ptr %p, i64 -1
+  store i16 %tmp1, ptr %ptr, align 2
   ret void
 }
-define void @foo3(i8* %p, i64 %val) nounwind {
+define void @foo3(ptr %p, i64 %val) nounwind {
 ; CHECK-LABEL: foo3:
 ; CHECK: 	sturb	w1, [x0, #-1]
 ; CHECK-NEXT: 	ret
   %tmp1 = trunc i64 %val to i8
-  %ptr = getelementptr inbounds i8, i8* %p, i64 -1
-  store i8 %tmp1, i8* %ptr, align 1
+  %ptr = getelementptr inbounds i8, ptr %p, i64 -1
+  store i8 %tmp1, ptr %ptr, align 1
   ret void
 }
-define void @foo4(i16* %p, i32 %val) nounwind {
+define void @foo4(ptr %p, i32 %val) nounwind {
 ; CHECK-LABEL: foo4:
 ; CHECK: 	sturh	w1, [x0, #-2]
 ; CHECK-NEXT: 	ret
   %tmp1 = trunc i32 %val to i16
-  %ptr = getelementptr inbounds i16, i16* %p, i32 -1
-  store i16 %tmp1, i16* %ptr, align 2
+  %ptr = getelementptr inbounds i16, ptr %p, i32 -1
+  store i16 %tmp1, ptr %ptr, align 2
   ret void
 }
-define void @foo5(i8* %p, i32 %val) nounwind {
+define void @foo5(ptr %p, i32 %val) nounwind {
 ; CHECK-LABEL: foo5:
 ; CHECK: 	sturb	w1, [x0, #-1]
 ; CHECK-NEXT: 	ret
   %tmp1 = trunc i32 %val to i8
-  %ptr = getelementptr inbounds i8, i8* %p, i32 -1
-  store i8 %tmp1, i8* %ptr, align 1
+  %ptr = getelementptr inbounds i8, ptr %p, i32 -1
+  store i8 %tmp1, ptr %ptr, align 1
   ret void
 }
 
-define void @foo(%struct.X* nocapture %p) nounwind optsize ssp {
+define void @foo(ptr nocapture %p) nounwind optsize ssp {
 ; CHECK-LABEL: foo:
 ; CHECK-NOT: str
 ; CHECK: stur    xzr, [x0, #12]
 ; CHECK-NEXT: stur    xzr, [x0, #4]
 ; CHECK-NEXT: ret
-  %B = getelementptr inbounds %struct.X, %struct.X* %p, i64 0, i32 1
-  %val = bitcast i64* %B to i8*
-  call void @llvm.memset.p0i8.i64(i8* %val, i8 0, i64 16, i1 false)
+  %B = getelementptr inbounds %struct.X, ptr %p, i64 0, i32 1
+  call void @llvm.memset.p0.i64(ptr %B, i8 0, i64 16, i1 false)
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
 
 ; Unaligned 16b stores are split into 8b stores for performance.
 ; radar://15424193
@@ -69,15 +68,15 @@ declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
 ; CHECK: str     d[[REG:[0-9]+]], [x0]
 ; CHECK: ext.16b v[[REG2:[0-9]+]], v[[REG]], v[[REG]], #8
 ; CHECK: str     d[[REG2]], [x0, #8]
-define void @unaligned(<4 x i32>* %p, <4 x i32> %v) nounwind {
-  store <4 x i32> %v, <4 x i32>* %p, align 4
+define void @unaligned(ptr %p, <4 x i32> %v) nounwind {
+  store <4 x i32> %v, ptr %p, align 4
   ret void
 }
 
 ; CHECK-LABEL: aligned:
 ; CHECK: str q0
-define void @aligned(<4 x i32>* %p, <4 x i32> %v) nounwind {
-  store <4 x i32> %v, <4 x i32>* %p
+define void @aligned(ptr %p, <4 x i32> %v) nounwind {
+  store <4 x i32> %v, ptr %p
   ret void
 }
 
@@ -86,13 +85,13 @@ define void @aligned(<4 x i32>* %p, <4 x i32> %v) nounwind {
 
 ; CHECK-LABEL: twobytealign:
 ; CHECK: str q0
-define void @twobytealign(<4 x i32>* %p, <4 x i32> %v) nounwind {
-  store <4 x i32> %v, <4 x i32>* %p, align 2
+define void @twobytealign(ptr %p, <4 x i32> %v) nounwind {
+  store <4 x i32> %v, ptr %p, align 2
   ret void
 }
 ; CHECK-LABEL: onebytealign:
 ; CHECK: str q0
-define void @onebytealign(<4 x i32>* %p, <4 x i32> %v) nounwind {
-  store <4 x i32> %v, <4 x i32>* %p, align 1
+define void @onebytealign(ptr %p, <4 x i32> %v) nounwind {
+  store <4 x i32> %v, ptr %p, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-this-return.ll b/llvm/test/CodeGen/AArch64/arm64-this-return.ll
index 0aa5dc9edd411..93c50a38f1d18 100644
--- a/llvm/test/CodeGen/AArch64/arm64-this-return.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-this-return.ll
@@ -9,15 +9,15 @@
 %struct.D = type { %struct.B }
 %struct.E = type { %struct.B, %struct.B }
 
-declare %struct.A* @A_ctor_base(%struct.A* returned)
-declare %struct.B* @B_ctor_base(%struct.B* returned, i32)
-declare %struct.B* @B_ctor_complete(%struct.B* returned, i32)
+declare ptr @A_ctor_base(ptr returned)
+declare ptr @B_ctor_base(ptr returned, i32)
+declare ptr @B_ctor_complete(ptr returned, i32)
 
-declare %struct.A* @A_ctor_base_nothisret(%struct.A*)
-declare %struct.B* @B_ctor_base_nothisret(%struct.B*, i32)
-declare %struct.B* @B_ctor_complete_nothisret(%struct.B*, i32)
+declare ptr @A_ctor_base_nothisret(ptr)
+declare ptr @B_ctor_base_nothisret(ptr, i32)
+declare ptr @B_ctor_complete_nothisret(ptr, i32)
 
-define %struct.C* @C_ctor_base(%struct.C* returned %this, i32 %x) {
+define ptr @C_ctor_base(ptr returned %this, i32 %x) {
   ; GISEL-MIR-LABEL: name: C_ctor_base
   ; GISEL-MIR: bb.1.entry:
   ; GISEL-MIR:   liveins: $w1, $x0
@@ -37,14 +37,12 @@ entry:
 ; CHECK: bl {{_?A_ctor_base}}
 ; CHECK-NOT: mov x0, {{x[0-9]+}}
 ; CHECK: b {{_?B_ctor_base}}
-  %0 = bitcast %struct.C* %this to %struct.A*
-  %call = tail call %struct.A* @A_ctor_base(%struct.A* returned %0)
-  %1 = getelementptr inbounds %struct.C, %struct.C* %this, i32 0, i32 0
-  %call2 = tail call %struct.B* @B_ctor_base(%struct.B* returned %1, i32 %x)
-  ret %struct.C* %this
+  %call = tail call ptr @A_ctor_base(ptr returned %this)
+  %call2 = tail call ptr @B_ctor_base(ptr returned %this, i32 %x)
+  ret ptr %this
 }
 
-define %struct.C* @C_ctor_base_nothisret(%struct.C* %this, i32 %x) {
+define ptr @C_ctor_base_nothisret(ptr %this, i32 %x) {
   ; GISEL-MIR-LABEL: name: C_ctor_base_nothisret
   ; GISEL-MIR: bb.1.entry:
   ; GISEL-MIR:   liveins: $w1, $x0
@@ -69,14 +67,12 @@ entry:
 ; CHECK: bl {{_?A_ctor_base_nothisret}}
 ; CHECK: mov x0, [[SAVETHIS]]
 ; CHECK-NOT: b {{_?B_ctor_base_nothisret}}
-  %0 = bitcast %struct.C* %this to %struct.A*
-  %call = tail call %struct.A* @A_ctor_base_nothisret(%struct.A* %0)
-  %1 = getelementptr inbounds %struct.C, %struct.C* %this, i32 0, i32 0
-  %call2 = tail call %struct.B* @B_ctor_base_nothisret(%struct.B* %1, i32 %x)
-  ret %struct.C* %this
+  %call = tail call ptr @A_ctor_base_nothisret(ptr %this)
+  %call2 = tail call ptr @B_ctor_base_nothisret(ptr %this, i32 %x)
+  ret ptr %this
 }
 
-define %struct.C* @C_ctor_complete(%struct.C* %this, i32 %x) {
+define ptr @C_ctor_complete(ptr %this, i32 %x) {
   ; GISEL-MIR-LABEL: name: C_ctor_complete
   ; GISEL-MIR: bb.1.entry:
   ; GISEL-MIR:   liveins: $w1, $x0
@@ -88,11 +84,11 @@ define %struct.C* @C_ctor_complete(%struct.C* %this, i32 %x) {
 entry:
 ; CHECK-LABEL: C_ctor_complete:
 ; CHECK: b {{_?C_ctor_base}}
-  %call = tail call %struct.C* @C_ctor_base(%struct.C* returned %this, i32 %x)
-  ret %struct.C* %this
+  %call = tail call ptr @C_ctor_base(ptr returned %this, i32 %x)
+  ret ptr %this
 }
 
-define %struct.C* @C_ctor_complete_nothisret(%struct.C* %this, i32 %x) {
+define ptr @C_ctor_complete_nothisret(ptr %this, i32 %x) {
   ; GISEL-MIR-LABEL: name: C_ctor_complete_nothisret
   ; GISEL-MIR: bb.1.entry:
   ; GISEL-MIR:   liveins: $w1, $x0
@@ -109,11 +105,11 @@ define %struct.C* @C_ctor_complete_nothisret(%struct.C* %this, i32 %x) {
 entry:
 ; CHECK-LABEL: C_ctor_complete_nothisret:
 ; CHECK-NOT: b {{_?C_ctor_base_nothisret}}
-  %call = tail call %struct.C* @C_ctor_base_nothisret(%struct.C* %this, i32 %x)
-  ret %struct.C* %this
+  %call = tail call ptr @C_ctor_base_nothisret(ptr %this, i32 %x)
+  ret ptr %this
 }
 
-define %struct.D* @D_ctor_base(%struct.D* %this, i32 %x) {
+define ptr @D_ctor_base(ptr %this, i32 %x) {
   ; GISEL-MIR-LABEL: name: D_ctor_base
   ; GISEL-MIR: bb.1.entry:
   ; GISEL-MIR:   liveins: $w1, $x0
@@ -134,13 +130,12 @@ entry:
 ; CHECK: bl {{_?B_ctor_complete}}
 ; CHECK-NOT: mov x0, {{x[0-9]+}}
 ; CHECK: b {{_?B_ctor_complete}}
-  %b = getelementptr inbounds %struct.D, %struct.D* %this, i32 0, i32 0
-  %call = tail call %struct.B* @B_ctor_complete(%struct.B* returned %b, i32 %x)
-  %call2 = tail call %struct.B* @B_ctor_complete(%struct.B* returned %b, i32 %x)
-  ret %struct.D* %this
+  %call = tail call ptr @B_ctor_complete(ptr returned %this, i32 %x)
+  %call2 = tail call ptr @B_ctor_complete(ptr returned %this, i32 %x)
+  ret ptr %this
 }
 
-define %struct.E* @E_ctor_base(%struct.E* %this, i32 %x) {
+define ptr @E_ctor_base(ptr %this, i32 %x) {
   ; GISEL-MIR-LABEL: name: E_ctor_base
   ; GISEL-MIR: bb.1.entry:
   ; GISEL-MIR:   liveins: $w1, $x0
@@ -165,9 +160,8 @@ define %struct.E* @E_ctor_base(%struct.E* %this, i32 %x) {
 entry:
 ; CHECK-LABEL: E_ctor_base:
 ; CHECK-NOT: b {{_?B_ctor_complete}}
-  %b = getelementptr inbounds %struct.E, %struct.E* %this, i32 0, i32 0
-  %call = tail call %struct.B* @B_ctor_complete(%struct.B* returned %b, i32 %x)
-  %b2 = getelementptr inbounds %struct.E, %struct.E* %this, i32 0, i32 1
-  %call2 = tail call %struct.B* @B_ctor_complete(%struct.B* returned %b2, i32 %x)
-  ret %struct.E* %this
+  %call = tail call ptr @B_ctor_complete(ptr returned %this, i32 %x)
+  %b2 = getelementptr inbounds %struct.E, ptr %this, i32 0, i32 1
+  %call2 = tail call ptr @B_ctor_complete(ptr returned %b2, i32 %x)
+  ret ptr %this
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-tls-darwin.ll b/llvm/test/CodeGen/AArch64/arm64-tls-darwin.ll
index 06d214b1b0acf..49055e3975c05 100644
--- a/llvm/test/CodeGen/AArch64/arm64-tls-darwin.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-tls-darwin.ll
@@ -14,6 +14,6 @@ define i8 @get_var() {
 ; CHECK: blr [[TLV_GET_ADDR]]
 ; CHECK: ldrb w0, [x0]
 
-  %val = load i8, i8* @var, align 1
+  %val = load i8, ptr @var, align 1
   ret i8 %val
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-tls-dynamic-together.ll b/llvm/test/CodeGen/AArch64/arm64-tls-dynamic-together.ll
index 9f77d3527d4b9..57f8c6fd73109 100644
--- a/llvm/test/CodeGen/AArch64/arm64-tls-dynamic-together.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-tls-dynamic-together.ll
@@ -13,7 +13,7 @@
 define i32 @test_generaldynamic() {
 ; CHECK-LABEL: test_generaldynamic:
 
-  %val = load i32, i32* @general_dynamic_var
+  %val = load i32, ptr @general_dynamic_var
   ret i32 %val
 
 ; NOEMU: .tlsdesccall general_dynamic_var
@@ -32,7 +32,7 @@ define i32 @test_generaldynamic() {
 define i32 @test_emulated_init() {
 ; COMMON-LABEL: test_emulated_init:
 
-  %val = load i32, i32* @emulated_init_var
+  %val = load i32, ptr @emulated_init_var
   ret i32 %val
 
 ; EMU: adrp{{.+}}__emutls_v.emulated_init_var

diff --git a/llvm/test/CodeGen/AArch64/arm64-tls-dynamics.ll b/llvm/test/CodeGen/AArch64/arm64-tls-dynamics.ll
index f1ec942f2dcea..c12730bd3b0d7 100644
--- a/llvm/test/CodeGen/AArch64/arm64-tls-dynamics.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-tls-dynamics.ll
@@ -18,7 +18,7 @@
 define i32 @test_generaldynamic() {
 ; CHECK-LABEL: test_generaldynamic:
 
-  %val = load i32, i32* @general_dynamic_var
+  %val = load i32, ptr @general_dynamic_var
   ret i32 %val
 
 ; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:general_dynamic_var
@@ -51,10 +51,10 @@ define i32 @test_generaldynamic() {
 
 }
 
-define i32* @test_generaldynamic_addr() {
+define ptr @test_generaldynamic_addr() {
 ; CHECK-LABEL: test_generaldynamic_addr:
 
-  ret i32* @general_dynamic_var
+  ret ptr @general_dynamic_var
 
 ; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:general_dynamic_var
 ; CHECK-NEXT: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], :tlsdesc_lo12:general_dynamic_var]
@@ -82,7 +82,7 @@ define i32* @test_generaldynamic_addr() {
 define i32 @test_localdynamic() {
 ; CHECK-LABEL: test_localdynamic:
 
-  %val = load i32, i32* @local_dynamic_var
+  %val = load i32, ptr @local_dynamic_var
   ret i32 %val
 
 ; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
@@ -118,7 +118,7 @@ define i32 @test_localdynamic() {
 
 }
 
-define i32* @test_localdynamic_addr() {
+define ptr @test_localdynamic_addr() {
 ; CHECK-LABEL: test_localdynamic_addr:
 
 ; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
@@ -138,7 +138,7 @@ define i32* @test_localdynamic_addr() {
 ; CHECK-NOLD-NEXT: blr [[CALLEE]]
 ; CHECK-NOLD: mrs x[[TPIDR:[0-9]+]], TPIDR_EL0
 ; CHECK-NOLD: add x0, x[[TPIDR]], x0
-  ret i32* @local_dynamic_var
+  ret ptr @local_dynamic_var
 
 ; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE21
 ; CHECK-RELOC: R_AARCH64_TLSDESC_LD64_LO12
@@ -161,8 +161,8 @@ define i32* @test_localdynamic_addr() {
 define i32 @test_localdynamic_deduplicate() {
 ; CHECK-LABEL: test_localdynamic_deduplicate:
 
-  %val = load i32, i32* @local_dynamic_var
-  %val2 = load i32, i32* @local_dynamic_var2
+  %val = load i32, ptr @local_dynamic_var
+  %val2 = load i32, ptr @local_dynamic_var2
 
   %sum = add i32 %val, %val2
   ret i32 %sum

diff --git a/llvm/test/CodeGen/AArch64/arm64-tls-initial-exec.ll b/llvm/test/CodeGen/AArch64/arm64-tls-initial-exec.ll
index d6df1a3907bc5..c5a1144964976 100644
--- a/llvm/test/CodeGen/AArch64/arm64-tls-initial-exec.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-tls-initial-exec.ll
@@ -11,7 +11,7 @@
 
 define i32 @test_initial_exec() {
 ; CHECK-LABEL: test_initial_exec:
-  %val = load i32, i32* @initial_exec_var
+  %val = load i32, ptr @initial_exec_var
 
 ; CHECK: adrp x[[GOTADDR:[0-9]+]], :gottprel:initial_exec_var
 ; CHECK: ldr x[[TP_OFFSET:[0-9]+]], [x[[GOTADDR]], :gottprel_lo12:initial_exec_var]
@@ -30,9 +30,9 @@ define i32 @test_initial_exec() {
   ret i32 %val
 }
 
-define i32* @test_initial_exec_addr() {
+define ptr @test_initial_exec_addr() {
 ; CHECK-LABEL: test_initial_exec_addr:
-  ret i32* @initial_exec_var
+  ret ptr @initial_exec_var
 
 ; CHECK: adrp x[[GOTADDR:[0-9]+]], :gottprel:initial_exec_var
 ; CHECK: ldr [[TP_OFFSET:x[0-9]+]], [x[[GOTADDR]], :gottprel_lo12:initial_exec_var]

diff --git a/llvm/test/CodeGen/AArch64/arm64-tls-local-exec.ll b/llvm/test/CodeGen/AArch64/arm64-tls-local-exec.ll
index 5d1b6f2afc7e1..59d5500ce534e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-tls-local-exec.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-tls-local-exec.ll
@@ -30,7 +30,7 @@
 
 define i32 @test_local_exec() {
 ; CHECK-LABEL: test_local_exec:
-  %val = load i32, i32* @local_exec_var
+  %val = load i32, ptr @local_exec_var
 
 ; CHECK-12: mrs x[[R1:[0-9]+]], TPIDR_EL0
 ; CHECK-12: add x[[R2:[0-9]+]], x[[R1]], :tprel_lo12:local_exec_var
@@ -66,9 +66,9 @@ define i32 @test_local_exec() {
   ret i32 %val
 }
 
-define i32* @test_local_exec_addr() {
+define ptr @test_local_exec_addr() {
 ; CHECK-LABEL: test_local_exec_addr:
-  ret i32* @local_exec_var
+  ret ptr @local_exec_var
 
 ; CHECK-12: mrs x[[R1:[0-9]+]], TPIDR_EL0
 ; CHECK-12: add x0, x[[R1]], :tprel_lo12:local_exec_var

diff --git a/llvm/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll b/llvm/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll
index f741564084998..fc5935082ff07 100644
--- a/llvm/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll
@@ -1,6 +1,6 @@
 ; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a53 -enable-aa-sched-mi | FileCheck %s
 ; Check that the scheduler moves the load from a[1] past the store into a[2].
-@a = common global i32* null, align 8
+@a = common global ptr null, align 8
 @m = common global i32 0, align 4
 
 ; Function Attrs: nounwind
@@ -8,13 +8,13 @@ define i32 @func(i32 %i, i32 %j, i32 %k) #0 {
 entry:
 ; CHECK: ldr {{w[0-9]+}}, [x[[REG:[0-9]+]], #4]
 ; CHECK: str {{w[0-9]+}}, [x[[REG]], #8]
-  %0 = load i32*, i32** @a, align 8, !tbaa !1
-  %arrayidx = getelementptr inbounds i32, i32* %0, i64 2
-  store i32 %i, i32* %arrayidx, align 4, !tbaa !5
-  %arrayidx1 = getelementptr inbounds i32, i32* %0, i64 1
-  %1 = load i32, i32* %arrayidx1, align 4, !tbaa !5
+  %0 = load ptr, ptr @a, align 8, !tbaa !1
+  %arrayidx = getelementptr inbounds i32, ptr %0, i64 2
+  store i32 %i, ptr %arrayidx, align 4, !tbaa !5
+  %arrayidx1 = getelementptr inbounds i32, ptr %0, i64 1
+  %1 = load i32, ptr %arrayidx1, align 4, !tbaa !5
   %add = add nsw i32 %k, %i
-  store i32 %add, i32* @m, align 4, !tbaa !5
+  store i32 %add, ptr @m, align 4, !tbaa !5
   ret i32 %1
 }
 

diff --git a/llvm/test/CodeGen/AArch64/arm64-trn.ll b/llvm/test/CodeGen/AArch64/arm64-trn.ll
index 125610ec93dce..b728a39cf00de 100644
--- a/llvm/test/CodeGen/AArch64/arm64-trn.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-trn.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=aarch64-none-eabi | FileCheck %s --check-prefixes=CHECKLE
 ; RUN: llc < %s -mtriple=aarch64_be-none-eabi | FileCheck %s --check-prefixes=CHECKBE
 
-define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @vtrni8(ptr %A, ptr %B) nounwind {
 ; CHECKLE-LABEL: vtrni8:
 ; CHECKLE:       // %bb.0:
 ; CHECKLE-NEXT:    ldr d0, [x0]
@@ -21,15 +21,15 @@ define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ; CHECKBE-NEXT:    add v0.8b, v2.8b, v0.8b
 ; CHECKBE-NEXT:    rev64 v0.8b, v0.8b
 ; CHECKBE-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
 	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
         %tmp5 = add <8 x i8> %tmp3, %tmp4
 	ret <8 x i8> %tmp5
 }
 
-define <4 x i16> @vtrni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @vtrni16(ptr %A, ptr %B) nounwind {
 ; CHECKLE-LABEL: vtrni16:
 ; CHECKLE:       // %bb.0:
 ; CHECKLE-NEXT:    ldr d0, [x0]
@@ -48,15 +48,15 @@ define <4 x i16> @vtrni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ; CHECKBE-NEXT:    add v0.4h, v2.4h, v0.4h
 ; CHECKBE-NEXT:    rev64 v0.4h, v0.4h
 ; CHECKBE-NEXT:    ret
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
 	%tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
         %tmp5 = add <4 x i16> %tmp3, %tmp4
 	ret <4 x i16> %tmp5
 }
 
-define <8 x i8> @vtrni16_viabitcast(<4 x i16> *%A, <4 x i16> *%B) nounwind {
+define <8 x i8> @vtrni16_viabitcast(ptr %A, ptr %B) nounwind {
 ; CHECKLE-LABEL: vtrni16_viabitcast:
 ; CHECKLE:       // %bb.0:
 ; CHECKLE-NEXT:    ldr d0, [x0]
@@ -71,8 +71,8 @@ define <8 x i8> @vtrni16_viabitcast(<4 x i16> *%A, <4 x i16> *%B) nounwind {
 ; CHECKBE-NEXT:    trn1 v0.4h, v0.4h, v1.4h
 ; CHECKBE-NEXT:    rev64 v0.4h, v0.4h
 ; CHECKBE-NEXT:    ret
-  %l1 = load <4 x i16>, <4 x i16> *%A
-  %l2 = load <4 x i16>, <4 x i16> *%B
+  %l1 = load <4 x i16>, ptr %A
+  %l2 = load <4 x i16>, ptr %B
   %b1 = bitcast <4 x i16> %l1 to <8 x i8>
   %b2 = bitcast <4 x i16> %l2 to <8 x i8>
   %tmp3 = shufflevector <8 x i8> %b1, <8 x i8> %b2, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 12, i32 13>
@@ -80,7 +80,7 @@ define <8 x i8> @vtrni16_viabitcast(<4 x i16> *%A, <4 x i16> *%B) nounwind {
 }
 
 ; 2xi32 TRN is redundant with ZIP
-define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @vtrni32(ptr %A, ptr %B) nounwind {
 ; CHECKLE-LABEL: vtrni32:
 ; CHECKLE:       // %bb.0:
 ; CHECKLE-NEXT:    ldr d0, [x0]
@@ -99,15 +99,15 @@ define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ; CHECKBE-NEXT:    add v0.2s, v2.2s, v0.2s
 ; CHECKBE-NEXT:    rev64 v0.2s, v0.2s
 ; CHECKBE-NEXT:    ret
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 0, i32 2>
 	%tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
         %tmp5 = add <2 x i32> %tmp3, %tmp4
 	ret <2 x i32> %tmp5
 }
 
-define <2 x float> @vtrnf(<2 x float>* %A, <2 x float>* %B) nounwind {
+define <2 x float> @vtrnf(ptr %A, ptr %B) nounwind {
 ; CHECKLE-LABEL: vtrnf:
 ; CHECKLE:       // %bb.0:
 ; CHECKLE-NEXT:    ldr d0, [x0]
@@ -126,15 +126,15 @@ define <2 x float> @vtrnf(<2 x float>* %A, <2 x float>* %B) nounwind {
 ; CHECKBE-NEXT:    fadd v0.2s, v2.2s, v0.2s
 ; CHECKBE-NEXT:    rev64 v0.2s, v0.2s
 ; CHECKBE-NEXT:    ret
-	%tmp1 = load <2 x float>, <2 x float>* %A
-	%tmp2 = load <2 x float>, <2 x float>* %B
+	%tmp1 = load <2 x float>, ptr %A
+	%tmp2 = load <2 x float>, ptr %B
 	%tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 0, i32 2>
 	%tmp4 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 1, i32 3>
         %tmp5 = fadd <2 x float> %tmp3, %tmp4
 	ret <2 x float> %tmp5
 }
 
-define <16 x i8> @vtrnQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @vtrnQi8(ptr %A, ptr %B) nounwind {
 ; CHECKLE-LABEL: vtrnQi8:
 ; CHECKLE:       // %bb.0:
 ; CHECKLE-NEXT:    ldr q0, [x0]
@@ -154,15 +154,15 @@ define <16 x i8> @vtrnQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ; CHECKBE-NEXT:    rev64 v0.16b, v0.16b
 ; CHECKBE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; CHECKBE-NEXT:    ret
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
 	%tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
         %tmp5 = add <16 x i8> %tmp3, %tmp4
 	ret <16 x i8> %tmp5
 }
 
-define <8 x i16> @vtrnQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @vtrnQi16(ptr %A, ptr %B) nounwind {
 ; CHECKLE-LABEL: vtrnQi16:
 ; CHECKLE:       // %bb.0:
 ; CHECKLE-NEXT:    ldr q0, [x0]
@@ -182,15 +182,15 @@ define <8 x i16> @vtrnQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ; CHECKBE-NEXT:    rev64 v0.8h, v0.8h
 ; CHECKBE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; CHECKBE-NEXT:    ret
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
 	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
         %tmp5 = add <8 x i16> %tmp3, %tmp4
 	ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @vtrnQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @vtrnQi32(ptr %A, ptr %B) nounwind {
 ; CHECKLE-LABEL: vtrnQi32:
 ; CHECKLE:       // %bb.0:
 ; CHECKLE-NEXT:    ldr q0, [x0]
@@ -210,15 +210,15 @@ define <4 x i32> @vtrnQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ; CHECKBE-NEXT:    rev64 v0.4s, v0.4s
 ; CHECKBE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; CHECKBE-NEXT:    ret
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
 	%tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
         %tmp5 = add <4 x i32> %tmp3, %tmp4
 	ret <4 x i32> %tmp5
 }
 
-define <4 x float> @vtrnQf(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @vtrnQf(ptr %A, ptr %B) nounwind {
 ; CHECKLE-LABEL: vtrnQf:
 ; CHECKLE:       // %bb.0:
 ; CHECKLE-NEXT:    ldr q0, [x0]
@@ -238,8 +238,8 @@ define <4 x float> @vtrnQf(<4 x float>* %A, <4 x float>* %B) nounwind {
 ; CHECKBE-NEXT:    rev64 v0.4s, v0.4s
 ; CHECKBE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; CHECKBE-NEXT:    ret
-	%tmp1 = load <4 x float>, <4 x float>* %A
-	%tmp2 = load <4 x float>, <4 x float>* %B
+	%tmp1 = load <4 x float>, ptr %A
+	%tmp2 = load <4 x float>, ptr %B
 	%tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
 	%tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
         %tmp5 = fadd <4 x float> %tmp3, %tmp4
@@ -248,7 +248,7 @@ define <4 x float> @vtrnQf(<4 x float>* %A, <4 x float>* %B) nounwind {
 
 ; Undef shuffle indices should not prevent matching to VTRN:
 
-define <8 x i8> @vtrni8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @vtrni8_undef(ptr %A, ptr %B) nounwind {
 ; CHECKLE-LABEL: vtrni8_undef:
 ; CHECKLE:       // %bb.0:
 ; CHECKLE-NEXT:    ldr d0, [x0]
@@ -267,15 +267,15 @@ define <8 x i8> @vtrni8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ; CHECKBE-NEXT:    add v0.8b, v2.8b, v0.8b
 ; CHECKBE-NEXT:    rev64 v0.8b, v0.8b
 ; CHECKBE-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 undef, i32 2, i32 10, i32 undef, i32 12, i32 6, i32 14>
 	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 undef, i32 undef, i32 15>
         %tmp5 = add <8 x i8> %tmp3, %tmp4
 	ret <8 x i8> %tmp5
 }
 
-define <8 x i16> @vtrnQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @vtrnQi16_undef(ptr %A, ptr %B) nounwind {
 ; CHECKLE-LABEL: vtrnQi16_undef:
 ; CHECKLE:       // %bb.0:
 ; CHECKLE-NEXT:    ldr q0, [x0]
@@ -295,8 +295,8 @@ define <8 x i16> @vtrnQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ; CHECKBE-NEXT:    rev64 v0.8h, v0.8h
 ; CHECKBE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; CHECKBE-NEXT:    ret
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 undef, i32 undef, i32 4, i32 12, i32 6, i32 14>
 	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 undef, i32 3, i32 11, i32 5, i32 13, i32 undef, i32 undef>
         %tmp5 = add <8 x i16> %tmp3, %tmp4

diff --git a/llvm/test/CodeGen/AArch64/arm64-trunc-store.ll b/llvm/test/CodeGen/AArch64/arm64-trunc-store.ll
index b7ad6218f3515..31a649ad64f44 100644
--- a/llvm/test/CodeGen/AArch64/arm64-trunc-store.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-trunc-store.ll
@@ -1,20 +1,20 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-none-eabi | FileCheck %s
 
-define void @bar(<8 x i16> %arg, <8 x i8>* %p) nounwind {
+define void @bar(<8 x i16> %arg, ptr %p) nounwind {
 ; CHECK-LABEL: bar:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    xtn v0.8b, v0.8h
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %tmp = trunc <8 x i16> %arg to <8 x i8>
-  store <8 x i8> %tmp, <8 x i8>* %p, align 8
+  store <8 x i8> %tmp, ptr %p, align 8
   ret void
 }
 
-@zptr8 = common global i8* null, align 8
-@zptr16 = common global i16* null, align 8
-@zptr32 = common global i32* null, align 8
+@zptr8 = common global ptr null, align 8
+@zptr16 = common global ptr null, align 8
+@zptr32 = common global ptr null, align 8
 
 define void @fct32(i32 %arg, i64 %var) {
 ; CHECK-LABEL: fct32:
@@ -26,12 +26,12 @@ define void @fct32(i32 %arg, i64 %var) {
 ; CHECK-NEXT:    str w1, [x8, w9, sxtw #2]
 ; CHECK-NEXT:    ret
 bb:
-  %.pre37 = load i32*, i32** @zptr32, align 8
+  %.pre37 = load ptr, ptr @zptr32, align 8
   %dec = add nsw i32 %arg, -1
   %idxprom8 = sext i32 %dec to i64
-  %arrayidx9 = getelementptr inbounds i32, i32* %.pre37, i64 %idxprom8
+  %arrayidx9 = getelementptr inbounds i32, ptr %.pre37, i64 %idxprom8
   %tmp = trunc i64 %var to i32
-  store i32 %tmp, i32* %arrayidx9, align 4
+  store i32 %tmp, ptr %arrayidx9, align 4
   ret void
 }
 
@@ -45,12 +45,12 @@ define void @fct16(i32 %arg, i64 %var) {
 ; CHECK-NEXT:    strh w1, [x8, w9, sxtw #1]
 ; CHECK-NEXT:    ret
 bb:
-  %.pre37 = load i16*, i16** @zptr16, align 8
+  %.pre37 = load ptr, ptr @zptr16, align 8
   %dec = add nsw i32 %arg, -1
   %idxprom8 = sext i32 %dec to i64
-  %arrayidx9 = getelementptr inbounds i16, i16* %.pre37, i64 %idxprom8
+  %arrayidx9 = getelementptr inbounds i16, ptr %.pre37, i64 %idxprom8
   %tmp = trunc i64 %var to i16
-  store i16 %tmp, i16* %arrayidx9, align 4
+  store i16 %tmp, ptr %arrayidx9, align 4
   ret void
 }
 
@@ -64,11 +64,11 @@ define void @fct8(i32 %arg, i64 %var) {
 ; CHECK-NEXT:    sturb w1, [x8, #-1]
 ; CHECK-NEXT:    ret
 bb:
-  %.pre37 = load i8*, i8** @zptr8, align 8
+  %.pre37 = load ptr, ptr @zptr8, align 8
   %dec = add nsw i32 %arg, -1
   %idxprom8 = sext i32 %dec to i64
-  %arrayidx9 = getelementptr inbounds i8, i8* %.pre37, i64 %idxprom8
+  %arrayidx9 = getelementptr inbounds i8, ptr %.pre37, i64 %idxprom8
   %tmp = trunc i64 %var to i8
-  store i8 %tmp, i8* %arrayidx9, align 4
+  store i8 %tmp, ptr %arrayidx9, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-umaxv.ll b/llvm/test/CodeGen/AArch64/arm64-umaxv.ll
index fb4124309f75b..ec9a1f0aae950 100644
--- a/llvm/test/CodeGen/AArch64/arm64-umaxv.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-umaxv.ll
@@ -13,7 +13,7 @@ entry:
   br i1 %tobool, label %return, label %if.then
 
 if.then:
-  %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() nounwind
+  %call1 = tail call i32 @bar() nounwind
   br label %return
 
 return:
@@ -36,7 +36,7 @@ entry:
   br i1 %tobool, label %return, label %if.then
 
 if.then:
-  %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() nounwind
+  %call1 = tail call i32 @bar() nounwind
   br label %return
 
 return:
@@ -57,7 +57,7 @@ entry:
   br i1 %tobool, label %return, label %if.then
 
 if.then:
-  %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() nounwind
+  %call1 = tail call i32 @bar() nounwind
   br label %return
 
 return:
@@ -78,7 +78,7 @@ entry:
   br i1 %tobool, label %return, label %if.then
 
 if.then:
-  %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() nounwind
+  %call1 = tail call i32 @bar() nounwind
   br label %return
 
 return:

diff --git a/llvm/test/CodeGen/AArch64/arm64-uminv.ll b/llvm/test/CodeGen/AArch64/arm64-uminv.ll
index 3a70e76a3905a..ec488feb1a7e6 100644
--- a/llvm/test/CodeGen/AArch64/arm64-uminv.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-uminv.ll
@@ -13,7 +13,7 @@ entry:
   br i1 %tobool, label %return, label %if.then
 
 if.then:
-  %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() nounwind
+  %call1 = tail call i32 @bar() nounwind
   br label %return
 
 return:
@@ -36,7 +36,7 @@ entry:
   br i1 %tobool, label %return, label %if.then
 
 if.then:
-  %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() nounwind
+  %call1 = tail call i32 @bar() nounwind
   br label %return
 
 return:
@@ -57,7 +57,7 @@ entry:
   br i1 %tobool, label %return, label %if.then
 
 if.then:
-  %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() nounwind
+  %call1 = tail call i32 @bar() nounwind
   br label %return
 
 return:
@@ -78,7 +78,7 @@ entry:
   br i1 %tobool, label %return, label %if.then
 
 if.then:
-  %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() nounwind
+  %call1 = tail call i32 @bar() nounwind
   br label %return
 
 return:

diff --git a/llvm/test/CodeGen/AArch64/arm64-unaligned_ldst.ll b/llvm/test/CodeGen/AArch64/arm64-unaligned_ldst.ll
index 20093e587bc34..d6705b095626d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-unaligned_ldst.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-unaligned_ldst.ll
@@ -1,41 +1,35 @@
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 ; rdar://r11231896
 
-define void @t1(i8* nocapture %a, i8* nocapture %b) nounwind {
+define void @t1(ptr nocapture %a, ptr nocapture %b) nounwind {
 entry:
 ; CHECK-LABEL: t1:
 ; CHECK-NOT: orr
 ; CHECK: ldr [[X0:x[0-9]+]], [x1]
 ; CHECK: str [[X0]], [x0]
-  %tmp1 = bitcast i8* %b to i64*
-  %tmp2 = bitcast i8* %a to i64*
-  %tmp3 = load i64, i64* %tmp1, align 1
-  store i64 %tmp3, i64* %tmp2, align 1
+  %tmp3 = load i64, ptr %b, align 1
+  store i64 %tmp3, ptr %a, align 1
   ret void
 }
 
-define void @t2(i8* nocapture %a, i8* nocapture %b) nounwind {
+define void @t2(ptr nocapture %a, ptr nocapture %b) nounwind {
 entry:
 ; CHECK-LABEL: t2:
 ; CHECK-NOT: orr
 ; CHECK: ldr [[W0:w[0-9]+]], [x1]
 ; CHECK: str [[W0]], [x0]
-  %tmp1 = bitcast i8* %b to i32*
-  %tmp2 = bitcast i8* %a to i32*
-  %tmp3 = load i32, i32* %tmp1, align 1
-  store i32 %tmp3, i32* %tmp2, align 1
+  %tmp3 = load i32, ptr %b, align 1
+  store i32 %tmp3, ptr %a, align 1
   ret void
 }
 
-define void @t3(i8* nocapture %a, i8* nocapture %b) nounwind {
+define void @t3(ptr nocapture %a, ptr nocapture %b) nounwind {
 entry:
 ; CHECK-LABEL: t3:
 ; CHECK-NOT: orr
 ; CHECK: ldrh [[W0:w[0-9]+]], [x1]
 ; CHECK: strh [[W0]], [x0]
-  %tmp1 = bitcast i8* %b to i16*
-  %tmp2 = bitcast i8* %a to i16*
-  %tmp3 = load i16, i16* %tmp1, align 1
-  store i16 %tmp3, i16* %tmp2, align 1
+  %tmp3 = load i16, ptr %b, align 1
+  store i16 %tmp3, ptr %a, align 1
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-uzp.ll b/llvm/test/CodeGen/AArch64/arm64-uzp.ll
index 0ffd919716972..94f86e7c88aeb 100644
--- a/llvm/test/CodeGen/AArch64/arm64-uzp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-uzp.ll
@@ -1,77 +1,77 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @vuzpi8(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vuzpi8:
 ;CHECK: uzp1.8b
 ;CHECK: uzp2.8b
 ;CHECK-NEXT: add.8b
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
 	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
         %tmp5 = add <8 x i8> %tmp3, %tmp4
 	ret <8 x i8> %tmp5
 }
 
-define <4 x i16> @vuzpi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @vuzpi16(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vuzpi16:
 ;CHECK: uzp1.4h
 ;CHECK: uzp2.4h
 ;CHECK-NEXT: add.4h
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
 	%tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
         %tmp5 = add <4 x i16> %tmp3, %tmp4
 	ret <4 x i16> %tmp5
 }
 
-define <16 x i8> @vuzpQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @vuzpQi8(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vuzpQi8:
 ;CHECK: uzp1.16b
 ;CHECK: uzp2.16b
 ;CHECK-NEXT: add.16b
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
 	%tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
         %tmp5 = add <16 x i8> %tmp3, %tmp4
 	ret <16 x i8> %tmp5
 }
 
-define <8 x i16> @vuzpQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @vuzpQi16(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vuzpQi16:
 ;CHECK: uzp1.8h
 ;CHECK: uzp2.8h
 ;CHECK-NEXT: add.8h
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
 	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
         %tmp5 = add <8 x i16> %tmp3, %tmp4
 	ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @vuzpQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @vuzpQi32(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vuzpQi32:
 ;CHECK: uzp1.4s
 ;CHECK: uzp2.4s
 ;CHECK-NEXT: add.4s
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
 	%tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
         %tmp5 = add <4 x i32> %tmp3, %tmp4
 	ret <4 x i32> %tmp5
 }
 
-define <4 x float> @vuzpQf(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @vuzpQf(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vuzpQf:
 ;CHECK: uzp1.4s
 ;CHECK: uzp2.4s
 ;CHECK-NEXT: fadd.4s
-	%tmp1 = load <4 x float>, <4 x float>* %A
-	%tmp2 = load <4 x float>, <4 x float>* %B
+	%tmp1 = load <4 x float>, ptr %A
+	%tmp2 = load <4 x float>, ptr %B
 	%tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
 	%tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
         %tmp5 = fadd <4 x float> %tmp3, %tmp4
@@ -80,26 +80,26 @@ define <4 x float> @vuzpQf(<4 x float>* %A, <4 x float>* %B) nounwind {
 
 ; Undef shuffle indices should not prevent matching to VUZP:
 
-define <8 x i8> @vuzpi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @vuzpi8_undef(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vuzpi8_undef:
 ;CHECK: uzp1.8b
 ;CHECK: uzp2.8b
 ;CHECK-NEXT: add.8b
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 8, i32 10, i32 12, i32 14>
 	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 undef, i32 undef, i32 13, i32 15>
         %tmp5 = add <8 x i8> %tmp3, %tmp4
 	ret <8 x i8> %tmp5
 }
 
-define <8 x i16> @vuzpQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @vuzpQi16_undef(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vuzpQi16_undef:
 ;CHECK: uzp1.8h
 ;CHECK: uzp2.8h
 ;CHECK-NEXT: add.8h
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 undef, i32 4, i32 undef, i32 8, i32 10, i32 12, i32 14>
 	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 undef, i32 undef, i32 11, i32 13, i32 15>
         %tmp5 = add <8 x i16> %tmp3, %tmp4

diff --git a/llvm/test/CodeGen/AArch64/arm64-vaargs.ll b/llvm/test/CodeGen/AArch64/arm64-vaargs.ll
index 47dea611bc7e1..d4039c2a6d6fc 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vaargs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vaargs.ll
@@ -1,19 +1,18 @@
 ; RUN: llc < %s -mtriple=arm64-apple-darwin11.0.0 | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32:64"
 
-define float @t1(i8* nocapture %fmt, ...) nounwind ssp {
+define float @t1(ptr nocapture %fmt, ...) nounwind ssp {
 entry:
 ; CHECK: t1
 ; CHECK: fcvt
-  %argp = alloca i8*, align 8
-  %argp1 = bitcast i8** %argp to i8*
-  call void @llvm.va_start(i8* %argp1)
-  %0 = va_arg i8** %argp, i32
-  %1 = va_arg i8** %argp, float
-  call void @llvm.va_end(i8* %argp1)
+  %argp = alloca ptr, align 8
+  call void @llvm.va_start(ptr %argp)
+  %0 = va_arg ptr %argp, i32
+  %1 = va_arg ptr %argp, float
+  call void @llvm.va_end(ptr %argp)
   ret float %1
 }
 
-declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind
 
-declare void @llvm.va_end(i8*) nounwind
+declare void @llvm.va_end(ptr) nounwind

diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index f7e8d2e0e01b5..8860c24a5d505 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -3,59 +3,59 @@
 ; RUN: llc < %s -global-isel -global-isel-abort=2 -pass-remarks-missed=gisel* -mtriple=arm64-eabi -aarch64-neon-syntax=apple 2>&1 | FileCheck %s --check-prefixes=CHECK,GISEL,FALLBACK
 
 ; FALLBACK-NOT: remark:{{.*}} sabdl8h
-define <8 x i16> @sabdl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i16> @sabdl8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sabdl8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sabdl.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
   %tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
 
 ; FALLBACK-NOT: remark:{{.*}} sabdl4s
-define <4 x i32> @sabdl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @sabdl4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sabdl4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sabdl.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
   %tmp4 = zext <4 x i16> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
 
 ; FALLBACK-NOT: remark:{{.*}} sabdl2d
-define <2 x i64> @sabdl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @sabdl2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sabdl2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sabdl.2d v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
   %tmp4 = zext <2 x i32> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
 
-define <8 x i16> @sabdl2_8h(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <8 x i16> @sabdl2_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sabdl2_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #8]
 ; CHECK-NEXT:    ldr d1, [x1, #8]
 ; CHECK-NEXT:    sabdl.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-  %load1 = load <16 x i8>, <16 x i8>* %A
-  %load2 = load <16 x i8>, <16 x i8>* %B
+  %load1 = load <16 x i8>, ptr %A
+  %load2 = load <16 x i8>, ptr %B
   %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %tmp2 = shufflevector <16 x i8> %load2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
@@ -63,15 +63,15 @@ define <8 x i16> @sabdl2_8h(<16 x i8>* %A, <16 x i8>* %B) nounwind {
   ret <8 x i16> %tmp4
 }
 
-define <4 x i32> @sabdl2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <4 x i32> @sabdl2_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sabdl2_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #8]
 ; CHECK-NEXT:    ldr d1, [x1, #8]
 ; CHECK-NEXT:    sabdl.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %load1 = load <8 x i16>, <8 x i16>* %A
-  %load2 = load <8 x i16>, <8 x i16>* %B
+  %load1 = load <8 x i16>, ptr %A
+  %load2 = load <8 x i16>, ptr %B
   %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -79,15 +79,15 @@ define <4 x i32> @sabdl2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
   ret <4 x i32> %tmp4
 }
 
-define <2 x i64> @sabdl2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <2 x i64> @sabdl2_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sabdl2_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #8]
 ; CHECK-NEXT:    ldr d1, [x1, #8]
 ; CHECK-NEXT:    sabdl.2d v0, v0, v1
 ; CHECK-NEXT:    ret
-  %load1 = load <4 x i32>, <4 x i32>* %A
-  %load2 = load <4 x i32>, <4 x i32>* %B
+  %load1 = load <4 x i32>, ptr %A
+  %load2 = load <4 x i32>, ptr %B
   %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -96,59 +96,59 @@ define <2 x i64> @sabdl2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 }
 
 ; FALLBACK-NOT: remark:{{.*}} uabdl8h)
-define <8 x i16> @uabdl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i16> @uabdl8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uabdl8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    uabdl.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
   %tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
 
 ; FALLBACK-NOT: remark:{{.*}} uabdl4s)
-define <4 x i32> @uabdl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @uabdl4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uabdl4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    uabdl.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
   %tmp4 = zext <4 x i16> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
 
 ; FALLBACK-NOT: remark:{{.*}} uabdl2d)
-define <2 x i64> @uabdl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @uabdl2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uabdl2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    uabdl.2d v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
   %tmp4 = zext <2 x i32> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
 
-define <8 x i16> @uabdl2_8h(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <8 x i16> @uabdl2_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uabdl2_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #8]
 ; CHECK-NEXT:    ldr d1, [x1, #8]
 ; CHECK-NEXT:    uabdl.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-  %load1 = load <16 x i8>, <16 x i8>* %A
-  %load2 = load <16 x i8>, <16 x i8>* %B
+  %load1 = load <16 x i8>, ptr %A
+  %load2 = load <16 x i8>, ptr %B
   %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %tmp2 = shufflevector <16 x i8> %load2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
 
@@ -157,15 +157,15 @@ define <8 x i16> @uabdl2_8h(<16 x i8>* %A, <16 x i8>* %B) nounwind {
   ret <8 x i16> %tmp4
 }
 
-define <4 x i32> @uabdl2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <4 x i32> @uabdl2_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uabdl2_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #8]
 ; CHECK-NEXT:    ldr d1, [x1, #8]
 ; CHECK-NEXT:    uabdl.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %load1 = load <8 x i16>, <8 x i16>* %A
-  %load2 = load <8 x i16>, <8 x i16>* %B
+  %load1 = load <8 x i16>, ptr %A
+  %load2 = load <8 x i16>, ptr %B
   %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -173,15 +173,15 @@ define <4 x i32> @uabdl2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
   ret <4 x i32> %tmp4
 }
 
-define <2 x i64> @uabdl2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <2 x i64> @uabdl2_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uabdl2_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #8]
 ; CHECK-NEXT:    ldr d1, [x1, #8]
 ; CHECK-NEXT:    uabdl.2d v0, v0, v1
 ; CHECK-NEXT:    ret
-  %load1 = load <4 x i32>, <4 x i32>* %A
-  %load2 = load <4 x i32>, <4 x i32>* %B
+  %load1 = load <4 x i32>, ptr %A
+  %load2 = load <4 x i32>, ptr %B
   %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -192,7 +192,7 @@ define <2 x i64> @uabdl2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)
 declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
 
-define i16 @uabd16b_rdx(<16 x i8>* %a, <16 x i8>* %b) {
+define i16 @uabd16b_rdx(ptr %a, ptr %b) {
 ; CHECK-LABEL: uabd16b_rdx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -201,8 +201,8 @@ define i16 @uabd16b_rdx(<16 x i8>* %a, <16 x i8>* %b) {
 ; CHECK-NEXT:    uaddlv.16b h0, v0
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %aload = load <16 x i8>, <16 x i8>* %a, align 1
-  %bload = load <16 x i8>, <16 x i8>* %b, align 1
+  %aload = load <16 x i8>, ptr %a, align 1
+  %bload = load <16 x i8>, ptr %b, align 1
   %aext = zext <16 x i8> %aload to <16 x i16>
   %bext = zext <16 x i8> %bload to <16 x i16>
   %abdiff = sub nsw <16 x i16> %aext, %bext
@@ -253,7 +253,7 @@ define i32 @sabd16b_rdx_i32(<16 x i8> %a, <16 x i8> %b) {
 declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
 declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
 
-define i32 @uabd8h_rdx(<8 x i16>* %a, <8 x i16>* %b) {
+define i32 @uabd8h_rdx(ptr %a, ptr %b) {
 ; CHECK-LABEL: uabd8h_rdx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -262,8 +262,8 @@ define i32 @uabd8h_rdx(<8 x i16>* %a, <8 x i16>* %b) {
 ; CHECK-NEXT:    uaddlv.8h s0, v0
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %aload = load <8 x i16>, <8 x i16>* %a, align 1
-  %bload = load <8 x i16>, <8 x i16>* %b, align 1
+  %aload = load <8 x i16>, ptr %a, align 1
+  %bload = load <8 x i16>, ptr %b, align 1
   %aext = zext <8 x i16> %aload to <8 x i32>
   %bext = zext <8 x i16> %bload to <8 x i32>
   %abdiff = sub nsw <8 x i32> %aext, %bext
@@ -324,7 +324,7 @@ define i32 @uabdl4s_rdx_i32(<4 x i16> %a, <4 x i16> %b) {
 declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
 declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)
 
-define i64 @uabd4s_rdx(<4 x i32>* %a, <4 x i32>* %b, i32 %h) {
+define i64 @uabd4s_rdx(ptr %a, ptr %b, i32 %h) {
 ; CHECK-LABEL: uabd4s_rdx:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -333,8 +333,8 @@ define i64 @uabd4s_rdx(<4 x i32>* %a, <4 x i32>* %b, i32 %h) {
 ; CHECK-NEXT:    uaddlv.4s d0, v0
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    ret
-  %aload = load <4 x i32>, <4 x i32>* %a, align 1
-  %bload = load <4 x i32>, <4 x i32>* %b, align 1
+  %aload = load <4 x i32>, ptr %a, align 1
+  %bload = load <4 x i32>, ptr %b, align 1
   %aext = zext <4 x i32> %aload to <4 x i64>
   %bext = zext <4 x i32> %bload to <4 x i64>
   %abdiff = sub nsw <4 x i64> %aext, %bext
@@ -392,41 +392,41 @@ define i64 @uabdl2d_rdx_i64(<2 x i32> %a, <2 x i32> %b) {
   ret i64 %reduced_v
 }
 
-define <2 x float> @fabd_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+define <2 x float> @fabd_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fabd_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    fabd.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x float>, <2 x float>* %A
-  %tmp2 = load <2 x float>, <2 x float>* %B
+  %tmp1 = load <2 x float>, ptr %A
+  %tmp2 = load <2 x float>, ptr %B
   %tmp3 = call <2 x float> @llvm.aarch64.neon.fabd.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
   ret <2 x float> %tmp3
 }
 
-define <4 x float> @fabd_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @fabd_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fabd_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fabd.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x float>, <4 x float>* %A
-  %tmp2 = load <4 x float>, <4 x float>* %B
+  %tmp1 = load <4 x float>, ptr %A
+  %tmp2 = load <4 x float>, ptr %B
   %tmp3 = call <4 x float> @llvm.aarch64.neon.fabd.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
   ret <4 x float> %tmp3
 }
 
-define <2 x double> @fabd_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+define <2 x double> @fabd_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fabd_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fabd.2d v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x double>, <2 x double>* %A
-  %tmp2 = load <2 x double>, <2 x double>* %B
+  %tmp1 = load <2 x double>, ptr %A
+  %tmp2 = load <2 x double>, ptr %B
   %tmp3 = call <2 x double> @llvm.aarch64.neon.fabd.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
   ret <2 x double> %tmp3
 }
@@ -435,43 +435,43 @@ declare <2 x float> @llvm.aarch64.neon.fabd.v2f32(<2 x float>, <2 x float>) noun
 declare <4 x float> @llvm.aarch64.neon.fabd.v4f32(<4 x float>, <4 x float>) nounwind readnone
 declare <2 x double> @llvm.aarch64.neon.fabd.v2f64(<2 x double>, <2 x double>) nounwind readnone
 
-define <2 x float> @fabd_2s_from_fsub_fabs(<2 x float>* %A, <2 x float>* %B) nounwind {
+define <2 x float> @fabd_2s_from_fsub_fabs(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fabd_2s_from_fsub_fabs:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    fabd.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x float>, <2 x float>* %A
-  %tmp2 = load <2 x float>, <2 x float>* %B
+  %tmp1 = load <2 x float>, ptr %A
+  %tmp2 = load <2 x float>, ptr %B
   %sub = fsub <2 x float> %tmp1, %tmp2
   %abs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %sub)
   ret <2 x float> %abs
 }
 
-define <4 x float> @fabd_4s_from_fsub_fabs(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @fabd_4s_from_fsub_fabs(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fabd_4s_from_fsub_fabs:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fabd.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x float>, <4 x float>* %A
-  %tmp2 = load <4 x float>, <4 x float>* %B
+  %tmp1 = load <4 x float>, ptr %A
+  %tmp2 = load <4 x float>, ptr %B
   %sub = fsub <4 x float> %tmp1, %tmp2
   %abs = call <4 x float> @llvm.fabs.v4f32(<4 x float> %sub)
   ret <4 x float> %abs
 }
 
-define <2 x double> @fabd_2d_from_fsub_fabs(<2 x double>* %A, <2 x double>* %B) nounwind {
+define <2 x double> @fabd_2d_from_fsub_fabs(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fabd_2d_from_fsub_fabs:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fabd.2d v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x double>, <2 x double>* %A
-  %tmp2 = load <2 x double>, <2 x double>* %B
+  %tmp1 = load <2 x double>, ptr %A
+  %tmp2 = load <2 x double>, ptr %B
   %sub = fsub <2 x double> %tmp1, %tmp2
   %abs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %sub)
   ret <2 x double> %abs
@@ -481,80 +481,80 @@ declare <2 x float> @llvm.fabs.v2f32(<2 x float>) nounwind readnone
 declare <4 x float> @llvm.fabs.v4f32(<4 x float>) nounwind readnone
 declare <2 x double> @llvm.fabs.v2f64(<2 x double>) nounwind readnone
 
-define <8 x i8> @sabd_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @sabd_8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sabd_8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sabd.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
   ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @sabd_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @sabd_16b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sabd_16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    sabd.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <16 x i8>, <16 x i8>* %A
-  %tmp2 = load <16 x i8>, <16 x i8>* %B
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
   %tmp3 = call <16 x i8> @llvm.aarch64.neon.sabd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
   ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @sabd_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @sabd_4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sabd_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sabd.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
   ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @sabd_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @sabd_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sabd_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    sabd.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i16>, <8 x i16>* %B
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
   ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @sabd_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @sabd_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sabd_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sabd.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
   ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @sabd_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @sabd_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sabd_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    sabd.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i32>, <4 x i32>* %B
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
   ret <4 x i32> %tmp3
 }
@@ -566,80 +566,80 @@ declare <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16>, <8 x i16>) nounwind r
 declare <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 declare <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 
-define <8 x i8> @uabd_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @uabd_8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uabd_8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    uabd.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
   ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @uabd_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @uabd_16b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uabd_16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    uabd.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <16 x i8>, <16 x i8>* %A
-  %tmp2 = load <16 x i8>, <16 x i8>* %B
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
   %tmp3 = call <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
   ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @uabd_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @uabd_4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uabd_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    uabd.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
   ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @uabd_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @uabd_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uabd_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    uabd.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i16>, <8 x i16>* %B
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
   ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @uabd_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @uabd_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uabd_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    uabd.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
   ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @uabd_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @uabd_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uabd_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    uabd.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i32>, <4 x i32>* %B
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
   ret <4 x i32> %tmp3
 }
@@ -651,68 +651,68 @@ declare <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16>, <8 x i16>) nounwind r
 declare <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 declare <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 
-define <8 x i8> @sqabs_8b(<8 x i8>* %A) nounwind {
+define <8 x i8> @sqabs_8b(ptr %A) nounwind {
 ; CHECK-LABEL: sqabs_8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    sqabs.8b v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
+  %tmp1 = load <8 x i8>, ptr %A
   %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqabs.v8i8(<8 x i8> %tmp1)
   ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @sqabs_16b(<16 x i8>* %A) nounwind {
+define <16 x i8> @sqabs_16b(ptr %A) nounwind {
 ; CHECK-LABEL: sqabs_16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    sqabs.16b v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <16 x i8>, <16 x i8>* %A
+  %tmp1 = load <16 x i8>, ptr %A
   %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqabs.v16i8(<16 x i8> %tmp1)
   ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @sqabs_4h(<4 x i16>* %A) nounwind {
+define <4 x i16> @sqabs_4h(ptr %A) nounwind {
 ; CHECK-LABEL: sqabs_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    sqabs.4h v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp1 = load <4 x i16>, ptr %A
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqabs.v4i16(<4 x i16> %tmp1)
   ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @sqabs_8h(<8 x i16>* %A) nounwind {
+define <8 x i16> @sqabs_8h(ptr %A) nounwind {
 ; CHECK-LABEL: sqabs_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    sqabs.8h v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
+  %tmp1 = load <8 x i16>, ptr %A
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqabs.v8i16(<8 x i16> %tmp1)
   ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @sqabs_2s(<2 x i32>* %A) nounwind {
+define <2 x i32> @sqabs_2s(ptr %A) nounwind {
 ; CHECK-LABEL: sqabs_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    sqabs.2s v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp1 = load <2 x i32>, ptr %A
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqabs.v2i32(<2 x i32> %tmp1)
   ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @sqabs_4s(<4 x i32>* %A) nounwind {
+define <4 x i32> @sqabs_4s(ptr %A) nounwind {
 ; CHECK-LABEL: sqabs_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    sqabs.4s v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
+  %tmp1 = load <4 x i32>, ptr %A
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqabs.v4i32(<4 x i32> %tmp1)
   ret <4 x i32> %tmp3
 }
@@ -724,68 +724,68 @@ declare <8 x i16> @llvm.aarch64.neon.sqabs.v8i16(<8 x i16>) nounwind readnone
 declare <2 x i32> @llvm.aarch64.neon.sqabs.v2i32(<2 x i32>) nounwind readnone
 declare <4 x i32> @llvm.aarch64.neon.sqabs.v4i32(<4 x i32>) nounwind readnone
 
-define <8 x i8> @sqneg_8b(<8 x i8>* %A) nounwind {
+define <8 x i8> @sqneg_8b(ptr %A) nounwind {
 ; CHECK-LABEL: sqneg_8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    sqneg.8b v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
+  %tmp1 = load <8 x i8>, ptr %A
   %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqneg.v8i8(<8 x i8> %tmp1)
   ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @sqneg_16b(<16 x i8>* %A) nounwind {
+define <16 x i8> @sqneg_16b(ptr %A) nounwind {
 ; CHECK-LABEL: sqneg_16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    sqneg.16b v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <16 x i8>, <16 x i8>* %A
+  %tmp1 = load <16 x i8>, ptr %A
   %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqneg.v16i8(<16 x i8> %tmp1)
   ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @sqneg_4h(<4 x i16>* %A) nounwind {
+define <4 x i16> @sqneg_4h(ptr %A) nounwind {
 ; CHECK-LABEL: sqneg_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    sqneg.4h v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp1 = load <4 x i16>, ptr %A
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqneg.v4i16(<4 x i16> %tmp1)
   ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @sqneg_8h(<8 x i16>* %A) nounwind {
+define <8 x i16> @sqneg_8h(ptr %A) nounwind {
 ; CHECK-LABEL: sqneg_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    sqneg.8h v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
+  %tmp1 = load <8 x i16>, ptr %A
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqneg.v8i16(<8 x i16> %tmp1)
   ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @sqneg_2s(<2 x i32>* %A) nounwind {
+define <2 x i32> @sqneg_2s(ptr %A) nounwind {
 ; CHECK-LABEL: sqneg_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    sqneg.2s v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp1 = load <2 x i32>, ptr %A
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqneg.v2i32(<2 x i32> %tmp1)
   ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @sqneg_4s(<4 x i32>* %A) nounwind {
+define <4 x i32> @sqneg_4s(ptr %A) nounwind {
 ; CHECK-LABEL: sqneg_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    sqneg.4s v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
+  %tmp1 = load <4 x i32>, ptr %A
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqneg.v4i32(<4 x i32> %tmp1)
   ret <4 x i32> %tmp3
 }
@@ -797,68 +797,68 @@ declare <8 x i16> @llvm.aarch64.neon.sqneg.v8i16(<8 x i16>) nounwind readnone
 declare <2 x i32> @llvm.aarch64.neon.sqneg.v2i32(<2 x i32>) nounwind readnone
 declare <4 x i32> @llvm.aarch64.neon.sqneg.v4i32(<4 x i32>) nounwind readnone
 
-define <8 x i8> @abs_8b(<8 x i8>* %A) nounwind {
+define <8 x i8> @abs_8b(ptr %A) nounwind {
 ; CHECK-LABEL: abs_8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    abs.8b v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
+  %tmp1 = load <8 x i8>, ptr %A
   %tmp3 = call <8 x i8> @llvm.aarch64.neon.abs.v8i8(<8 x i8> %tmp1)
   ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @abs_16b(<16 x i8>* %A) nounwind {
+define <16 x i8> @abs_16b(ptr %A) nounwind {
 ; CHECK-LABEL: abs_16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    abs.16b v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <16 x i8>, <16 x i8>* %A
+  %tmp1 = load <16 x i8>, ptr %A
   %tmp3 = call <16 x i8> @llvm.aarch64.neon.abs.v16i8(<16 x i8> %tmp1)
   ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @abs_4h(<4 x i16>* %A) nounwind {
+define <4 x i16> @abs_4h(ptr %A) nounwind {
 ; CHECK-LABEL: abs_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    abs.4h v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp1 = load <4 x i16>, ptr %A
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.abs.v4i16(<4 x i16> %tmp1)
   ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @abs_8h(<8 x i16>* %A) nounwind {
+define <8 x i16> @abs_8h(ptr %A) nounwind {
 ; CHECK-LABEL: abs_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    abs.8h v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
+  %tmp1 = load <8 x i16>, ptr %A
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.abs.v8i16(<8 x i16> %tmp1)
   ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @abs_2s(<2 x i32>* %A) nounwind {
+define <2 x i32> @abs_2s(ptr %A) nounwind {
 ; CHECK-LABEL: abs_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    abs.2s v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp1 = load <2 x i32>, ptr %A
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.abs.v2i32(<2 x i32> %tmp1)
   ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @abs_4s(<4 x i32>* %A) nounwind {
+define <4 x i32> @abs_4s(ptr %A) nounwind {
 ; CHECK-LABEL: abs_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    abs.4s v0, v0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
+  %tmp1 = load <4 x i32>, ptr %A
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.abs.v4i32(<4 x i32> %tmp1)
   ret <4 x i32> %tmp3
 }
@@ -893,7 +893,7 @@ declare <1 x i64> @llvm.aarch64.neon.abs.v1i64(<1 x i64>) nounwind readnone
 declare i64 @llvm.aarch64.neon.abs.i64(i64) nounwind readnone
 
 ; FALLBACK-NOT: remark:{{.*}} sabal8h
-define <8 x i16> @sabal8h(<8 x i8>* %A, <8 x i8>* %B,  <8 x i16>* %C) nounwind {
+define <8 x i16> @sabal8h(ptr %A, ptr %B,  ptr %C) nounwind {
 ; DAG-LABEL: sabal8h:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr d1, [x1]
@@ -909,9 +909,9 @@ define <8 x i16> @sabal8h(<8 x i8>* %A, <8 x i8>* %B,  <8 x i16>* %C) nounwind {
 ; GISEL-NEXT:    ldr q0, [x2]
 ; GISEL-NEXT:    sabal.8h v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
-  %tmp3 = load <8 x i16>, <8 x i16>* %C
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = load <8 x i16>, ptr %C
   %tmp4 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
   %tmp4.1 = zext <8 x i8> %tmp4 to <8 x i16>
   %tmp5 = add <8 x i16> %tmp3, %tmp4.1
@@ -919,7 +919,7 @@ define <8 x i16> @sabal8h(<8 x i8>* %A, <8 x i8>* %B,  <8 x i16>* %C) nounwind {
 }
 
 ; FALLBACK-NOT: remark:{{.*}} sabal4s
-define <4 x i32> @sabal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @sabal4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; DAG-LABEL: sabal4s:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr d1, [x1]
@@ -935,9 +935,9 @@ define <4 x i32> @sabal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind
 ; GISEL-NEXT:    ldr q0, [x2]
 ; GISEL-NEXT:    sabal.4s v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp4 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
   %tmp4.1 = zext <4 x i16> %tmp4 to <4 x i32>
   %tmp5 = add <4 x i32> %tmp3, %tmp4.1
@@ -945,7 +945,7 @@ define <4 x i32> @sabal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind
 }
 
 ; FALLBACK-NOT: remark:{{.*}} sabal2d
-define <2 x i64> @sabal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @sabal2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; DAG-LABEL: sabal2d:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr d1, [x1]
@@ -961,9 +961,9 @@ define <2 x i64> @sabal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind
 ; GISEL-NEXT:    ldr q0, [x2]
 ; GISEL-NEXT:    sabal.2d v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp4 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
   %tmp4.1 = zext <2 x i32> %tmp4 to <2 x i64>
   %tmp4.1.1 = zext <2 x i32> %tmp4 to <2 x i64>
@@ -971,7 +971,7 @@ define <2 x i64> @sabal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind
   ret <2 x i64> %tmp5
 }
 
-define <8 x i16> @sabal2_8h(<16 x i8>* %A, <16 x i8>* %B, <8 x i16>* %C) nounwind {
+define <8 x i16> @sabal2_8h(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sabal2_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x2]
@@ -979,9 +979,9 @@ define <8 x i16> @sabal2_8h(<16 x i8>* %A, <16 x i8>* %B, <8 x i16>* %C) nounwin
 ; CHECK-NEXT:    ldr d2, [x1, #8]
 ; CHECK-NEXT:    sabal.8h v0, v1, v2
 ; CHECK-NEXT:    ret
-  %load1 = load <16 x i8>, <16 x i8>* %A
-  %load2 = load <16 x i8>, <16 x i8>* %B
-  %tmp3 = load <8 x i16>, <8 x i16>* %C
+  %load1 = load <16 x i8>, ptr %A
+  %load2 = load <16 x i8>, ptr %B
+  %tmp3 = load <8 x i16>, ptr %C
   %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %tmp2 = shufflevector <16 x i8> %load2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %tmp4 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
@@ -990,7 +990,7 @@ define <8 x i16> @sabal2_8h(<16 x i8>* %A, <16 x i8>* %B, <8 x i16>* %C) nounwin
   ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @sabal2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @sabal2_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sabal2_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x2]
@@ -998,9 +998,9 @@ define <4 x i32> @sabal2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwin
 ; CHECK-NEXT:    ldr d2, [x1, #8]
 ; CHECK-NEXT:    sabal.4s v0, v1, v2
 ; CHECK-NEXT:    ret
-  %load1 = load <8 x i16>, <8 x i16>* %A
-  %load2 = load <8 x i16>, <8 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %load1 = load <8 x i16>, ptr %A
+  %load2 = load <8 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp4 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -1009,7 +1009,7 @@ define <4 x i32> @sabal2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwin
   ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @sabal2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @sabal2_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sabal2_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x2]
@@ -1017,9 +1017,9 @@ define <2 x i64> @sabal2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwin
 ; CHECK-NEXT:    ldr d2, [x1, #8]
 ; CHECK-NEXT:    sabal.2d v0, v1, v2
 ; CHECK-NEXT:    ret
-  %load1 = load <4 x i32>, <4 x i32>* %A
-  %load2 = load <4 x i32>, <4 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %load1 = load <4 x i32>, ptr %A
+  %load2 = load <4 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp4 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -1029,7 +1029,7 @@ define <2 x i64> @sabal2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwin
 }
 
 ; FALLBACK-NOT: remark:{{.*}} uabal8h
-define <8 x i16> @uabal8h(<8 x i8>* %A, <8 x i8>* %B,  <8 x i16>* %C) nounwind {
+define <8 x i16> @uabal8h(ptr %A, ptr %B,  ptr %C) nounwind {
 ; DAG-LABEL: uabal8h:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr d1, [x1]
@@ -1045,9 +1045,9 @@ define <8 x i16> @uabal8h(<8 x i8>* %A, <8 x i8>* %B,  <8 x i16>* %C) nounwind {
 ; GISEL-NEXT:    ldr q0, [x2]
 ; GISEL-NEXT:    uabal.8h v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
-  %tmp3 = load <8 x i16>, <8 x i16>* %C
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
+  %tmp3 = load <8 x i16>, ptr %C
   %tmp4 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
   %tmp4.1 = zext <8 x i8> %tmp4 to <8 x i16>
   %tmp5 = add <8 x i16> %tmp3, %tmp4.1
@@ -1055,7 +1055,7 @@ define <8 x i16> @uabal8h(<8 x i8>* %A, <8 x i8>* %B,  <8 x i16>* %C) nounwind {
 }
 
 ; FALLBACK-NOT: remark:{{.*}} uabal8s
-define <4 x i32> @uabal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @uabal4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; DAG-LABEL: uabal4s:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr d1, [x1]
@@ -1071,9 +1071,9 @@ define <4 x i32> @uabal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind
 ; GISEL-NEXT:    ldr q0, [x2]
 ; GISEL-NEXT:    uabal.4s v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp4 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
   %tmp4.1 = zext <4 x i16> %tmp4 to <4 x i32>
   %tmp5 = add <4 x i32> %tmp3, %tmp4.1
@@ -1081,7 +1081,7 @@ define <4 x i32> @uabal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind
 }
 
 ; FALLBACK-NOT: remark:{{.*}} uabal2d
-define <2 x i64> @uabal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @uabal2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; DAG-LABEL: uabal2d:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr d1, [x1]
@@ -1097,16 +1097,16 @@ define <2 x i64> @uabal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind
 ; GISEL-NEXT:    ldr q0, [x2]
 ; GISEL-NEXT:    uabal.2d v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp4 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
   %tmp4.1 = zext <2 x i32> %tmp4 to <2 x i64>
   %tmp5 = add <2 x i64> %tmp3, %tmp4.1
   ret <2 x i64> %tmp5
 }
 
-define <8 x i16> @uabal2_8h(<16 x i8>* %A, <16 x i8>* %B, <8 x i16>* %C) nounwind {
+define <8 x i16> @uabal2_8h(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: uabal2_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x2]
@@ -1114,9 +1114,9 @@ define <8 x i16> @uabal2_8h(<16 x i8>* %A, <16 x i8>* %B, <8 x i16>* %C) nounwin
 ; CHECK-NEXT:    ldr d2, [x1, #8]
 ; CHECK-NEXT:    uabal.8h v0, v1, v2
 ; CHECK-NEXT:    ret
-  %load1 = load <16 x i8>, <16 x i8>* %A
-  %load2 = load <16 x i8>, <16 x i8>* %B
-  %tmp3 = load <8 x i16>, <8 x i16>* %C
+  %load1 = load <16 x i8>, ptr %A
+  %load2 = load <16 x i8>, ptr %B
+  %tmp3 = load <8 x i16>, ptr %C
   %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %tmp2 = shufflevector <16 x i8> %load2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %tmp4 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
@@ -1125,7 +1125,7 @@ define <8 x i16> @uabal2_8h(<16 x i8>* %A, <16 x i8>* %B, <8 x i16>* %C) nounwin
   ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @uabal2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @uabal2_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: uabal2_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x2]
@@ -1133,9 +1133,9 @@ define <4 x i32> @uabal2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwin
 ; CHECK-NEXT:    ldr d2, [x1, #8]
 ; CHECK-NEXT:    uabal.4s v0, v1, v2
 ; CHECK-NEXT:    ret
-  %load1 = load <8 x i16>, <8 x i16>* %A
-  %load2 = load <8 x i16>, <8 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %load1 = load <8 x i16>, ptr %A
+  %load2 = load <8 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp4 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -1144,7 +1144,7 @@ define <4 x i32> @uabal2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwin
   ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @uabal2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @uabal2_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: uabal2_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x2]
@@ -1152,9 +1152,9 @@ define <2 x i64> @uabal2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwin
 ; CHECK-NEXT:    ldr d2, [x1, #8]
 ; CHECK-NEXT:    uabal.2d v0, v1, v2
 ; CHECK-NEXT:    ret
-  %load1 = load <4 x i32>, <4 x i32>* %A
-  %load2 = load <4 x i32>, <4 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %load1 = load <4 x i32>, ptr %A
+  %load2 = load <4 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp4 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -1163,7 +1163,7 @@ define <2 x i64> @uabal2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwin
   ret <2 x i64> %tmp5
 }
 
-define <8 x i8> @saba_8b(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
+define <8 x i8> @saba_8b(ptr %A, ptr %B, ptr %C) nounwind {
 ; DAG-LABEL: saba_8b:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr d1, [x1]
@@ -1179,15 +1179,15 @@ define <8 x i8> @saba_8b(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
 ; GISEL-NEXT:    ldr d0, [x2]
 ; GISEL-NEXT:    saba.8b v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-  %tmp4 = load <8 x i8>, <8 x i8>* %C
+  %tmp4 = load <8 x i8>, ptr %C
   %tmp5 = add <8 x i8> %tmp3, %tmp4
   ret <8 x i8> %tmp5
 }
 
-define <16 x i8> @saba_16b(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
+define <16 x i8> @saba_16b(ptr %A, ptr %B, ptr %C) nounwind {
 ; DAG-LABEL: saba_16b:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr q1, [x1]
@@ -1203,15 +1203,15 @@ define <16 x i8> @saba_16b(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind
 ; GISEL-NEXT:    ldr q0, [x2]
 ; GISEL-NEXT:    saba.16b v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <16 x i8>, <16 x i8>* %A
-  %tmp2 = load <16 x i8>, <16 x i8>* %B
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
   %tmp3 = call <16 x i8> @llvm.aarch64.neon.sabd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-  %tmp4 = load <16 x i8>, <16 x i8>* %C
+  %tmp4 = load <16 x i8>, ptr %C
   %tmp5 = add <16 x i8> %tmp3, %tmp4
   ret <16 x i8> %tmp5
 }
 
-define <4 x i16> @saba_4h(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
+define <4 x i16> @saba_4h(ptr %A, ptr %B, ptr %C) nounwind {
 ; DAG-LABEL: saba_4h:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr d1, [x1]
@@ -1227,15 +1227,15 @@ define <4 x i16> @saba_4h(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
 ; GISEL-NEXT:    ldr d0, [x2]
 ; GISEL-NEXT:    saba.4h v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-  %tmp4 = load <4 x i16>, <4 x i16>* %C
+  %tmp4 = load <4 x i16>, ptr %C
   %tmp5 = add <4 x i16> %tmp3, %tmp4
   ret <4 x i16> %tmp5
 }
 
-define <8 x i16> @saba_8h(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
+define <8 x i16> @saba_8h(ptr %A, ptr %B, ptr %C) nounwind {
 ; DAG-LABEL: saba_8h:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr q1, [x1]
@@ -1251,15 +1251,15 @@ define <8 x i16> @saba_8h(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind
 ; GISEL-NEXT:    ldr q0, [x2]
 ; GISEL-NEXT:    saba.8h v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i16>, <8 x i16>* %B
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-  %tmp4 = load <8 x i16>, <8 x i16>* %C
+  %tmp4 = load <8 x i16>, ptr %C
   %tmp5 = add <8 x i16> %tmp3, %tmp4
   ret <8 x i16> %tmp5
 }
 
-define <2 x i32> @saba_2s(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
+define <2 x i32> @saba_2s(ptr %A, ptr %B, ptr %C) nounwind {
 ; DAG-LABEL: saba_2s:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr d1, [x1]
@@ -1275,15 +1275,15 @@ define <2 x i32> @saba_2s(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind
 ; GISEL-NEXT:    ldr d0, [x2]
 ; GISEL-NEXT:    saba.2s v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-  %tmp4 = load <2 x i32>, <2 x i32>* %C
+  %tmp4 = load <2 x i32>, ptr %C
   %tmp5 = add <2 x i32> %tmp3, %tmp4
   ret <2 x i32> %tmp5
 }
 
-define <4 x i32> @saba_4s(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @saba_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; DAG-LABEL: saba_4s:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr q1, [x1]
@@ -1299,15 +1299,15 @@ define <4 x i32> @saba_4s(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind
 ; GISEL-NEXT:    ldr q0, [x2]
 ; GISEL-NEXT:    saba.4s v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i32>, <4 x i32>* %B
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-  %tmp4 = load <4 x i32>, <4 x i32>* %C
+  %tmp4 = load <4 x i32>, ptr %C
   %tmp5 = add <4 x i32> %tmp3, %tmp4
   ret <4 x i32> %tmp5
 }
 
-define <8 x i8> @uaba_8b(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
+define <8 x i8> @uaba_8b(ptr %A, ptr %B, ptr %C) nounwind {
 ; DAG-LABEL: uaba_8b:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr d1, [x1]
@@ -1323,15 +1323,15 @@ define <8 x i8> @uaba_8b(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
 ; GISEL-NEXT:    ldr d0, [x2]
 ; GISEL-NEXT:    uaba.8b v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
-  %tmp4 = load <8 x i8>, <8 x i8>* %C
+  %tmp4 = load <8 x i8>, ptr %C
   %tmp5 = add <8 x i8> %tmp3, %tmp4
   ret <8 x i8> %tmp5
 }
 
-define <16 x i8> @uaba_16b(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
+define <16 x i8> @uaba_16b(ptr %A, ptr %B, ptr %C) nounwind {
 ; DAG-LABEL: uaba_16b:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr q1, [x1]
@@ -1347,15 +1347,15 @@ define <16 x i8> @uaba_16b(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind
 ; GISEL-NEXT:    ldr q0, [x2]
 ; GISEL-NEXT:    uaba.16b v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <16 x i8>, <16 x i8>* %A
-  %tmp2 = load <16 x i8>, <16 x i8>* %B
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
   %tmp3 = call <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
-  %tmp4 = load <16 x i8>, <16 x i8>* %C
+  %tmp4 = load <16 x i8>, ptr %C
   %tmp5 = add <16 x i8> %tmp3, %tmp4
   ret <16 x i8> %tmp5
 }
 
-define <4 x i16> @uaba_4h(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
+define <4 x i16> @uaba_4h(ptr %A, ptr %B, ptr %C) nounwind {
 ; DAG-LABEL: uaba_4h:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr d1, [x1]
@@ -1371,15 +1371,15 @@ define <4 x i16> @uaba_4h(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
 ; GISEL-NEXT:    ldr d0, [x2]
 ; GISEL-NEXT:    uaba.4h v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
-  %tmp4 = load <4 x i16>, <4 x i16>* %C
+  %tmp4 = load <4 x i16>, ptr %C
   %tmp5 = add <4 x i16> %tmp3, %tmp4
   ret <4 x i16> %tmp5
 }
 
-define <8 x i16> @uaba_8h(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
+define <8 x i16> @uaba_8h(ptr %A, ptr %B, ptr %C) nounwind {
 ; DAG-LABEL: uaba_8h:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr q1, [x1]
@@ -1395,15 +1395,15 @@ define <8 x i16> @uaba_8h(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind
 ; GISEL-NEXT:    ldr q0, [x2]
 ; GISEL-NEXT:    uaba.8h v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i16>, <8 x i16>* %B
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
-  %tmp4 = load <8 x i16>, <8 x i16>* %C
+  %tmp4 = load <8 x i16>, ptr %C
   %tmp5 = add <8 x i16> %tmp3, %tmp4
   ret <8 x i16> %tmp5
 }
 
-define <2 x i32> @uaba_2s(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
+define <2 x i32> @uaba_2s(ptr %A, ptr %B, ptr %C) nounwind {
 ; DAG-LABEL: uaba_2s:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr d1, [x1]
@@ -1419,15 +1419,15 @@ define <2 x i32> @uaba_2s(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind
 ; GISEL-NEXT:    ldr d0, [x2]
 ; GISEL-NEXT:    uaba.2s v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
-  %tmp4 = load <2 x i32>, <2 x i32>* %C
+  %tmp4 = load <2 x i32>, ptr %C
   %tmp5 = add <2 x i32> %tmp3, %tmp4
   ret <2 x i32> %tmp5
 }
 
-define <4 x i32> @uaba_4s(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @uaba_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; DAG-LABEL: uaba_4s:
 ; DAG:       // %bb.0:
 ; DAG-NEXT:    ldr q1, [x1]
@@ -1443,10 +1443,10 @@ define <4 x i32> @uaba_4s(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind
 ; GISEL-NEXT:    ldr q0, [x2]
 ; GISEL-NEXT:    uaba.4s v0, v1, v2
 ; GISEL-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i32>, <4 x i32>* %B
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
-  %tmp4 = load <4 x i32>, <4 x i32>* %C
+  %tmp4 = load <4 x i32>, ptr %C
   %tmp5 = add <4 x i32> %tmp3, %tmp4
   ret <4 x i32> %tmp5
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-vadd.ll b/llvm/test/CodeGen/AArch64/arm64-vadd.ll
index 6fa90c4636966..ad089f38955be 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vadd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vadd.ll
@@ -1,28 +1,28 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
 
-define <8 x i8> @addhn8b(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i8> @addhn8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: addhn8b:
 ;CHECK: addhn.8b
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i16>, ptr %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.addhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @addhn4h(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i16> @addhn4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: addhn4h:
 ;CHECK: addhn.4h
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i32>, ptr %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.addhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @addhn2s(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i32> @addhn2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: addhn2s:
 ;CHECK: addhn.2s
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i64>, ptr %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.addhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i32> %tmp3
 }
@@ -62,29 +62,29 @@ declare <4 x i16> @llvm.aarch64.neon.addhn.v4i16(<4 x i32>, <4 x i32>) nounwind
 declare <8 x i8> @llvm.aarch64.neon.addhn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
 
 
-define <8 x i8> @raddhn8b(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i8> @raddhn8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: raddhn8b:
 ;CHECK: raddhn.8b
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i16>, ptr %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.raddhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @raddhn4h(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i16> @raddhn4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: raddhn4h:
 ;CHECK: raddhn.4h
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i32>, ptr %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @raddhn2s(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i32> @raddhn2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: raddhn2s:
 ;CHECK: raddhn.2s
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i64>, ptr %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.raddhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i32> %tmp3
 }
@@ -123,33 +123,33 @@ declare <2 x i32> @llvm.aarch64.neon.raddhn.v2i32(<2 x i64>, <2 x i64>) nounwind
 declare <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
 declare <8 x i8> @llvm.aarch64.neon.raddhn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
 
-define <8 x i16> @saddl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i16> @saddl8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: saddl8h:
 ;CHECK: saddl.8h
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
-        %tmp2 = load <8 x i8>, <8 x i8>* %B
+        %tmp1 = load <8 x i8>, ptr %A
+        %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
   %tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
   %tmp5 = add <8 x i16> %tmp3, %tmp4
         ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @saddl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @saddl4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: saddl4s:
 ;CHECK: saddl.4s
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
-        %tmp2 = load <4 x i16>, <4 x i16>* %B
+        %tmp1 = load <4 x i16>, ptr %A
+        %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = sext <4 x i16> %tmp1 to <4 x i32>
   %tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
   %tmp5 = add <4 x i32> %tmp3, %tmp4
         ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @saddl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @saddl2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: saddl2d:
 ;CHECK: saddl.2d
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
-        %tmp2 = load <2 x i32>, <2 x i32>* %B
+        %tmp1 = load <2 x i32>, ptr %A
+        %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = sext <2 x i32> %tmp1 to <2 x i64>
   %tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
   %tmp5 = add <2 x i64> %tmp3, %tmp4
@@ -204,33 +204,33 @@ define <2 x i64> @saddl2_2d(<4 x i32> %a, <4 x i32> %b) nounwind  {
   ret <2 x i64> %add.i
 }
 
-define <8 x i16> @uaddl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i16> @uaddl8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uaddl8h:
 ;CHECK: uaddl.8h
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
   %tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
   %tmp5 = add <8 x i16> %tmp3, %tmp4
   ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @uaddl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @uaddl4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uaddl4s:
 ;CHECK: uaddl.4s
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
   %tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
   %tmp5 = add <4 x i32> %tmp3, %tmp4
   ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @uaddl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @uaddl2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uaddl2d:
 ;CHECK: uaddl.2d
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = zext <2 x i32> %tmp1 to <2 x i64>
   %tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
   %tmp5 = add <2 x i64> %tmp3, %tmp4
@@ -286,42 +286,42 @@ define <2 x i64> @uaddl2_2d(<4 x i32> %a, <4 x i32> %b) nounwind  {
   ret <2 x i64> %add.i
 }
 
-define <8 x i16> @uaddw8h(<8 x i16>* %A, <8 x i8>* %B) nounwind {
+define <8 x i16> @uaddw8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uaddw8h:
 ;CHECK: uaddw.8h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i8>, <8 x i8>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = zext <8 x i8> %tmp2 to <8 x i16>
   %tmp4 = add <8 x i16> %tmp1, %tmp3
         ret <8 x i16> %tmp4
 }
 
-define <4 x i32> @uaddw4s(<4 x i32>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @uaddw4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uaddw4s:
 ;CHECK: uaddw.4s
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i16>, <4 x i16>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = zext <4 x i16> %tmp2 to <4 x i32>
   %tmp4 = add <4 x i32> %tmp1, %tmp3
         ret <4 x i32> %tmp4
 }
 
-define <2 x i64> @uaddw2d(<2 x i64>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @uaddw2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uaddw2d:
 ;CHECK: uaddw.2d
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i32>, <2 x i32>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = zext <2 x i32> %tmp2 to <2 x i64>
   %tmp4 = add <2 x i64> %tmp1, %tmp3
         ret <2 x i64> %tmp4
 }
 
-define <8 x i16> @uaddw2_8h(<8 x i16>* %A, <16 x i8>* %B) nounwind {
+define <8 x i16> @uaddw2_8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uaddw2_8h:
 ;CHECK: uaddw.8h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
 
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp2 = load <16 x i8>, ptr %B
         %high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         %ext2 = zext <8 x i8> %high2 to <8 x i16>
 
@@ -329,12 +329,12 @@ define <8 x i16> @uaddw2_8h(<8 x i16>* %A, <16 x i8>* %B) nounwind {
         ret <8 x i16> %res
 }
 
-define <4 x i32> @uaddw2_4s(<4 x i32>* %A, <8 x i16>* %B) nounwind {
+define <4 x i32> @uaddw2_4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uaddw2_4s:
 ;CHECK: uaddw.4s
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
 
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp2 = load <8 x i16>, ptr %B
         %high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
         %ext2 = zext <4 x i16> %high2 to <4 x i32>
 
@@ -342,12 +342,12 @@ define <4 x i32> @uaddw2_4s(<4 x i32>* %A, <8 x i16>* %B) nounwind {
         ret <4 x i32> %res
 }
 
-define <2 x i64> @uaddw2_2d(<2 x i64>* %A, <4 x i32>* %B) nounwind {
+define <2 x i64> @uaddw2_2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uaddw2_2d:
 ;CHECK: uaddw.2d
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
 
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp2 = load <4 x i32>, ptr %B
         %high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
         %ext2 = zext <2 x i32> %high2 to <2 x i64>
 
@@ -355,42 +355,42 @@ define <2 x i64> @uaddw2_2d(<2 x i64>* %A, <4 x i32>* %B) nounwind {
         ret <2 x i64> %res
 }
 
-define <8 x i16> @saddw8h(<8 x i16>* %A, <8 x i8>* %B) nounwind {
+define <8 x i16> @saddw8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: saddw8h:
 ;CHECK: saddw.8h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i8>, <8 x i8>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i8>, ptr %B
         %tmp3 = sext <8 x i8> %tmp2 to <8 x i16>
         %tmp4 = add <8 x i16> %tmp1, %tmp3
         ret <8 x i16> %tmp4
 }
 
-define <4 x i32> @saddw4s(<4 x i32>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @saddw4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: saddw4s:
 ;CHECK: saddw.4s
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i16>, <4 x i16>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i16>, ptr %B
         %tmp3 = sext <4 x i16> %tmp2 to <4 x i32>
         %tmp4 = add <4 x i32> %tmp1, %tmp3
         ret <4 x i32> %tmp4
 }
 
-define <2 x i64> @saddw2d(<2 x i64>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @saddw2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: saddw2d:
 ;CHECK: saddw.2d
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i32>, <2 x i32>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i32>, ptr %B
         %tmp3 = sext <2 x i32> %tmp2 to <2 x i64>
         %tmp4 = add <2 x i64> %tmp1, %tmp3
         ret <2 x i64> %tmp4
 }
 
-define <8 x i16> @saddw2_8h(<8 x i16>* %A, <16 x i8>* %B) nounwind {
+define <8 x i16> @saddw2_8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: saddw2_8h:
 ;CHECK: saddw.8h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
 
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp2 = load <16 x i8>, ptr %B
         %high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         %ext2 = sext <8 x i8> %high2 to <8 x i16>
 
@@ -398,12 +398,12 @@ define <8 x i16> @saddw2_8h(<8 x i16>* %A, <16 x i8>* %B) nounwind {
         ret <8 x i16> %res
 }
 
-define <4 x i32> @saddw2_4s(<4 x i32>* %A, <8 x i16>* %B) nounwind {
+define <4 x i32> @saddw2_4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: saddw2_4s:
 ;CHECK: saddw.4s
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
 
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp2 = load <8 x i16>, ptr %B
         %high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
         %ext2 = sext <4 x i16> %high2 to <4 x i32>
 
@@ -411,12 +411,12 @@ define <4 x i32> @saddw2_4s(<4 x i32>* %A, <8 x i16>* %B) nounwind {
         ret <4 x i32> %res
 }
 
-define <2 x i64> @saddw2_2d(<2 x i64>* %A, <4 x i32>* %B) nounwind {
+define <2 x i64> @saddw2_2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: saddw2_2d:
 ;CHECK: saddw.2d
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
 
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp2 = load <4 x i32>, ptr %B
         %high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
         %ext2 = sext <2 x i32> %high2 to <2 x i64>
 
@@ -424,50 +424,50 @@ define <2 x i64> @saddw2_2d(<2 x i64>* %A, <4 x i32>* %B) nounwind {
         ret <2 x i64> %res
 }
 
-define <4 x i16> @saddlp4h(<8 x i8>* %A) nounwind {
+define <4 x i16> @saddlp4h(ptr %A) nounwind {
 ;CHECK-LABEL: saddlp4h:
 ;CHECK: saddlp.4h
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp1 = load <8 x i8>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> %tmp1)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @saddlp2s(<4 x i16>* %A) nounwind {
+define <2 x i32> @saddlp2s(ptr %A) nounwind {
 ;CHECK-LABEL: saddlp2s:
 ;CHECK: saddlp.2s
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp1 = load <4 x i16>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16> %tmp1)
         ret <2 x i32> %tmp3
 }
 
-define <1 x i64> @saddlp1d(<2 x i32>* %A) nounwind {
+define <1 x i64> @saddlp1d(ptr %A) nounwind {
 ;CHECK-LABEL: saddlp1d:
 ;CHECK: saddlp.1d
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp1 = load <2 x i32>, ptr %A
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.saddlp.v1i64.v2i32(<2 x i32> %tmp1)
         ret <1 x i64> %tmp3
 }
 
-define <8 x i16> @saddlp8h(<16 x i8>* %A) nounwind {
+define <8 x i16> @saddlp8h(ptr %A) nounwind {
 ;CHECK-LABEL: saddlp8h:
 ;CHECK: saddlp.8h
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp1 = load <16 x i8>, ptr %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8> %tmp1)
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @saddlp4s(<8 x i16>* %A) nounwind {
+define <4 x i32> @saddlp4s(ptr %A) nounwind {
 ;CHECK-LABEL: saddlp4s:
 ;CHECK: saddlp.4s
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16> %tmp1)
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @saddlp2d(<4 x i32>* %A) nounwind {
+define <2 x i64> @saddlp2d(ptr %A) nounwind {
 ;CHECK-LABEL: saddlp2d:
 ;CHECK: saddlp.2d
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> %tmp1)
         ret <2 x i64> %tmp3
 }
@@ -480,50 +480,50 @@ declare <8 x i16>  @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8>) nounwind rea
 declare <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16>) nounwind readnone
 declare <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32>) nounwind readnone
 
-define <4 x i16> @uaddlp4h(<8 x i8>* %A) nounwind {
+define <4 x i16> @uaddlp4h(ptr %A) nounwind {
 ;CHECK-LABEL: uaddlp4h:
 ;CHECK: uaddlp.4h
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp1 = load <8 x i8>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> %tmp1)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @uaddlp2s(<4 x i16>* %A) nounwind {
+define <2 x i32> @uaddlp2s(ptr %A) nounwind {
 ;CHECK-LABEL: uaddlp2s:
 ;CHECK: uaddlp.2s
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp1 = load <4 x i16>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> %tmp1)
         ret <2 x i32> %tmp3
 }
 
-define <1 x i64> @uaddlp1d(<2 x i32>* %A) nounwind {
+define <1 x i64> @uaddlp1d(ptr %A) nounwind {
 ;CHECK-LABEL: uaddlp1d:
 ;CHECK: uaddlp.1d
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp1 = load <2 x i32>, ptr %A
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.uaddlp.v1i64.v2i32(<2 x i32> %tmp1)
         ret <1 x i64> %tmp3
 }
 
-define <8 x i16> @uaddlp8h(<16 x i8>* %A) nounwind {
+define <8 x i16> @uaddlp8h(ptr %A) nounwind {
 ;CHECK-LABEL: uaddlp8h:
 ;CHECK: uaddlp.8h
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp1 = load <16 x i8>, ptr %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> %tmp1)
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @uaddlp4s(<8 x i16>* %A) nounwind {
+define <4 x i32> @uaddlp4s(ptr %A) nounwind {
 ;CHECK-LABEL: uaddlp4s:
 ;CHECK: uaddlp.4s
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> %tmp1)
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @uaddlp2d(<4 x i32>* %A) nounwind {
+define <2 x i64> @uaddlp2d(ptr %A) nounwind {
 ;CHECK-LABEL: uaddlp2d:
 ;CHECK: uaddlp.2d
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> %tmp1)
         ret <2 x i64> %tmp3
 }
@@ -536,165 +536,165 @@ declare <8 x i16>  @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8>) nounwind rea
 declare <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16>) nounwind readnone
 declare <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32>) nounwind readnone
 
-define <4 x i16> @sadalp4h(<8 x i8>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @sadalp4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sadalp4h:
 ;CHECK: sadalp.4h
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp1 = load <8 x i8>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> %tmp1)
-        %tmp4 = load <4 x i16>, <4 x i16>* %B
+        %tmp4 = load <4 x i16>, ptr %B
         %tmp5 = add <4 x i16> %tmp3, %tmp4
         ret <4 x i16> %tmp5
 }
 
-define <2 x i32> @sadalp2s(<4 x i16>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @sadalp2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sadalp2s:
 ;CHECK: sadalp.2s
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp1 = load <4 x i16>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16> %tmp1)
-        %tmp4 = load <2 x i32>, <2 x i32>* %B
+        %tmp4 = load <2 x i32>, ptr %B
         %tmp5 = add <2 x i32> %tmp3, %tmp4
         ret <2 x i32> %tmp5
 }
 
-define <8 x i16> @sadalp8h(<16 x i8>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @sadalp8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sadalp8h:
 ;CHECK: sadalp.8h
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp1 = load <16 x i8>, ptr %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8> %tmp1)
-        %tmp4 = load <8 x i16>, <8 x i16>* %B
+        %tmp4 = load <8 x i16>, ptr %B
         %tmp5 = add <8 x i16> %tmp3, %tmp4
         ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @sadalp4s(<8 x i16>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @sadalp4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sadalp4s:
 ;CHECK: sadalp.4s
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16> %tmp1)
-        %tmp4 = load <4 x i32>, <4 x i32>* %B
+        %tmp4 = load <4 x i32>, ptr %B
         %tmp5 = add <4 x i32> %tmp3, %tmp4
         ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @sadalp2d(<4 x i32>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @sadalp2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sadalp2d:
 ;CHECK: sadalp.2d
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> %tmp1)
-        %tmp4 = load <2 x i64>, <2 x i64>* %B
+        %tmp4 = load <2 x i64>, ptr %B
         %tmp5 = add <2 x i64> %tmp3, %tmp4
         ret <2 x i64> %tmp5
 }
 
-define <4 x i16> @uadalp4h(<8 x i8>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @uadalp4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uadalp4h:
 ;CHECK: uadalp.4h
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp1 = load <8 x i8>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> %tmp1)
-        %tmp4 = load <4 x i16>, <4 x i16>* %B
+        %tmp4 = load <4 x i16>, ptr %B
         %tmp5 = add <4 x i16> %tmp3, %tmp4
         ret <4 x i16> %tmp5
 }
 
-define <2 x i32> @uadalp2s(<4 x i16>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @uadalp2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uadalp2s:
 ;CHECK: uadalp.2s
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp1 = load <4 x i16>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> %tmp1)
-        %tmp4 = load <2 x i32>, <2 x i32>* %B
+        %tmp4 = load <2 x i32>, ptr %B
         %tmp5 = add <2 x i32> %tmp3, %tmp4
         ret <2 x i32> %tmp5
 }
 
-define <8 x i16> @uadalp8h(<16 x i8>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @uadalp8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uadalp8h:
 ;CHECK: uadalp.8h
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp1 = load <16 x i8>, ptr %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> %tmp1)
-        %tmp4 = load <8 x i16>, <8 x i16>* %B
+        %tmp4 = load <8 x i16>, ptr %B
         %tmp5 = add <8 x i16> %tmp3, %tmp4
         ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @uadalp4s(<8 x i16>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @uadalp4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uadalp4s:
 ;CHECK: uadalp.4s
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> %tmp1)
-        %tmp4 = load <4 x i32>, <4 x i32>* %B
+        %tmp4 = load <4 x i32>, ptr %B
         %tmp5 = add <4 x i32> %tmp3, %tmp4
         ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @uadalp2d(<4 x i32>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @uadalp2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uadalp2d:
 ;CHECK: uadalp.2d
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> %tmp1)
-        %tmp4 = load <2 x i64>, <2 x i64>* %B
+        %tmp4 = load <2 x i64>, ptr %B
         %tmp5 = add <2 x i64> %tmp3, %tmp4
         ret <2 x i64> %tmp5
 }
 
-define <8 x i8> @addp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @addp_8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: addp_8b:
 ;CHECK: addp.8b
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
-        %tmp2 = load <8 x i8>, <8 x i8>* %B
+        %tmp1 = load <8 x i8>, ptr %A
+        %tmp2 = load <8 x i8>, ptr %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
         ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @addp_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @addp_16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: addp_16b:
 ;CHECK: addp.16b
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp1 = load <16 x i8>, ptr %A
+        %tmp2 = load <16 x i8>, ptr %B
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.addp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
         ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @addp_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @addp_4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: addp_4h:
 ;CHECK: addp.4h
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
-        %tmp2 = load <4 x i16>, <4 x i16>* %B
+        %tmp1 = load <4 x i16>, ptr %A
+        %tmp2 = load <4 x i16>, ptr %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.addp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
         ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @addp_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @addp_8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: addp_8h:
 ;CHECK: addp.8h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i16>, ptr %B
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.addp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @addp_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @addp_2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: addp_2s:
 ;CHECK: addp.2s
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
-        %tmp2 = load <2 x i32>, <2 x i32>* %B
+        %tmp1 = load <2 x i32>, ptr %A
+        %tmp2 = load <2 x i32>, ptr %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.addp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
         ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @addp_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @addp_4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: addp_4s:
 ;CHECK: addp.4s
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i32>, ptr %B
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.addp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @addp_2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @addp_2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: addp_2d:
 ;CHECK: addp.2d
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i64>, ptr %B
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i64> %tmp3
 }
@@ -707,29 +707,29 @@ declare <2 x i32> @llvm.aarch64.neon.addp.v2i32(<2 x i32>, <2 x i32>) nounwind r
 declare <4 x i32> @llvm.aarch64.neon.addp.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 declare <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
 
-define <2 x float> @faddp_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+define <2 x float> @faddp_2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: faddp_2s:
 ;CHECK: faddp.2s
-        %tmp1 = load <2 x float>, <2 x float>* %A
-        %tmp2 = load <2 x float>, <2 x float>* %B
+        %tmp1 = load <2 x float>, ptr %A
+        %tmp2 = load <2 x float>, ptr %B
         %tmp3 = call <2 x float> @llvm.aarch64.neon.faddp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
         ret <2 x float> %tmp3
 }
 
-define <4 x float> @faddp_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @faddp_4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: faddp_4s:
 ;CHECK: faddp.4s
-        %tmp1 = load <4 x float>, <4 x float>* %A
-        %tmp2 = load <4 x float>, <4 x float>* %B
+        %tmp1 = load <4 x float>, ptr %A
+        %tmp2 = load <4 x float>, ptr %B
         %tmp3 = call <4 x float> @llvm.aarch64.neon.faddp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
         ret <4 x float> %tmp3
 }
 
-define <2 x double> @faddp_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+define <2 x double> @faddp_2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: faddp_2d:
 ;CHECK: faddp.2d
-        %tmp1 = load <2 x double>, <2 x double>* %A
-        %tmp2 = load <2 x double>, <2 x double>* %B
+        %tmp1 = load <2 x double>, ptr %A
+        %tmp2 = load <2 x double>, ptr %B
         %tmp3 = call <2 x double> @llvm.aarch64.neon.faddp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
         ret <2 x double> %tmp3
 }
@@ -866,44 +866,44 @@ define <2 x i64> @ssubl2_duplhs(i32 %lhs, <4 x i32> %rhs) {
   ret <2 x i64> %res
 }
 
-define <8 x i8> @addhn8b_natural(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i8> @addhn8b_natural(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: addhn8b_natural:
 ;CHECK: addhn.8b
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i16>, ptr %B
         %sum = add <8 x i16> %tmp1, %tmp2
         %high_bits = lshr <8 x i16> %sum, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
         %narrowed = trunc <8 x i16> %high_bits to <8 x i8>
         ret <8 x i8> %narrowed
 }
 
-define <4 x i16> @addhn4h_natural(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i16> @addhn4h_natural(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: addhn4h_natural:
 ;CHECK: addhn.4h
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i32>, ptr %B
         %sum = add <4 x i32> %tmp1, %tmp2
         %high_bits = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
         %narrowed = trunc <4 x i32> %high_bits to <4 x i16>
         ret <4 x i16> %narrowed
 }
 
-define <2 x i32> @addhn2s_natural(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i32> @addhn2s_natural(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: addhn2s_natural:
 ;CHECK: addhn.2s
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i64>, ptr %B
         %sum = add <2 x i64> %tmp1, %tmp2
         %high_bits = lshr <2 x i64> %sum, <i64 32, i64 32>
         %narrowed = trunc <2 x i64> %high_bits to <2 x i32>
         ret <2 x i32> %narrowed
 }
 
-define <16 x i8> @addhn2_16b_natural(<8 x i8> %low, <8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <16 x i8> @addhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: addhn2_16b_natural:
 ;CHECK: addhn2.16b
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i16>, ptr %B
         %sum = add <8 x i16> %tmp1, %tmp2
         %high_bits = lshr <8 x i16> %sum, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
         %narrowed = trunc <8 x i16> %high_bits to <8 x i8>
@@ -911,11 +911,11 @@ define <16 x i8> @addhn2_16b_natural(<8 x i8> %low, <8 x i16>* %A, <8 x i16>* %B
         ret <16 x i8> %res
 }
 
-define <8 x i16> @addhn2_8h_natural(<4 x i16> %low, <4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <8 x i16> @addhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: addhn2_8h_natural:
 ;CHECK: addhn2.8h
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i32>, ptr %B
         %sum = add <4 x i32> %tmp1, %tmp2
         %high_bits = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
         %narrowed = trunc <4 x i32> %high_bits to <4 x i16>
@@ -923,11 +923,11 @@ define <8 x i16> @addhn2_8h_natural(<4 x i16> %low, <4 x i32>* %A, <4 x i32>* %B
         ret <8 x i16> %res
 }
 
-define <4 x i32> @addhn2_4s_natural(<2 x i32> %low, <2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <4 x i32> @addhn2_4s_natural(<2 x i32> %low, ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: addhn2_4s_natural:
 ;CHECK: addhn2.4s
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i64>, ptr %B
         %sum = add <2 x i64> %tmp1, %tmp2
         %high_bits = lshr <2 x i64> %sum, <i64 32, i64 32>
         %narrowed = trunc <2 x i64> %high_bits to <2 x i32>
@@ -935,18 +935,18 @@ define <4 x i32> @addhn2_4s_natural(<2 x i32> %low, <2 x i64>* %A, <2 x i64>* %B
         ret <4 x i32> %res
 }
 
-define <4 x i32> @addhn_addhn2_4s(<2 x i64>* %A, <2 x i64>* %B, <2 x i64>* %C, <2 x i64>* %D) nounwind {
+define <4 x i32> @addhn_addhn2_4s(ptr %A, ptr %B, ptr %C, ptr %D) nounwind {
 ;CHECK-LABEL: addhn_addhn2_4s
 ;CHECK:     addhn.2s
 ;CHECK:     addhn2.4s
 ;CHECK-NOT: uzp2.4s
-            %tmp1 = load <2 x i64>, <2 x i64>* %A
-            %tmp2 = load <2 x i64>, <2 x i64>* %B
+            %tmp1 = load <2 x i64>, ptr %A
+            %tmp2 = load <2 x i64>, ptr %B
             %sum1 = add <2 x i64> %tmp1, %tmp2
             %low_bits = lshr <2 x i64> %sum1, <i64 32, i64 32>
             %narrowed1 = trunc <2 x i64> %low_bits to <2 x i32>
-            %tmp3 = load <2 x i64>, <2 x i64>* %C
-            %tmp4 = load <2 x i64>, <2 x i64>* %D
+            %tmp3 = load <2 x i64>, ptr %C
+            %tmp4 = load <2 x i64>, ptr %D
             %sum2 = add <2 x i64> %tmp3, %tmp4
             %high_bits = lshr <2 x i64> %sum1, <i64 32, i64 32>
             %narrowed2 = trunc <2 x i64> %high_bits to <2 x i32>
@@ -954,44 +954,44 @@ define <4 x i32> @addhn_addhn2_4s(<2 x i64>* %A, <2 x i64>* %B, <2 x i64>* %C, <
             ret <4 x i32> %res
 }
 
-define <8 x i8> @subhn8b_natural(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i8> @subhn8b_natural(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: subhn8b_natural:
 ;CHECK: subhn.8b
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i16>, ptr %B
         %diff = sub <8 x i16> %tmp1, %tmp2
         %high_bits = lshr <8 x i16> %diff, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
         %narrowed = trunc <8 x i16> %high_bits to <8 x i8>
         ret <8 x i8> %narrowed
 }
 
-define <4 x i16> @subhn4h_natural(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i16> @subhn4h_natural(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: subhn4h_natural:
 ;CHECK: subhn.4h
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i32>, ptr %B
         %diff = sub <4 x i32> %tmp1, %tmp2
         %high_bits = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
         %narrowed = trunc <4 x i32> %high_bits to <4 x i16>
         ret <4 x i16> %narrowed
 }
 
-define <2 x i32> @subhn2s_natural(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i32> @subhn2s_natural(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: subhn2s_natural:
 ;CHECK: subhn.2s
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i64>, ptr %B
         %diff = sub <2 x i64> %tmp1, %tmp2
         %high_bits = lshr <2 x i64> %diff, <i64 32, i64 32>
         %narrowed = trunc <2 x i64> %high_bits to <2 x i32>
         ret <2 x i32> %narrowed
 }
 
-define <16 x i8> @subhn2_16b_natural(<8 x i8> %low, <8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <16 x i8> @subhn2_16b_natural(<8 x i8> %low, ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: subhn2_16b_natural:
 ;CHECK: subhn2.16b
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i16>, ptr %B
         %diff = sub <8 x i16> %tmp1, %tmp2
         %high_bits = lshr <8 x i16> %diff, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
         %narrowed = trunc <8 x i16> %high_bits to <8 x i8>
@@ -999,11 +999,11 @@ define <16 x i8> @subhn2_16b_natural(<8 x i8> %low, <8 x i16>* %A, <8 x i16>* %B
         ret <16 x i8> %res
 }
 
-define <8 x i16> @subhn2_8h_natural(<4 x i16> %low, <4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <8 x i16> @subhn2_8h_natural(<4 x i16> %low, ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: subhn2_8h_natural:
 ;CHECK: subhn2.8h
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i32>, ptr %B
         %diff = sub <4 x i32> %tmp1, %tmp2
         %high_bits = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
         %narrowed = trunc <4 x i32> %high_bits to <4 x i16>
@@ -1011,11 +1011,11 @@ define <8 x i16> @subhn2_8h_natural(<4 x i16> %low, <4 x i32>* %A, <4 x i32>* %B
         ret <8 x i16> %res
 }
 
-define <4 x i32> @subhn2_4s_natural(<2 x i32> %low, <2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <4 x i32> @subhn2_4s_natural(<2 x i32> %low, ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: subhn2_4s_natural:
 ;CHECK: subhn2.4s
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i64>, ptr %B
         %diff = sub <2 x i64> %tmp1, %tmp2
         %high_bits = lshr <2 x i64> %diff, <i64 32, i64 32>
         %narrowed = trunc <2 x i64> %high_bits to <2 x i32>

diff --git a/llvm/test/CodeGen/AArch64/arm64-variadic-aapcs.ll b/llvm/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
index eb1fe6b094450..de07ed1b5d7ec 100644
--- a/llvm/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
@@ -1,10 +1,10 @@
 ; RUN: llc -aarch64-load-store-renaming=true -verify-machineinstrs -mtriple=arm64-linux-gnu -pre-RA-sched=linearize -enable-misched=false -disable-post-ra < %s | FileCheck %s
 
-%va_list = type {i8*, i8*, i8*, i32, i32}
+%va_list = type {ptr, ptr, ptr, i32, i32}
 
 @var = dso_local global %va_list zeroinitializer, align 8
 
-declare void @llvm.va_start(i8*)
+declare void @llvm.va_start(ptr)
 
 define dso_local void @test_simple(i32 %n, ...) {
 ; CHECK-LABEL: test_simple:
@@ -36,8 +36,7 @@ define dso_local void @test_simple(i32 %n, ...) {
 ; CHECK: movk    [[GRVR]], #65408, lsl #32
 ; CHECK: str     [[GRVR]], [x[[VA_LIST]], #24]
 
-  %addr = bitcast %va_list* @var to i8*
-  call void @llvm.va_start(i8* %addr)
+  call void @llvm.va_start(ptr @var)
 
   ret void
 }
@@ -71,8 +70,7 @@ define dso_local void @test_fewargs(i32 %n, i32 %n1, i32 %n2, float %m, ...) {
 ; CHECK: movk [[GRVR_OFFS]], #65424, lsl #32
 ; CHECK: str  [[GRVR_OFFS]], [x[[VA_LIST]], #24]
 
-  %addr = bitcast %va_list* @var to i8*
-  call void @llvm.va_start(i8* %addr)
+  call void @llvm.va_start(ptr @var)
 
   ret void
 }
@@ -80,8 +78,7 @@ define dso_local void @test_fewargs(i32 %n, i32 %n1, i32 %n2, float %m, ...) {
 define dso_local void @test_nospare([8 x i64], [8 x float], ...) {
 ; CHECK-LABEL: test_nospare:
 
-  %addr = bitcast %va_list* @var to i8*
-  call void @llvm.va_start(i8* %addr)
+  call void @llvm.va_start(ptr @var)
 ; CHECK-NOT: sub sp, sp
 ; CHECK: mov [[STACK:x[0-9]+]], sp
 ; CHECK: add x[[VAR:[0-9]+]], {{x[0-9]+}}, :lo12:var
@@ -102,33 +99,29 @@ define dso_local void @test_offsetstack([8 x i64], [2 x i64], [3 x float], ...)
 ; CHECK-DAG: add x[[VAR:[0-9]+]], {{x[0-9]+}}, :lo12:var
 ; CHECK-DAG: str [[STACK_TOP]], [x[[VAR]]]
 
-  %addr = bitcast %va_list* @var to i8*
-  call void @llvm.va_start(i8* %addr)
+  call void @llvm.va_start(ptr @var)
   ret void
 }
 
-declare void @llvm.va_end(i8*)
+declare void @llvm.va_end(ptr)
 
 define dso_local void @test_va_end() nounwind {
 ; CHECK-LABEL: test_va_end:
 ; CHECK-NEXT: %bb.0
 
-  %addr = bitcast %va_list* @var to i8*
-  call void @llvm.va_end(i8* %addr)
+  call void @llvm.va_end(ptr @var)
 
   ret void
 ; CHECK-NEXT: ret
 }
 
-declare void @llvm.va_copy(i8* %dest, i8* %src)
+declare void @llvm.va_copy(ptr %dest, ptr %src)
 
 @second_list = dso_local global %va_list zeroinitializer
 
 define dso_local void @test_va_copy() {
 ; CHECK-LABEL: test_va_copy:
-  %srcaddr = bitcast %va_list* @var to i8*
-  %dstaddr = bitcast %va_list* @second_list to i8*
-  call void @llvm.va_copy(i8* %dstaddr, i8* %srcaddr)
+  call void @llvm.va_copy(ptr @second_list, ptr @var)
 
 ; CHECK: add x[[SRC:[0-9]+]], {{x[0-9]+}}, :lo12:var
 

diff --git a/llvm/test/CodeGen/AArch64/arm64-vbitwise.ll b/llvm/test/CodeGen/AArch64/arm64-vbitwise.ll
index 2d3ce5f8b8e11..113ac7542968b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vbitwise.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vbitwise.ll
@@ -1,17 +1,17 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @rbit_8b(<8 x i8>* %A) nounwind {
+define <8 x i8> @rbit_8b(ptr %A) nounwind {
 ;CHECK-LABEL: rbit_8b:
 ;CHECK: rbit.8b
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp1 = load <8 x i8>, ptr %A
 	%tmp3 = call <8 x i8> @llvm.bitreverse.v8i8(<8 x i8> %tmp1)
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @rbit_16b(<16 x i8>* %A) nounwind {
+define <16 x i8> @rbit_16b(ptr %A) nounwind {
 ;CHECK-LABEL: rbit_16b:
 ;CHECK: rbit.16b
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp1 = load <16 x i8>, ptr %A
 	%tmp3 = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> %tmp1)
 	ret <16 x i8> %tmp3
 }
@@ -19,73 +19,71 @@ define <16 x i8> @rbit_16b(<16 x i8>* %A) nounwind {
 declare <8 x i8> @llvm.bitreverse.v8i8(<8 x i8>) nounwind readnone
 declare <16 x i8> @llvm.bitreverse.v16i8(<16 x i8>) nounwind readnone
 
-define <8 x i16> @sxtl8h(<8 x i8>* %A) nounwind {
+define <8 x i16> @sxtl8h(ptr %A) nounwind {
 ;CHECK-LABEL: sxtl8h:
 ;CHECK: sshll.8h
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp1 = load <8 x i8>, ptr %A
   %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
   ret <8 x i16> %tmp2
 }
 
-define <8 x i16> @uxtl8h(<8 x i8>* %A) nounwind {
+define <8 x i16> @uxtl8h(ptr %A) nounwind {
 ;CHECK-LABEL: uxtl8h:
 ;CHECK: ushll.8h
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp1 = load <8 x i8>, ptr %A
   %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
   ret <8 x i16> %tmp2
 }
 
-define <4 x i32> @sxtl4s(<4 x i16>* %A) nounwind {
+define <4 x i32> @sxtl4s(ptr %A) nounwind {
 ;CHECK-LABEL: sxtl4s:
 ;CHECK: sshll.4s
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp1 = load <4 x i16>, ptr %A
   %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
   ret <4 x i32> %tmp2
 }
 
-define <4 x i32> @uxtl4s(<4 x i16>* %A) nounwind {
+define <4 x i32> @uxtl4s(ptr %A) nounwind {
 ;CHECK-LABEL: uxtl4s:
 ;CHECK: ushll.4s
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp1 = load <4 x i16>, ptr %A
   %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
   ret <4 x i32> %tmp2
 }
 
-define <2 x i64> @sxtl2d(<2 x i32>* %A) nounwind {
+define <2 x i64> @sxtl2d(ptr %A) nounwind {
 ;CHECK-LABEL: sxtl2d:
 ;CHECK: sshll.2d
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp1 = load <2 x i32>, ptr %A
   %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
   ret <2 x i64> %tmp2
 }
 
-define <2 x i64> @uxtl2d(<2 x i32>* %A) nounwind {
+define <2 x i64> @uxtl2d(ptr %A) nounwind {
 ;CHECK-LABEL: uxtl2d:
 ;CHECK: ushll.2d
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp1 = load <2 x i32>, ptr %A
   %tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
   ret <2 x i64> %tmp2
 }
 
 ; Check for incorrect use of vector bic.
 ; rdar://11553859
-define void @test_vsliq(i8* nocapture %src, i8* nocapture %dest) nounwind noinline ssp {
+define void @test_vsliq(ptr nocapture %src, ptr nocapture %dest) nounwind noinline ssp {
 entry:
 ; CHECK-LABEL: test_vsliq:
 ; CHECK-NOT: bic
 ; CHECK: movi.2d [[REG1:v[0-9]+]], #0x0000ff000000ff
 ; CHECK: and.16b v{{[0-9]+}}, v{{[0-9]+}}, [[REG1]]
-  %0 = bitcast i8* %src to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0, align 16
-  %and.i = and <16 x i8> %1, <i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 0, i8 0>
-  %2 = bitcast <16 x i8> %and.i to <8 x i16>
-  %vshl_n = shl <8 x i16> %2, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
-  %3 = or <8 x i16> %2, %vshl_n
-  %4 = bitcast <8 x i16> %3 to <4 x i32>
-  %vshl_n8 = shl <4 x i32> %4, <i32 16, i32 16, i32 16, i32 16>
-  %5 = or <4 x i32> %4, %vshl_n8
-  %6 = bitcast <4 x i32> %5 to <16 x i8>
-  %7 = bitcast i8* %dest to <16 x i8>*
-  store <16 x i8> %6, <16 x i8>* %7, align 16
+  %0 = load <16 x i8>, ptr %src, align 16
+  %and.i = and <16 x i8> %0, <i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 0, i8 0>
+  %1 = bitcast <16 x i8> %and.i to <8 x i16>
+  %vshl_n = shl <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %2 = or <8 x i16> %1, %vshl_n
+  %3 = bitcast <8 x i16> %2 to <4 x i32>
+  %vshl_n8 = shl <4 x i32> %3, <i32 16, i32 16, i32 16, i32 16>
+  %4 = or <4 x i32> %3, %vshl_n8
+  %5 = bitcast <4 x i32> %4 to <16 x i8>
+  store <16 x i8> %5, ptr %dest, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-vcmp.ll b/llvm/test/CodeGen/AArch64/arm64-vcmp.ll
index 167cef9218a38..1e05b452de300 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vcmp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vcmp.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
 
-define void @fcmltz_4s(<4 x float> %a, <4 x i16>* %p) nounwind {
+define void @fcmltz_4s(<4 x float> %a, ptr %p) nounwind {
 ;CHECK-LABEL: fcmltz_4s:
 ;CHECK: fcmlt.4s [[REG:v[0-9]+]], v0, #0
 ;CHECK-NEXT: xtn.4h v[[REG_1:[0-9]+]], [[REG]]
@@ -9,33 +9,33 @@ define void @fcmltz_4s(<4 x float> %a, <4 x i16>* %p) nounwind {
 ;CHECK-NEXT: ret
   %tmp = fcmp olt <4 x float> %a, zeroinitializer
   %tmp2 = sext <4 x i1> %tmp to <4 x i16>
-  store <4 x i16> %tmp2, <4 x i16>* %p, align 8
+  store <4 x i16> %tmp2, ptr %p, align 8
   ret void
 }
 
-define <2 x i32> @facge_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+define <2 x i32> @facge_2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: facge_2s:
 ;CHECK: facge.2s
-	%tmp1 = load <2 x float>, <2 x float>* %A
-	%tmp2 = load <2 x float>, <2 x float>* %B
+	%tmp1 = load <2 x float>, ptr %A
+	%tmp2 = load <2 x float>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.facge.v2i32.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @facge_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x i32> @facge_4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: facge_4s:
 ;CHECK: facge.4s
-	%tmp1 = load <4 x float>, <4 x float>* %A
-	%tmp2 = load <4 x float>, <4 x float>* %B
+	%tmp1 = load <4 x float>, ptr %A
+	%tmp2 = load <4 x float>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.facge.v4i32.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @facge_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+define <2 x i64> @facge_2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: facge_2d:
 ;CHECK: facge.2d
-	%tmp1 = load <2 x double>, <2 x double>* %A
-	%tmp2 = load <2 x double>, <2 x double>* %B
+	%tmp1 = load <2 x double>, ptr %A
+	%tmp2 = load <2 x double>, ptr %B
 	%tmp3 = call <2 x i64> @llvm.aarch64.neon.facge.v2i64.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -44,29 +44,29 @@ declare <2 x i32> @llvm.aarch64.neon.facge.v2i32.v2f32(<2 x float>, <2 x float>)
 declare <4 x i32> @llvm.aarch64.neon.facge.v4i32.v4f32(<4 x float>, <4 x float>) nounwind readnone
 declare <2 x i64> @llvm.aarch64.neon.facge.v2i64.v2f64(<2 x double>, <2 x double>) nounwind readnone
 
-define <2 x i32> @facgt_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+define <2 x i32> @facgt_2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: facgt_2s:
 ;CHECK: facgt.2s
-	%tmp1 = load <2 x float>, <2 x float>* %A
-	%tmp2 = load <2 x float>, <2 x float>* %B
+	%tmp1 = load <2 x float>, ptr %A
+	%tmp2 = load <2 x float>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.facgt.v2i32.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @facgt_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x i32> @facgt_4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: facgt_4s:
 ;CHECK: facgt.4s
-	%tmp1 = load <4 x float>, <4 x float>* %A
-	%tmp2 = load <4 x float>, <4 x float>* %B
+	%tmp1 = load <4 x float>, ptr %A
+	%tmp2 = load <4 x float>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.facgt.v4i32.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @facgt_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+define <2 x i64> @facgt_2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: facgt_2d:
 ;CHECK: facgt.2d
-	%tmp1 = load <2 x double>, <2 x double>* %A
-	%tmp2 = load <2 x double>, <2 x double>* %B
+	%tmp1 = load <2 x double>, ptr %A
+	%tmp2 = load <2 x double>, ptr %B
 	%tmp3 = call <2 x i64> @llvm.aarch64.neon.facgt.v2i64.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -109,77 +109,77 @@ define i64 @facgt_d(double %A, double %B) nounwind {
 declare i64 @llvm.aarch64.neon.facgt.i64.f64(double, double)
 declare i32 @llvm.aarch64.neon.facgt.i32.f32(float, float)
 
-define <8 x i8> @cmtst_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @cmtst_8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: cmtst_8b:
 ;CHECK: cmtst.8b
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %commonbits = and <8 x i8> %tmp1, %tmp2
   %mask = icmp ne <8 x i8> %commonbits, zeroinitializer
   %res = sext <8 x i1> %mask to <8 x i8>
   ret <8 x i8> %res
 }
 
-define <16 x i8> @cmtst_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @cmtst_16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: cmtst_16b:
 ;CHECK: cmtst.16b
-  %tmp1 = load <16 x i8>, <16 x i8>* %A
-  %tmp2 = load <16 x i8>, <16 x i8>* %B
+  %tmp1 = load <16 x i8>, ptr %A
+  %tmp2 = load <16 x i8>, ptr %B
   %commonbits = and <16 x i8> %tmp1, %tmp2
   %mask = icmp ne <16 x i8> %commonbits, zeroinitializer
   %res = sext <16 x i1> %mask to <16 x i8>
   ret <16 x i8> %res
 }
 
-define <4 x i16> @cmtst_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @cmtst_4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: cmtst_4h:
 ;CHECK: cmtst.4h
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %commonbits = and <4 x i16> %tmp1, %tmp2
   %mask = icmp ne <4 x i16> %commonbits, zeroinitializer
   %res = sext <4 x i1> %mask to <4 x i16>
   ret <4 x i16> %res
 }
 
-define <8 x i16> @cmtst_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @cmtst_8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: cmtst_8h:
 ;CHECK: cmtst.8h
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i16>, <8 x i16>* %B
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
   %commonbits = and <8 x i16> %tmp1, %tmp2
   %mask = icmp ne <8 x i16> %commonbits, zeroinitializer
   %res = sext <8 x i1> %mask to <8 x i16>
   ret <8 x i16> %res
 }
 
-define <2 x i32> @cmtst_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @cmtst_2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: cmtst_2s:
 ;CHECK: cmtst.2s
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %commonbits = and <2 x i32> %tmp1, %tmp2
   %mask = icmp ne <2 x i32> %commonbits, zeroinitializer
   %res = sext <2 x i1> %mask to <2 x i32>
   ret <2 x i32> %res
 }
 
-define <4 x i32> @cmtst_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @cmtst_4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: cmtst_4s:
 ;CHECK: cmtst.4s
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i32>, <4 x i32>* %B
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
   %commonbits = and <4 x i32> %tmp1, %tmp2
   %mask = icmp ne <4 x i32> %commonbits, zeroinitializer
   %res = sext <4 x i1> %mask to <4 x i32>
   ret <4 x i32> %res
 }
 
-define <2 x i64> @cmtst_2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @cmtst_2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: cmtst_2d:
 ;CHECK: cmtst.2d
-  %tmp1 = load <2 x i64>, <2 x i64>* %A
-  %tmp2 = load <2 x i64>, <2 x i64>* %B
+  %tmp1 = load <2 x i64>, ptr %A
+  %tmp2 = load <2 x i64>, ptr %B
   %commonbits = and <2 x i64> %tmp1, %tmp2
   %mask = icmp ne <2 x i64> %commonbits, zeroinitializer
   %res = sext <2 x i1> %mask to <2 x i64>

diff --git a/llvm/test/CodeGen/AArch64/arm64-vcnt.ll b/llvm/test/CodeGen/AArch64/arm64-vcnt.ll
index 4e8147cb806aa..f2113d45589b3 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vcnt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vcnt.ll
@@ -1,49 +1,49 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @cls_8b(<8 x i8>* %A) nounwind {
+define <8 x i8> @cls_8b(ptr %A) nounwind {
 ;CHECK-LABEL: cls_8b:
 ;CHECK: cls.8b
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp1 = load <8 x i8>, ptr %A
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.cls.v8i8(<8 x i8> %tmp1)
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @cls_16b(<16 x i8>* %A) nounwind {
+define <16 x i8> @cls_16b(ptr %A) nounwind {
 ;CHECK-LABEL: cls_16b:
 ;CHECK: cls.16b
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp1 = load <16 x i8>, ptr %A
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.cls.v16i8(<16 x i8> %tmp1)
 	ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @cls_4h(<4 x i16>* %A) nounwind {
+define <4 x i16> @cls_4h(ptr %A) nounwind {
 ;CHECK-LABEL: cls_4h:
 ;CHECK: cls.4h
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
+	%tmp1 = load <4 x i16>, ptr %A
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.cls.v4i16(<4 x i16> %tmp1)
 	ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @cls_8h(<8 x i16>* %A) nounwind {
+define <8 x i16> @cls_8h(ptr %A) nounwind {
 ;CHECK-LABEL: cls_8h:
 ;CHECK: cls.8h
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
+	%tmp1 = load <8 x i16>, ptr %A
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.cls.v8i16(<8 x i16> %tmp1)
 	ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @cls_2s(<2 x i32>* %A) nounwind {
+define <2 x i32> @cls_2s(ptr %A) nounwind {
 ;CHECK-LABEL: cls_2s:
 ;CHECK: cls.2s
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp1 = load <2 x i32>, ptr %A
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.cls.v2i32(<2 x i32> %tmp1)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @cls_4s(<4 x i32>* %A) nounwind {
+define <4 x i32> @cls_4s(ptr %A) nounwind {
 ;CHECK-LABEL: cls_4s:
 ;CHECK: cls.4s
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp1 = load <4 x i32>, ptr %A
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.cls.v4i32(<4 x i32> %tmp1)
 	ret <4 x i32> %tmp3
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-vcombine.ll b/llvm/test/CodeGen/AArch64/arm64-vcombine.ll
index c084ee22e9752..ca9d16d86298f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vcombine.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vcombine.ll
@@ -3,7 +3,7 @@
 ; LowerCONCAT_VECTORS() was reversing the order of two parts.
 ; rdar://11558157
 ; rdar://11559553
-define <16 x i8> @test(<16 x i8> %q0, <16 x i8> %q1, i8* nocapture %dest) nounwind {
+define <16 x i8> @test(<16 x i8> %q0, <16 x i8> %q1, ptr nocapture %dest) nounwind {
 entry:
 ; CHECK-LABEL: test:
 ; CHECK: mov.d v0[1], v1[0]

diff --git a/llvm/test/CodeGen/AArch64/arm64-vcvt.ll b/llvm/test/CodeGen/AArch64/arm64-vcvt.ll
index 43ed1aba735c1..b4d01a05a5e73 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vcvt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vcvt.ll
@@ -857,19 +857,19 @@ define <2 x double> @ucvtf_2dc(<2 x i64> %A) nounwind {
 ;CHECK-LABEL: autogen_SD28458:
 ;CHECK: fcvt
 ;CHECK: ret
-define void @autogen_SD28458(<8 x double> %val.f64, <8 x float>* %addr.f32) {
+define void @autogen_SD28458(<8 x double> %val.f64, ptr %addr.f32) {
   %Tr53 = fptrunc <8 x double> %val.f64 to <8 x float>
-  store <8 x float> %Tr53, <8 x float>* %addr.f32
+  store <8 x float> %Tr53, ptr %addr.f32
   ret void
 }
 
 ;CHECK-LABEL: autogen_SD19225:
 ;CHECK: fcvt
 ;CHECK: ret
-define void @autogen_SD19225(<8 x double>* %addr.f64, <8 x float>* %addr.f32) {
-  %A = load <8 x float>, <8 x float>* %addr.f32
+define void @autogen_SD19225(ptr %addr.f64, ptr %addr.f32) {
+  %A = load <8 x float>, ptr %addr.f32
   %Tr53 = fpext <8 x float> %A to <8 x double>
-  store <8 x double> %Tr53, <8 x double>* %addr.f64
+  store <8 x double> %Tr53, ptr %addr.f64
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/arm64-vecCmpBr.ll b/llvm/test/CodeGen/AArch64/arm64-vecCmpBr.ll
index e49810ceabf25..37d2085c495a2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vecCmpBr.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vecCmpBr.ll
@@ -18,7 +18,7 @@ entry:
   br i1 %tobool, label %if.then, label %return
 
 if.then:                                          ; preds = %entry
-  %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #4
+  %call1 = tail call i32 @bar() #4
   br label %return
 
 return:                                           ; preds = %entry, %if.then
@@ -44,7 +44,7 @@ entry:
   br i1 %tobool, label %if.then, label %return
 
 if.then:                                          ; preds = %entry
-  %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #4
+  %call1 = tail call i32 @bar() #4
   br label %return
 
 return:                                           ; preds = %entry, %if.then
@@ -68,7 +68,7 @@ entry:
   br i1 %tobool, label %return, label %if.then
 
 if.then:                                          ; preds = %entry
-  %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #4
+  %call1 = tail call i32 @bar() #4
   br label %return
 
 return:                                           ; preds = %entry, %if.then
@@ -91,7 +91,7 @@ entry:
   br i1 %tobool, label %return, label %if.then
 
 if.then:                                          ; preds = %entry
-  %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #4
+  %call1 = tail call i32 @bar() #4
   br label %return
 
 return:                                           ; preds = %entry, %if.then
@@ -114,7 +114,7 @@ entry:
   br i1 %tobool, label %if.then, label %return
 
 if.then:                                          ; preds = %entry
-  %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #4
+  %call1 = tail call i32 @bar() #4
   br label %return
 
 return:                                           ; preds = %entry, %if.then
@@ -137,7 +137,7 @@ entry:
   br i1 %tobool, label %if.then, label %return
 
 if.then:                                          ; preds = %entry
-  %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #4
+  %call1 = tail call i32 @bar() #4
   br label %return
 
 return:                                           ; preds = %entry, %if.then
@@ -160,7 +160,7 @@ entry:
   br i1 %tobool, label %return, label %if.then
 
 if.then:                                          ; preds = %entry
-  %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #4
+  %call1 = tail call i32 @bar() #4
   br label %return
 
 return:                                           ; preds = %entry, %if.then
@@ -183,7 +183,7 @@ entry:
   br i1 %tobool, label %return, label %if.then
 
 if.then:                                          ; preds = %entry
-  %call1 = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)() #4
+  %call1 = tail call i32 @bar() #4
   br label %return
 
 return:                                           ; preds = %entry, %if.then

diff --git a/llvm/test/CodeGen/AArch64/arm64-vector-ext.ll b/llvm/test/CodeGen/AArch64/arm64-vector-ext.ll
index 8debd21ee6e60..197a385b0e7cb 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vector-ext.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vector-ext.ll
@@ -9,9 +9,9 @@
 
 %T0_30 = type <4 x i1>
 %T1_30 = type <4 x i32>
-define void @func30(%T0_30 %v0, %T1_30* %p1) {
+define void @func30(%T0_30 %v0, ptr %p1) {
   %r = zext %T0_30 %v0 to %T1_30
-  store %T1_30 %r, %T1_30* %p1
+  store %T1_30 %r, ptr %p1
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/arm64-vector-imm.ll b/llvm/test/CodeGen/AArch64/arm64-vector-imm.ll
index 0a80874172527..08bceb850df40 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vector-imm.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vector-imm.ll
@@ -1,41 +1,41 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @v_orrimm(<8 x i8>* %A) nounwind {
+define <8 x i8> @v_orrimm(ptr %A) nounwind {
 ; CHECK-LABEL: v_orrimm:
 ; CHECK-NOT: mov
 ; CHECK-NOT: mvn
 ; CHECK: orr
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp1 = load <8 x i8>, ptr %A
 	%tmp3 = or <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1>
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @v_orrimmQ(<16 x i8>* %A) nounwind {
+define <16 x i8> @v_orrimmQ(ptr %A) nounwind {
 ; CHECK: v_orrimmQ
 ; CHECK-NOT: mov
 ; CHECK-NOT: mvn
 ; CHECK: orr
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp1 = load <16 x i8>, ptr %A
 	%tmp3 = or <16 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1>
 	ret <16 x i8> %tmp3
 }
 
-define <8 x i8> @v_bicimm(<8 x i8>* %A) nounwind {
+define <8 x i8> @v_bicimm(ptr %A) nounwind {
 ; CHECK-LABEL: v_bicimm:
 ; CHECK-NOT: mov
 ; CHECK-NOT: mvn
 ; CHECK: bic
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
+	%tmp1 = load <8 x i8>, ptr %A
 	%tmp3 = and <8 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0 >
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @v_bicimmQ(<16 x i8>* %A) nounwind {
+define <16 x i8> @v_bicimmQ(ptr %A) nounwind {
 ; CHECK-LABEL: v_bicimmQ:
 ; CHECK-NOT: mov
 ; CHECK-NOT: mvn
 ; CHECK: bic
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
+	%tmp1 = load <16 x i8>, ptr %A
 	%tmp3 = and <16 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0 >
 	ret <16 x i8> %tmp3
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll b/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll
index a5b9a27c48bc9..94074d1689f6a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-eabi -mcpu=generic -aarch64-neon-syntax=apple -mattr="+fullfp16" | FileCheck %s
 
-define void @test0f(float* nocapture %x, float %a) #0 {
+define void @test0f(ptr nocapture %x, float %a) #0 {
 ; CHECK-LABEL: test0f:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi.2d v1, #0000000000000000
@@ -11,12 +11,11 @@ define void @test0f(float* nocapture %x, float %a) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %0 = insertelement <4 x float> <float undef, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, float %a, i32 0
-  %1 = bitcast float* %x to <4 x float>*
-  store <4 x float> %0, <4 x float>* %1, align 16
+  store <4 x float> %0, ptr %x, align 16
   ret void
 }
 
-define void @test1f(float* nocapture %x, float %a) #0 {
+define void @test1f(ptr nocapture %x, float %a) #0 {
 ; CHECK-LABEL: test1f:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    fmov.4s v1, #1.00000000
@@ -26,8 +25,7 @@ define void @test1f(float* nocapture %x, float %a) #0 {
 ; CHECK-NEXT:    ret
 entry:
   %0 = insertelement <4 x float> <float undef, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, float %a, i32 0
-  %1 = bitcast float* %x to <4 x float>*
-  store <4 x float> %0, <4 x float>* %1, align 16
+  store <4 x float> %0, ptr %x, align 16
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/arm64-vector-ldst.ll b/llvm/test/CodeGen/AArch64/arm64-vector-ldst.ll
index 6d05dad0cb037..54a23a03b5e71 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vector-ldst.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vector-ldst.ll
@@ -8,7 +8,7 @@
 %type3 = type { <4 x i16> }
 
 
-define hidden fastcc void @t1(%type1** %argtable) nounwind {
+define hidden fastcc void @t1(ptr %argtable) nounwind {
 ; CHECK-LABEL: t1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi.2d v0, #0000000000000000
@@ -16,13 +16,12 @@ define hidden fastcc void @t1(%type1** %argtable) nounwind {
 ; CHECK-NEXT:    str q0, [x8]
 ; CHECK-NEXT:    ret
 entry:
-  %tmp1 = load %type1*, %type1** %argtable, align 8
-  %tmp2 = getelementptr inbounds %type1, %type1* %tmp1, i64 0, i32 0
-  store <16 x i8> zeroinitializer, <16 x i8>* %tmp2, align 16
+  %tmp1 = load ptr, ptr %argtable, align 8
+  store <16 x i8> zeroinitializer, ptr %tmp1, align 16
   ret void
 }
 
-define hidden fastcc void @t2(%type2** %argtable) nounwind {
+define hidden fastcc void @t2(ptr %argtable) nounwind {
 ; CHECK-LABEL: t2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi.2d v0, #0000000000000000
@@ -30,28 +29,27 @@ define hidden fastcc void @t2(%type2** %argtable) nounwind {
 ; CHECK-NEXT:    str d0, [x8]
 ; CHECK-NEXT:    ret
 entry:
-  %tmp1 = load %type2*, %type2** %argtable, align 8
-  %tmp2 = getelementptr inbounds %type2, %type2* %tmp1, i64 0, i32 0
-  store <8 x i8> zeroinitializer, <8 x i8>* %tmp2, align 8
+  %tmp1 = load ptr, ptr %argtable, align 8
+  store <8 x i8> zeroinitializer, ptr %tmp1, align 8
   ret void
 }
 
 ; add a bunch of tests for rdar://11246289
 
-@globalArray64x2 = common global <2 x i64>* null, align 8
-@globalArray32x4 = common global <4 x i32>* null, align 8
-@globalArray16x8 = common global <8 x i16>* null, align 8
-@globalArray8x16 = common global <16 x i8>* null, align 8
-@globalArray64x1 = common global <1 x i64>* null, align 8
-@globalArray32x2 = common global <2 x i32>* null, align 8
-@globalArray16x4 = common global <4 x i16>* null, align 8
-@globalArray8x8 = common global <8 x i8>* null, align 8
-@floatglobalArray64x2 = common global <2 x double>* null, align 8
-@floatglobalArray32x4 = common global <4 x float>* null, align 8
-@floatglobalArray64x1 = common global <1 x double>* null, align 8
-@floatglobalArray32x2 = common global <2 x float>* null, align 8
-
-define void @fct1_64x2(<2 x i64>* nocapture %array, i64 %offset) nounwind ssp {
+@globalArray64x2 = common global ptr null, align 8
+@globalArray32x4 = common global ptr null, align 8
+@globalArray16x8 = common global ptr null, align 8
+@globalArray8x16 = common global ptr null, align 8
+@globalArray64x1 = common global ptr null, align 8
+@globalArray32x2 = common global ptr null, align 8
+@globalArray16x4 = common global ptr null, align 8
+@globalArray8x8 = common global ptr null, align 8
+@floatglobalArray64x2 = common global ptr null, align 8
+@floatglobalArray32x4 = common global ptr null, align 8
+@floatglobalArray64x1 = common global ptr null, align 8
+@floatglobalArray32x2 = common global ptr null, align 8
+
+define void @fct1_64x2(ptr nocapture %array, i64 %offset) nounwind ssp {
 ; CHECK-LABEL: fct1_64x2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x9, :got:globalArray64x2
@@ -62,15 +60,15 @@ define void @fct1_64x2(<2 x i64>* nocapture %array, i64 %offset) nounwind ssp {
 ; CHECK-NEXT:    str q0, [x9, x8]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx = getelementptr inbounds <2 x i64>, <2 x i64>* %array, i64 %offset
-  %tmp = load <2 x i64>, <2 x i64>* %arrayidx, align 16
-  %tmp1 = load <2 x i64>*, <2 x i64>** @globalArray64x2, align 8
-  %arrayidx1 = getelementptr inbounds <2 x i64>, <2 x i64>* %tmp1, i64 %offset
-  store <2 x i64> %tmp, <2 x i64>* %arrayidx1, align 16
+  %arrayidx = getelementptr inbounds <2 x i64>, ptr %array, i64 %offset
+  %tmp = load <2 x i64>, ptr %arrayidx, align 16
+  %tmp1 = load ptr, ptr @globalArray64x2, align 8
+  %arrayidx1 = getelementptr inbounds <2 x i64>, ptr %tmp1, i64 %offset
+  store <2 x i64> %tmp, ptr %arrayidx1, align 16
   ret void
 }
 
-define void @fct2_64x2(<2 x i64>* nocapture %array) nounwind ssp {
+define void @fct2_64x2(ptr nocapture %array) nounwind ssp {
 ; CHECK-LABEL: fct2_64x2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x8, :got:globalArray64x2
@@ -80,15 +78,15 @@ define void @fct2_64x2(<2 x i64>* nocapture %array) nounwind ssp {
 ; CHECK-NEXT:    str q0, [x8, #80]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx = getelementptr inbounds <2 x i64>, <2 x i64>* %array, i64 3
-  %tmp = load <2 x i64>, <2 x i64>* %arrayidx, align 16
-  %tmp1 = load <2 x i64>*, <2 x i64>** @globalArray64x2, align 8
-  %arrayidx1 = getelementptr inbounds <2 x i64>, <2 x i64>* %tmp1, i64 5
-  store <2 x i64> %tmp, <2 x i64>* %arrayidx1, align 16
+  %arrayidx = getelementptr inbounds <2 x i64>, ptr %array, i64 3
+  %tmp = load <2 x i64>, ptr %arrayidx, align 16
+  %tmp1 = load ptr, ptr @globalArray64x2, align 8
+  %arrayidx1 = getelementptr inbounds <2 x i64>, ptr %tmp1, i64 5
+  store <2 x i64> %tmp, ptr %arrayidx1, align 16
   ret void
 }
 
-define void @fct1_32x4(<4 x i32>* nocapture %array, i64 %offset) nounwind ssp {
+define void @fct1_32x4(ptr nocapture %array, i64 %offset) nounwind ssp {
 ; CHECK-LABEL: fct1_32x4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x9, :got:globalArray32x4
@@ -99,15 +97,15 @@ define void @fct1_32x4(<4 x i32>* nocapture %array, i64 %offset) nounwind ssp {
 ; CHECK-NEXT:    str q0, [x9, x8]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx = getelementptr inbounds <4 x i32>, <4 x i32>* %array, i64 %offset
-  %tmp = load <4 x i32>, <4 x i32>* %arrayidx, align 16
-  %tmp1 = load <4 x i32>*, <4 x i32>** @globalArray32x4, align 8
-  %arrayidx1 = getelementptr inbounds <4 x i32>, <4 x i32>* %tmp1, i64 %offset
-  store <4 x i32> %tmp, <4 x i32>* %arrayidx1, align 16
+  %arrayidx = getelementptr inbounds <4 x i32>, ptr %array, i64 %offset
+  %tmp = load <4 x i32>, ptr %arrayidx, align 16
+  %tmp1 = load ptr, ptr @globalArray32x4, align 8
+  %arrayidx1 = getelementptr inbounds <4 x i32>, ptr %tmp1, i64 %offset
+  store <4 x i32> %tmp, ptr %arrayidx1, align 16
   ret void
 }
 
-define void @fct2_32x4(<4 x i32>* nocapture %array) nounwind ssp {
+define void @fct2_32x4(ptr nocapture %array) nounwind ssp {
 ; CHECK-LABEL: fct2_32x4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x8, :got:globalArray32x4
@@ -117,15 +115,15 @@ define void @fct2_32x4(<4 x i32>* nocapture %array) nounwind ssp {
 ; CHECK-NEXT:    str q0, [x8, #80]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx = getelementptr inbounds <4 x i32>, <4 x i32>* %array, i64 3
-  %tmp = load <4 x i32>, <4 x i32>* %arrayidx, align 16
-  %tmp1 = load <4 x i32>*, <4 x i32>** @globalArray32x4, align 8
-  %arrayidx1 = getelementptr inbounds <4 x i32>, <4 x i32>* %tmp1, i64 5
-  store <4 x i32> %tmp, <4 x i32>* %arrayidx1, align 16
+  %arrayidx = getelementptr inbounds <4 x i32>, ptr %array, i64 3
+  %tmp = load <4 x i32>, ptr %arrayidx, align 16
+  %tmp1 = load ptr, ptr @globalArray32x4, align 8
+  %arrayidx1 = getelementptr inbounds <4 x i32>, ptr %tmp1, i64 5
+  store <4 x i32> %tmp, ptr %arrayidx1, align 16
   ret void
 }
 
-define void @fct1_16x8(<8 x i16>* nocapture %array, i64 %offset) nounwind ssp {
+define void @fct1_16x8(ptr nocapture %array, i64 %offset) nounwind ssp {
 ; CHECK-LABEL: fct1_16x8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x9, :got:globalArray16x8
@@ -136,15 +134,15 @@ define void @fct1_16x8(<8 x i16>* nocapture %array, i64 %offset) nounwind ssp {
 ; CHECK-NEXT:    str q0, [x9, x8]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx = getelementptr inbounds <8 x i16>, <8 x i16>* %array, i64 %offset
-  %tmp = load <8 x i16>, <8 x i16>* %arrayidx, align 16
-  %tmp1 = load <8 x i16>*, <8 x i16>** @globalArray16x8, align 8
-  %arrayidx1 = getelementptr inbounds <8 x i16>, <8 x i16>* %tmp1, i64 %offset
-  store <8 x i16> %tmp, <8 x i16>* %arrayidx1, align 16
+  %arrayidx = getelementptr inbounds <8 x i16>, ptr %array, i64 %offset
+  %tmp = load <8 x i16>, ptr %arrayidx, align 16
+  %tmp1 = load ptr, ptr @globalArray16x8, align 8
+  %arrayidx1 = getelementptr inbounds <8 x i16>, ptr %tmp1, i64 %offset
+  store <8 x i16> %tmp, ptr %arrayidx1, align 16
   ret void
 }
 
-define void @fct2_16x8(<8 x i16>* nocapture %array) nounwind ssp {
+define void @fct2_16x8(ptr nocapture %array) nounwind ssp {
 ; CHECK-LABEL: fct2_16x8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x8, :got:globalArray16x8
@@ -154,15 +152,15 @@ define void @fct2_16x8(<8 x i16>* nocapture %array) nounwind ssp {
 ; CHECK-NEXT:    str q0, [x8, #80]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx = getelementptr inbounds <8 x i16>, <8 x i16>* %array, i64 3
-  %tmp = load <8 x i16>, <8 x i16>* %arrayidx, align 16
-  %tmp1 = load <8 x i16>*, <8 x i16>** @globalArray16x8, align 8
-  %arrayidx1 = getelementptr inbounds <8 x i16>, <8 x i16>* %tmp1, i64 5
-  store <8 x i16> %tmp, <8 x i16>* %arrayidx1, align 16
+  %arrayidx = getelementptr inbounds <8 x i16>, ptr %array, i64 3
+  %tmp = load <8 x i16>, ptr %arrayidx, align 16
+  %tmp1 = load ptr, ptr @globalArray16x8, align 8
+  %arrayidx1 = getelementptr inbounds <8 x i16>, ptr %tmp1, i64 5
+  store <8 x i16> %tmp, ptr %arrayidx1, align 16
   ret void
 }
 
-define void @fct1_8x16(<16 x i8>* nocapture %array, i64 %offset) nounwind ssp {
+define void @fct1_8x16(ptr nocapture %array, i64 %offset) nounwind ssp {
 ; CHECK-LABEL: fct1_8x16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x9, :got:globalArray8x16
@@ -173,15 +171,15 @@ define void @fct1_8x16(<16 x i8>* nocapture %array, i64 %offset) nounwind ssp {
 ; CHECK-NEXT:    str q0, [x9, x8]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx = getelementptr inbounds <16 x i8>, <16 x i8>* %array, i64 %offset
-  %tmp = load <16 x i8>, <16 x i8>* %arrayidx, align 16
-  %tmp1 = load <16 x i8>*, <16 x i8>** @globalArray8x16, align 8
-  %arrayidx1 = getelementptr inbounds <16 x i8>, <16 x i8>* %tmp1, i64 %offset
-  store <16 x i8> %tmp, <16 x i8>* %arrayidx1, align 16
+  %arrayidx = getelementptr inbounds <16 x i8>, ptr %array, i64 %offset
+  %tmp = load <16 x i8>, ptr %arrayidx, align 16
+  %tmp1 = load ptr, ptr @globalArray8x16, align 8
+  %arrayidx1 = getelementptr inbounds <16 x i8>, ptr %tmp1, i64 %offset
+  store <16 x i8> %tmp, ptr %arrayidx1, align 16
   ret void
 }
 
-define void @fct2_8x16(<16 x i8>* nocapture %array) nounwind ssp {
+define void @fct2_8x16(ptr nocapture %array) nounwind ssp {
 ; CHECK-LABEL: fct2_8x16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x8, :got:globalArray8x16
@@ -191,15 +189,15 @@ define void @fct2_8x16(<16 x i8>* nocapture %array) nounwind ssp {
 ; CHECK-NEXT:    str q0, [x8, #80]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx = getelementptr inbounds <16 x i8>, <16 x i8>* %array, i64 3
-  %tmp = load <16 x i8>, <16 x i8>* %arrayidx, align 16
-  %tmp1 = load <16 x i8>*, <16 x i8>** @globalArray8x16, align 8
-  %arrayidx1 = getelementptr inbounds <16 x i8>, <16 x i8>* %tmp1, i64 5
-  store <16 x i8> %tmp, <16 x i8>* %arrayidx1, align 16
+  %arrayidx = getelementptr inbounds <16 x i8>, ptr %array, i64 3
+  %tmp = load <16 x i8>, ptr %arrayidx, align 16
+  %tmp1 = load ptr, ptr @globalArray8x16, align 8
+  %arrayidx1 = getelementptr inbounds <16 x i8>, ptr %tmp1, i64 5
+  store <16 x i8> %tmp, ptr %arrayidx1, align 16
   ret void
 }
 
-define void @fct1_64x1(<1 x i64>* nocapture %array, i64 %offset) nounwind ssp {
+define void @fct1_64x1(ptr nocapture %array, i64 %offset) nounwind ssp {
 ; CHECK-LABEL: fct1_64x1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x9, :got:globalArray64x1
@@ -210,15 +208,15 @@ define void @fct1_64x1(<1 x i64>* nocapture %array, i64 %offset) nounwind ssp {
 ; CHECK-NEXT:    str d0, [x9, x8]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx = getelementptr inbounds <1 x i64>, <1 x i64>* %array, i64 %offset
-  %tmp = load <1 x i64>, <1 x i64>* %arrayidx, align 8
-  %tmp1 = load <1 x i64>*, <1 x i64>** @globalArray64x1, align 8
-  %arrayidx1 = getelementptr inbounds <1 x i64>, <1 x i64>* %tmp1, i64 %offset
-  store <1 x i64> %tmp, <1 x i64>* %arrayidx1, align 8
+  %arrayidx = getelementptr inbounds <1 x i64>, ptr %array, i64 %offset
+  %tmp = load <1 x i64>, ptr %arrayidx, align 8
+  %tmp1 = load ptr, ptr @globalArray64x1, align 8
+  %arrayidx1 = getelementptr inbounds <1 x i64>, ptr %tmp1, i64 %offset
+  store <1 x i64> %tmp, ptr %arrayidx1, align 8
   ret void
 }
 
-define void @fct2_64x1(<1 x i64>* nocapture %array) nounwind ssp {
+define void @fct2_64x1(ptr nocapture %array) nounwind ssp {
 ; CHECK-LABEL: fct2_64x1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x8, :got:globalArray64x1
@@ -228,15 +226,15 @@ define void @fct2_64x1(<1 x i64>* nocapture %array) nounwind ssp {
 ; CHECK-NEXT:    str d0, [x8, #40]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx = getelementptr inbounds <1 x i64>, <1 x i64>* %array, i64 3
-  %tmp = load <1 x i64>, <1 x i64>* %arrayidx, align 8
-  %tmp1 = load <1 x i64>*, <1 x i64>** @globalArray64x1, align 8
-  %arrayidx1 = getelementptr inbounds <1 x i64>, <1 x i64>* %tmp1, i64 5
-  store <1 x i64> %tmp, <1 x i64>* %arrayidx1, align 8
+  %arrayidx = getelementptr inbounds <1 x i64>, ptr %array, i64 3
+  %tmp = load <1 x i64>, ptr %arrayidx, align 8
+  %tmp1 = load ptr, ptr @globalArray64x1, align 8
+  %arrayidx1 = getelementptr inbounds <1 x i64>, ptr %tmp1, i64 5
+  store <1 x i64> %tmp, ptr %arrayidx1, align 8
   ret void
 }
 
-define void @fct1_32x2(<2 x i32>* nocapture %array, i64 %offset) nounwind ssp {
+define void @fct1_32x2(ptr nocapture %array, i64 %offset) nounwind ssp {
 ; CHECK-LABEL: fct1_32x2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x9, :got:globalArray32x2
@@ -247,15 +245,15 @@ define void @fct1_32x2(<2 x i32>* nocapture %array, i64 %offset) nounwind ssp {
 ; CHECK-NEXT:    str d0, [x9, x8]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx = getelementptr inbounds <2 x i32>, <2 x i32>* %array, i64 %offset
-  %tmp = load <2 x i32>, <2 x i32>* %arrayidx, align 8
-  %tmp1 = load <2 x i32>*, <2 x i32>** @globalArray32x2, align 8
-  %arrayidx1 = getelementptr inbounds <2 x i32>, <2 x i32>* %tmp1, i64 %offset
-  store <2 x i32> %tmp, <2 x i32>* %arrayidx1, align 8
+  %arrayidx = getelementptr inbounds <2 x i32>, ptr %array, i64 %offset
+  %tmp = load <2 x i32>, ptr %arrayidx, align 8
+  %tmp1 = load ptr, ptr @globalArray32x2, align 8
+  %arrayidx1 = getelementptr inbounds <2 x i32>, ptr %tmp1, i64 %offset
+  store <2 x i32> %tmp, ptr %arrayidx1, align 8
   ret void
 }
 
-define void @fct2_32x2(<2 x i32>* nocapture %array) nounwind ssp {
+define void @fct2_32x2(ptr nocapture %array) nounwind ssp {
 ; CHECK-LABEL: fct2_32x2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x8, :got:globalArray32x2
@@ -265,15 +263,15 @@ define void @fct2_32x2(<2 x i32>* nocapture %array) nounwind ssp {
 ; CHECK-NEXT:    str d0, [x8, #40]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx = getelementptr inbounds <2 x i32>, <2 x i32>* %array, i64 3
-  %tmp = load <2 x i32>, <2 x i32>* %arrayidx, align 8
-  %tmp1 = load <2 x i32>*, <2 x i32>** @globalArray32x2, align 8
-  %arrayidx1 = getelementptr inbounds <2 x i32>, <2 x i32>* %tmp1, i64 5
-  store <2 x i32> %tmp, <2 x i32>* %arrayidx1, align 8
+  %arrayidx = getelementptr inbounds <2 x i32>, ptr %array, i64 3
+  %tmp = load <2 x i32>, ptr %arrayidx, align 8
+  %tmp1 = load ptr, ptr @globalArray32x2, align 8
+  %arrayidx1 = getelementptr inbounds <2 x i32>, ptr %tmp1, i64 5
+  store <2 x i32> %tmp, ptr %arrayidx1, align 8
   ret void
 }
 
-define void @fct1_16x4(<4 x i16>* nocapture %array, i64 %offset) nounwind ssp {
+define void @fct1_16x4(ptr nocapture %array, i64 %offset) nounwind ssp {
 ; CHECK-LABEL: fct1_16x4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x9, :got:globalArray16x4
@@ -284,15 +282,15 @@ define void @fct1_16x4(<4 x i16>* nocapture %array, i64 %offset) nounwind ssp {
 ; CHECK-NEXT:    str d0, [x9, x8]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx = getelementptr inbounds <4 x i16>, <4 x i16>* %array, i64 %offset
-  %tmp = load <4 x i16>, <4 x i16>* %arrayidx, align 8
-  %tmp1 = load <4 x i16>*, <4 x i16>** @globalArray16x4, align 8
-  %arrayidx1 = getelementptr inbounds <4 x i16>, <4 x i16>* %tmp1, i64 %offset
-  store <4 x i16> %tmp, <4 x i16>* %arrayidx1, align 8
+  %arrayidx = getelementptr inbounds <4 x i16>, ptr %array, i64 %offset
+  %tmp = load <4 x i16>, ptr %arrayidx, align 8
+  %tmp1 = load ptr, ptr @globalArray16x4, align 8
+  %arrayidx1 = getelementptr inbounds <4 x i16>, ptr %tmp1, i64 %offset
+  store <4 x i16> %tmp, ptr %arrayidx1, align 8
   ret void
 }
 
-define void @fct2_16x4(<4 x i16>* nocapture %array) nounwind ssp {
+define void @fct2_16x4(ptr nocapture %array) nounwind ssp {
 ; CHECK-LABEL: fct2_16x4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x8, :got:globalArray16x4
@@ -302,15 +300,15 @@ define void @fct2_16x4(<4 x i16>* nocapture %array) nounwind ssp {
 ; CHECK-NEXT:    str d0, [x8, #40]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx = getelementptr inbounds <4 x i16>, <4 x i16>* %array, i64 3
-  %tmp = load <4 x i16>, <4 x i16>* %arrayidx, align 8
-  %tmp1 = load <4 x i16>*, <4 x i16>** @globalArray16x4, align 8
-  %arrayidx1 = getelementptr inbounds <4 x i16>, <4 x i16>* %tmp1, i64 5
-  store <4 x i16> %tmp, <4 x i16>* %arrayidx1, align 8
+  %arrayidx = getelementptr inbounds <4 x i16>, ptr %array, i64 3
+  %tmp = load <4 x i16>, ptr %arrayidx, align 8
+  %tmp1 = load ptr, ptr @globalArray16x4, align 8
+  %arrayidx1 = getelementptr inbounds <4 x i16>, ptr %tmp1, i64 5
+  store <4 x i16> %tmp, ptr %arrayidx1, align 8
   ret void
 }
 
-define void @fct1_8x8(<8 x i8>* nocapture %array, i64 %offset) nounwind ssp {
+define void @fct1_8x8(ptr nocapture %array, i64 %offset) nounwind ssp {
 ; CHECK-LABEL: fct1_8x8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x9, :got:globalArray8x8
@@ -321,238 +319,214 @@ define void @fct1_8x8(<8 x i8>* nocapture %array, i64 %offset) nounwind ssp {
 ; CHECK-NEXT:    str d0, [x9, x8]
 ; CHECK-NEXT:    ret
 entry:
-  %arrayidx = getelementptr inbounds <8 x i8>, <8 x i8>* %array, i64 %offset
-  %tmp = load <8 x i8>, <8 x i8>* %arrayidx, align 8
-  %tmp1 = load <8 x i8>*, <8 x i8>** @globalArray8x8, align 8
-  %arrayidx1 = getelementptr inbounds <8 x i8>, <8 x i8>* %tmp1, i64 %offset
-  store <8 x i8> %tmp, <8 x i8>* %arrayidx1, align 8
+  %arrayidx = getelementptr inbounds <8 x i8>, ptr %array, i64 %offset
+  %tmp = load <8 x i8>, ptr %arrayidx, align 8
+  %tmp1 = load ptr, ptr @globalArray8x8, align 8
+  %arrayidx1 = getelementptr inbounds <8 x i8>, ptr %tmp1, i64 %offset
+  store <8 x i8> %tmp, ptr %arrayidx1, align 8
   ret void
 }
 
 ; Add a bunch of tests for rdar://13258794: Match LDUR/STUR for D and Q
 ; registers for unscaled vector accesses
 
-define <1 x i64> @fct0(i8* %str) nounwind readonly ssp {
+define <1 x i64> @fct0(ptr %str) nounwind readonly ssp {
 ; CHECK-LABEL: fct0:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur d0, [x0, #3]
 ; CHECK-NEXT:    ret
 entry:
-  %p = getelementptr inbounds i8, i8* %str, i64 3
-  %q = bitcast i8* %p to <1 x i64>*
-  %0 = load <1 x i64>, <1 x i64>* %q, align 8
+  %p = getelementptr inbounds i8, ptr %str, i64 3
+  %0 = load <1 x i64>, ptr %p, align 8
   ret <1 x i64> %0
 }
 
-define <2 x i32> @fct1(i8* %str) nounwind readonly ssp {
+define <2 x i32> @fct1(ptr %str) nounwind readonly ssp {
 ; CHECK-LABEL: fct1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur d0, [x0, #3]
 ; CHECK-NEXT:    ret
 entry:
-  %p = getelementptr inbounds i8, i8* %str, i64 3
-  %q = bitcast i8* %p to <2 x i32>*
-  %0 = load <2 x i32>, <2 x i32>* %q, align 8
+  %p = getelementptr inbounds i8, ptr %str, i64 3
+  %0 = load <2 x i32>, ptr %p, align 8
   ret <2 x i32> %0
 }
 
-define <4 x i16> @fct2(i8* %str) nounwind readonly ssp {
+define <4 x i16> @fct2(ptr %str) nounwind readonly ssp {
 ; CHECK-LABEL: fct2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur d0, [x0, #3]
 ; CHECK-NEXT:    ret
 entry:
-  %p = getelementptr inbounds i8, i8* %str, i64 3
-  %q = bitcast i8* %p to <4 x i16>*
-  %0 = load <4 x i16>, <4 x i16>* %q, align 8
+  %p = getelementptr inbounds i8, ptr %str, i64 3
+  %0 = load <4 x i16>, ptr %p, align 8
   ret <4 x i16> %0
 }
 
-define <8 x i8> @fct3(i8* %str) nounwind readonly ssp {
+define <8 x i8> @fct3(ptr %str) nounwind readonly ssp {
 ; CHECK-LABEL: fct3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur d0, [x0, #3]
 ; CHECK-NEXT:    ret
 entry:
-  %p = getelementptr inbounds i8, i8* %str, i64 3
-  %q = bitcast i8* %p to <8 x i8>*
-  %0 = load <8 x i8>, <8 x i8>* %q, align 8
+  %p = getelementptr inbounds i8, ptr %str, i64 3
+  %0 = load <8 x i8>, ptr %p, align 8
   ret <8 x i8> %0
 }
 
-define <2 x i64> @fct4(i8* %str) nounwind readonly ssp {
+define <2 x i64> @fct4(ptr %str) nounwind readonly ssp {
 ; CHECK-LABEL: fct4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur q0, [x0, #3]
 ; CHECK-NEXT:    ret
 entry:
-  %p = getelementptr inbounds i8, i8* %str, i64 3
-  %q = bitcast i8* %p to <2 x i64>*
-  %0 = load <2 x i64>, <2 x i64>* %q, align 16
+  %p = getelementptr inbounds i8, ptr %str, i64 3
+  %0 = load <2 x i64>, ptr %p, align 16
   ret <2 x i64> %0
 }
 
-define <4 x i32> @fct5(i8* %str) nounwind readonly ssp {
+define <4 x i32> @fct5(ptr %str) nounwind readonly ssp {
 ; CHECK-LABEL: fct5:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur q0, [x0, #3]
 ; CHECK-NEXT:    ret
 entry:
-  %p = getelementptr inbounds i8, i8* %str, i64 3
-  %q = bitcast i8* %p to <4 x i32>*
-  %0 = load <4 x i32>, <4 x i32>* %q, align 16
+  %p = getelementptr inbounds i8, ptr %str, i64 3
+  %0 = load <4 x i32>, ptr %p, align 16
   ret <4 x i32> %0
 }
 
-define <8 x i16> @fct6(i8* %str) nounwind readonly ssp {
+define <8 x i16> @fct6(ptr %str) nounwind readonly ssp {
 ; CHECK-LABEL: fct6:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur q0, [x0, #3]
 ; CHECK-NEXT:    ret
 entry:
-  %p = getelementptr inbounds i8, i8* %str, i64 3
-  %q = bitcast i8* %p to <8 x i16>*
-  %0 = load <8 x i16>, <8 x i16>* %q, align 16
+  %p = getelementptr inbounds i8, ptr %str, i64 3
+  %0 = load <8 x i16>, ptr %p, align 16
   ret <8 x i16> %0
 }
 
-define <16 x i8> @fct7(i8* %str) nounwind readonly ssp {
+define <16 x i8> @fct7(ptr %str) nounwind readonly ssp {
 ; CHECK-LABEL: fct7:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur q0, [x0, #3]
 ; CHECK-NEXT:    ret
 entry:
-  %p = getelementptr inbounds i8, i8* %str, i64 3
-  %q = bitcast i8* %p to <16 x i8>*
-  %0 = load <16 x i8>, <16 x i8>* %q, align 16
+  %p = getelementptr inbounds i8, ptr %str, i64 3
+  %0 = load <16 x i8>, ptr %p, align 16
   ret <16 x i8> %0
 }
 
-define void @fct8(i8* %str) nounwind ssp {
+define void @fct8(ptr %str) nounwind ssp {
 ; CHECK-LABEL: fct8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur d0, [x0, #3]
 ; CHECK-NEXT:    stur d0, [x0, #4]
 ; CHECK-NEXT:    ret
 entry:
-  %p = getelementptr inbounds i8, i8* %str, i64 3
-  %q = bitcast i8* %p to <1 x i64>*
-  %0 = load <1 x i64>, <1 x i64>* %q, align 8
-  %p2 = getelementptr inbounds i8, i8* %str, i64 4
-  %q2 = bitcast i8* %p2 to <1 x i64>*
-  store <1 x i64> %0, <1 x i64>* %q2, align 8
+  %p = getelementptr inbounds i8, ptr %str, i64 3
+  %0 = load <1 x i64>, ptr %p, align 8
+  %p2 = getelementptr inbounds i8, ptr %str, i64 4
+  store <1 x i64> %0, ptr %p2, align 8
   ret void
 }
 
-define void @fct9(i8* %str) nounwind ssp {
+define void @fct9(ptr %str) nounwind ssp {
 ; CHECK-LABEL: fct9:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur d0, [x0, #3]
 ; CHECK-NEXT:    stur d0, [x0, #4]
 ; CHECK-NEXT:    ret
 entry:
-  %p = getelementptr inbounds i8, i8* %str, i64 3
-  %q = bitcast i8* %p to <2 x i32>*
-  %0 = load <2 x i32>, <2 x i32>* %q, align 8
-  %p2 = getelementptr inbounds i8, i8* %str, i64 4
-  %q2 = bitcast i8* %p2 to <2 x i32>*
-  store <2 x i32> %0, <2 x i32>* %q2, align 8
+  %p = getelementptr inbounds i8, ptr %str, i64 3
+  %0 = load <2 x i32>, ptr %p, align 8
+  %p2 = getelementptr inbounds i8, ptr %str, i64 4
+  store <2 x i32> %0, ptr %p2, align 8
   ret void
 }
 
-define void @fct10(i8* %str) nounwind ssp {
+define void @fct10(ptr %str) nounwind ssp {
 ; CHECK-LABEL: fct10:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur d0, [x0, #3]
 ; CHECK-NEXT:    stur d0, [x0, #4]
 ; CHECK-NEXT:    ret
 entry:
-  %p = getelementptr inbounds i8, i8* %str, i64 3
-  %q = bitcast i8* %p to <4 x i16>*
-  %0 = load <4 x i16>, <4 x i16>* %q, align 8
-  %p2 = getelementptr inbounds i8, i8* %str, i64 4
-  %q2 = bitcast i8* %p2 to <4 x i16>*
-  store <4 x i16> %0, <4 x i16>* %q2, align 8
+  %p = getelementptr inbounds i8, ptr %str, i64 3
+  %0 = load <4 x i16>, ptr %p, align 8
+  %p2 = getelementptr inbounds i8, ptr %str, i64 4
+  store <4 x i16> %0, ptr %p2, align 8
   ret void
 }
 
-define void @fct11(i8* %str) nounwind ssp {
+define void @fct11(ptr %str) nounwind ssp {
 ; CHECK-LABEL: fct11:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur d0, [x0, #3]
 ; CHECK-NEXT:    stur d0, [x0, #4]
 ; CHECK-NEXT:    ret
 entry:
-  %p = getelementptr inbounds i8, i8* %str, i64 3
-  %q = bitcast i8* %p to <8 x i8>*
-  %0 = load <8 x i8>, <8 x i8>* %q, align 8
-  %p2 = getelementptr inbounds i8, i8* %str, i64 4
-  %q2 = bitcast i8* %p2 to <8 x i8>*
-  store <8 x i8> %0, <8 x i8>* %q2, align 8
+  %p = getelementptr inbounds i8, ptr %str, i64 3
+  %0 = load <8 x i8>, ptr %p, align 8
+  %p2 = getelementptr inbounds i8, ptr %str, i64 4
+  store <8 x i8> %0, ptr %p2, align 8
   ret void
 }
 
-define void @fct12(i8* %str) nounwind ssp {
+define void @fct12(ptr %str) nounwind ssp {
 ; CHECK-LABEL: fct12:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur q0, [x0, #3]
 ; CHECK-NEXT:    stur q0, [x0, #4]
 ; CHECK-NEXT:    ret
 entry:
-  %p = getelementptr inbounds i8, i8* %str, i64 3
-  %q = bitcast i8* %p to <2 x i64>*
-  %0 = load <2 x i64>, <2 x i64>* %q, align 16
-  %p2 = getelementptr inbounds i8, i8* %str, i64 4
-  %q2 = bitcast i8* %p2 to <2 x i64>*
-  store <2 x i64> %0, <2 x i64>* %q2, align 16
+  %p = getelementptr inbounds i8, ptr %str, i64 3
+  %0 = load <2 x i64>, ptr %p, align 16
+  %p2 = getelementptr inbounds i8, ptr %str, i64 4
+  store <2 x i64> %0, ptr %p2, align 16
   ret void
 }
 
-define void @fct13(i8* %str) nounwind ssp {
+define void @fct13(ptr %str) nounwind ssp {
 ; CHECK-LABEL: fct13:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur q0, [x0, #3]
 ; CHECK-NEXT:    stur q0, [x0, #4]
 ; CHECK-NEXT:    ret
 entry:
-  %p = getelementptr inbounds i8, i8* %str, i64 3
-  %q = bitcast i8* %p to <4 x i32>*
-  %0 = load <4 x i32>, <4 x i32>* %q, align 16
-  %p2 = getelementptr inbounds i8, i8* %str, i64 4
-  %q2 = bitcast i8* %p2 to <4 x i32>*
-  store <4 x i32> %0, <4 x i32>* %q2, align 16
+  %p = getelementptr inbounds i8, ptr %str, i64 3
+  %0 = load <4 x i32>, ptr %p, align 16
+  %p2 = getelementptr inbounds i8, ptr %str, i64 4
+  store <4 x i32> %0, ptr %p2, align 16
   ret void
 }
 
-define void @fct14(i8* %str) nounwind ssp {
+define void @fct14(ptr %str) nounwind ssp {
 ; CHECK-LABEL: fct14:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur q0, [x0, #3]
 ; CHECK-NEXT:    stur q0, [x0, #4]
 ; CHECK-NEXT:    ret
 entry:
-  %p = getelementptr inbounds i8, i8* %str, i64 3
-  %q = bitcast i8* %p to <8 x i16>*
-  %0 = load <8 x i16>, <8 x i16>* %q, align 16
-  %p2 = getelementptr inbounds i8, i8* %str, i64 4
-  %q2 = bitcast i8* %p2 to <8 x i16>*
-  store <8 x i16> %0, <8 x i16>* %q2, align 16
+  %p = getelementptr inbounds i8, ptr %str, i64 3
+  %0 = load <8 x i16>, ptr %p, align 16
+  %p2 = getelementptr inbounds i8, ptr %str, i64 4
+  store <8 x i16> %0, ptr %p2, align 16
   ret void
 }
 
-define void @fct15(i8* %str) nounwind ssp {
+define void @fct15(ptr %str) nounwind ssp {
 ; CHECK-LABEL: fct15:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldur q0, [x0, #3]
 ; CHECK-NEXT:    stur q0, [x0, #4]
 ; CHECK-NEXT:    ret
 entry:
-  %p = getelementptr inbounds i8, i8* %str, i64 3
-  %q = bitcast i8* %p to <16 x i8>*
-  %0 = load <16 x i8>, <16 x i8>* %q, align 16
-  %p2 = getelementptr inbounds i8, i8* %str, i64 4
-  %q2 = bitcast i8* %p2 to <16 x i8>*
-  store <16 x i8> %0, <16 x i8>* %q2, align 16
+  %p = getelementptr inbounds i8, ptr %str, i64 3
+  %0 = load <16 x i8>, ptr %p, align 16
+  %p2 = getelementptr inbounds i8, ptr %str, i64 4
+  store <16 x i8> %0, ptr %p2, align 16
   ret void
 }
 
@@ -560,220 +534,220 @@ entry:
 ; Part of <rdar://problem/14170854>
 ;
 ; Single loads with immediate offset.
-define <8 x i8> @fct16(i8* nocapture %sp0) {
+define <8 x i8> @fct16(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr b0, [x0, #1]
 ; CHECK-NEXT:    mul.8b v0, v0, v0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i8, i8* %sp0, i64 1
-  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
+  %addr = getelementptr i8, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i8, ptr %addr, align 1
   %vec = insertelement <8 x i8> undef, i8 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <8 x i8> %vec, %vec
   ret <8 x i8> %vmull.i
 }
 
-define <16 x i8> @fct17(i8* nocapture %sp0) {
+define <16 x i8> @fct17(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct17:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr b0, [x0, #1]
 ; CHECK-NEXT:    mul.16b v0, v0, v0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i8, i8* %sp0, i64 1
-  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
+  %addr = getelementptr i8, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i8, ptr %addr, align 1
   %vec = insertelement <16 x i8> undef, i8 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <16 x i8> %vec, %vec
   ret <16 x i8> %vmull.i
 }
 
-define <4 x i16> @fct18(i16* nocapture %sp0) {
+define <4 x i16> @fct18(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct18:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr h0, [x0, #2]
 ; CHECK-NEXT:    mul.4h v0, v0, v0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i16, i16* %sp0, i64 1
-  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
+  %addr = getelementptr i16, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i16, ptr %addr, align 1
   %vec = insertelement <4 x i16> undef, i16 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <4 x i16> %vec, %vec
   ret <4 x i16> %vmull.i
 }
 
-define <8 x i16> @fct19(i16* nocapture %sp0) {
+define <8 x i16> @fct19(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct19:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr h0, [x0, #2]
 ; CHECK-NEXT:    mul.8h v0, v0, v0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i16, i16* %sp0, i64 1
-  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
+  %addr = getelementptr i16, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i16, ptr %addr, align 1
   %vec = insertelement <8 x i16> undef, i16 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <8 x i16> %vec, %vec
   ret <8 x i16> %vmull.i
 }
 
-define <2 x i32> @fct20(i32* nocapture %sp0) {
+define <2 x i32> @fct20(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct20:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr s0, [x0, #4]
 ; CHECK-NEXT:    mul.2s v0, v0, v0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i32, i32* %sp0, i64 1
-  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
+  %addr = getelementptr i32, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i32, ptr %addr, align 1
   %vec = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <2 x i32> %vec, %vec
   ret <2 x i32> %vmull.i
 }
 
-define <4 x i32> @fct21(i32* nocapture %sp0) {
+define <4 x i32> @fct21(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct21:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr s0, [x0, #4]
 ; CHECK-NEXT:    mul.4s v0, v0, v0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i32, i32* %sp0, i64 1
-  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
+  %addr = getelementptr i32, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i32, ptr %addr, align 1
   %vec = insertelement <4 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <4 x i32> %vec, %vec
   ret <4 x i32> %vmull.i
 }
 
-define <1 x i64> @fct22(i64* nocapture %sp0) {
+define <1 x i64> @fct22(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct22:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0, #8]
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i64, i64* %sp0, i64 1
-  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
+  %addr = getelementptr i64, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i64, ptr %addr, align 1
   %vec = insertelement <1 x i64> undef, i64 %pix_sp0.0.copyload, i32 0
    ret <1 x i64> %vec
 }
 
-define <2 x i64> @fct23(i64* nocapture %sp0) {
+define <2 x i64> @fct23(ptr nocapture %sp0) {
 ; CHECK-LABEL: fct23:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0, #8]
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i64, i64* %sp0, i64 1
-  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
+  %addr = getelementptr i64, ptr %sp0, i64 1
+  %pix_sp0.0.copyload = load i64, ptr %addr, align 1
   %vec = insertelement <2 x i64> undef, i64 %pix_sp0.0.copyload, i32 0
   ret <2 x i64> %vec
 }
 
 ;
 ; Single loads with register offset.
-define <8 x i8> @fct24(i8* nocapture %sp0, i64 %offset) {
+define <8 x i8> @fct24(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: fct24:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr b0, [x0, x1]
 ; CHECK-NEXT:    mul.8b v0, v0, v0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i8, i8* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
+  %addr = getelementptr i8, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i8, ptr %addr, align 1
   %vec = insertelement <8 x i8> undef, i8 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <8 x i8> %vec, %vec
   ret <8 x i8> %vmull.i
 }
 
-define <16 x i8> @fct25(i8* nocapture %sp0, i64 %offset) {
+define <16 x i8> @fct25(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: fct25:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr b0, [x0, x1]
 ; CHECK-NEXT:    mul.16b v0, v0, v0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i8, i8* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i8, i8* %addr, align 1
+  %addr = getelementptr i8, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i8, ptr %addr, align 1
   %vec = insertelement <16 x i8> undef, i8 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <16 x i8> %vec, %vec
   ret <16 x i8> %vmull.i
 }
 
-define <4 x i16> @fct26(i16* nocapture %sp0, i64 %offset) {
+define <4 x i16> @fct26(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: fct26:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr h0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    mul.4h v0, v0, v0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i16, i16* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
+  %addr = getelementptr i16, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i16, ptr %addr, align 1
   %vec = insertelement <4 x i16> undef, i16 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <4 x i16> %vec, %vec
   ret <4 x i16> %vmull.i
 }
 
-define <8 x i16> @fct27(i16* nocapture %sp0, i64 %offset) {
+define <8 x i16> @fct27(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: fct27:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr h0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    mul.8h v0, v0, v0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i16, i16* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i16, i16* %addr, align 1
+  %addr = getelementptr i16, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i16, ptr %addr, align 1
   %vec = insertelement <8 x i16> undef, i16 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <8 x i16> %vec, %vec
   ret <8 x i16> %vmull.i
 }
 
-define <2 x i32> @fct28(i32* nocapture %sp0, i64 %offset) {
+define <2 x i32> @fct28(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: fct28:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr s0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    mul.2s v0, v0, v0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i32, i32* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
+  %addr = getelementptr i32, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i32, ptr %addr, align 1
   %vec = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <2 x i32> %vec, %vec
   ret <2 x i32> %vmull.i
 }
 
-define <4 x i32> @fct29(i32* nocapture %sp0, i64 %offset) {
+define <4 x i32> @fct29(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: fct29:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr s0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    mul.4s v0, v0, v0
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i32, i32* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i32, i32* %addr, align 1
+  %addr = getelementptr i32, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i32, ptr %addr, align 1
   %vec = insertelement <4 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
   %vmull.i = mul <4 x i32> %vec, %vec
   ret <4 x i32> %vmull.i
 }
 
-define <1 x i64> @fct30(i64* nocapture %sp0, i64 %offset) {
+define <1 x i64> @fct30(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: fct30:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i64, i64* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
+  %addr = getelementptr i64, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i64, ptr %addr, align 1
   %vec = insertelement <1 x i64> undef, i64 %pix_sp0.0.copyload, i32 0
    ret <1 x i64> %vec
 }
 
-define <2 x i64> @fct31(i64* nocapture %sp0, i64 %offset) {
+define <2 x i64> @fct31(ptr nocapture %sp0, i64 %offset) {
 ; CHECK-LABEL: fct31:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i64, i64* %sp0, i64 %offset
-  %pix_sp0.0.copyload = load i64, i64* %addr, align 1
+  %addr = getelementptr i64, ptr %sp0, i64 %offset
+  %pix_sp0.0.copyload = load i64, ptr %addr, align 1
   %vec = insertelement <2 x i64> undef, i64 %pix_sp0.0.copyload, i32 0
   ret <2 x i64> %vec
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-vext.ll b/llvm/test/CodeGen/AArch64/arm64-vext.ll
index f56e9e0f2b450..a56bd6b4e2f21 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vext.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vext.ll
@@ -6,14 +6,14 @@ define void @test_vext_s8() nounwind ssp {
   %xS8x8 = alloca <8 x i8>, align 8
   %__a = alloca <8 x i8>, align 8
   %__b = alloca <8 x i8>, align 8
-  %tmp = load <8 x i8>, <8 x i8>* %xS8x8, align 8
-  store <8 x i8> %tmp, <8 x i8>* %__a, align 8
-  %tmp1 = load <8 x i8>, <8 x i8>* %xS8x8, align 8
-  store <8 x i8> %tmp1, <8 x i8>* %__b, align 8
-  %tmp2 = load <8 x i8>, <8 x i8>* %__a, align 8
-  %tmp3 = load <8 x i8>, <8 x i8>* %__b, align 8
+  %tmp = load <8 x i8>, ptr %xS8x8, align 8
+  store <8 x i8> %tmp, ptr %__a, align 8
+  %tmp1 = load <8 x i8>, ptr %xS8x8, align 8
+  store <8 x i8> %tmp1, ptr %__b, align 8
+  %tmp2 = load <8 x i8>, ptr %__a, align 8
+  %tmp3 = load <8 x i8>, ptr %__b, align 8
   %vext = shufflevector <8 x i8> %tmp2, <8 x i8> %tmp3, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
-  store <8 x i8> %vext, <8 x i8>* %xS8x8, align 8
+  store <8 x i8> %vext, ptr %xS8x8, align 8
   ret void
 }
 
@@ -23,14 +23,14 @@ define void @test_vext_u8() nounwind ssp {
   %xU8x8 = alloca <8 x i8>, align 8
   %__a = alloca <8 x i8>, align 8
   %__b = alloca <8 x i8>, align 8
-  %tmp = load <8 x i8>, <8 x i8>* %xU8x8, align 8
-  store <8 x i8> %tmp, <8 x i8>* %__a, align 8
-  %tmp1 = load <8 x i8>, <8 x i8>* %xU8x8, align 8
-  store <8 x i8> %tmp1, <8 x i8>* %__b, align 8
-  %tmp2 = load <8 x i8>, <8 x i8>* %__a, align 8
-  %tmp3 = load <8 x i8>, <8 x i8>* %__b, align 8
+  %tmp = load <8 x i8>, ptr %xU8x8, align 8
+  store <8 x i8> %tmp, ptr %__a, align 8
+  %tmp1 = load <8 x i8>, ptr %xU8x8, align 8
+  store <8 x i8> %tmp1, ptr %__b, align 8
+  %tmp2 = load <8 x i8>, ptr %__a, align 8
+  %tmp3 = load <8 x i8>, ptr %__b, align 8
   %vext = shufflevector <8 x i8> %tmp2, <8 x i8> %tmp3, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
-  store <8 x i8> %vext, <8 x i8>* %xU8x8, align 8
+  store <8 x i8> %vext, ptr %xU8x8, align 8
   ret void
 }
 
@@ -40,14 +40,14 @@ define void @test_vext_p8() nounwind ssp {
   %xP8x8 = alloca <8 x i8>, align 8
   %__a = alloca <8 x i8>, align 8
   %__b = alloca <8 x i8>, align 8
-  %tmp = load <8 x i8>, <8 x i8>* %xP8x8, align 8
-  store <8 x i8> %tmp, <8 x i8>* %__a, align 8
-  %tmp1 = load <8 x i8>, <8 x i8>* %xP8x8, align 8
-  store <8 x i8> %tmp1, <8 x i8>* %__b, align 8
-  %tmp2 = load <8 x i8>, <8 x i8>* %__a, align 8
-  %tmp3 = load <8 x i8>, <8 x i8>* %__b, align 8
+  %tmp = load <8 x i8>, ptr %xP8x8, align 8
+  store <8 x i8> %tmp, ptr %__a, align 8
+  %tmp1 = load <8 x i8>, ptr %xP8x8, align 8
+  store <8 x i8> %tmp1, ptr %__b, align 8
+  %tmp2 = load <8 x i8>, ptr %__a, align 8
+  %tmp3 = load <8 x i8>, ptr %__b, align 8
   %vext = shufflevector <8 x i8> %tmp2, <8 x i8> %tmp3, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
-  store <8 x i8> %vext, <8 x i8>* %xP8x8, align 8
+  store <8 x i8> %vext, ptr %xP8x8, align 8
   ret void
 }
 
@@ -57,18 +57,18 @@ define void @test_vext_s16() nounwind ssp {
   %xS16x4 = alloca <4 x i16>, align 8
   %__a = alloca <4 x i16>, align 8
   %__b = alloca <4 x i16>, align 8
-  %tmp = load <4 x i16>, <4 x i16>* %xS16x4, align 8
-  store <4 x i16> %tmp, <4 x i16>* %__a, align 8
-  %tmp1 = load <4 x i16>, <4 x i16>* %xS16x4, align 8
-  store <4 x i16> %tmp1, <4 x i16>* %__b, align 8
-  %tmp2 = load <4 x i16>, <4 x i16>* %__a, align 8
+  %tmp = load <4 x i16>, ptr %xS16x4, align 8
+  store <4 x i16> %tmp, ptr %__a, align 8
+  %tmp1 = load <4 x i16>, ptr %xS16x4, align 8
+  store <4 x i16> %tmp1, ptr %__b, align 8
+  %tmp2 = load <4 x i16>, ptr %__a, align 8
   %tmp3 = bitcast <4 x i16> %tmp2 to <8 x i8>
-  %tmp4 = load <4 x i16>, <4 x i16>* %__b, align 8
+  %tmp4 = load <4 x i16>, ptr %__b, align 8
   %tmp5 = bitcast <4 x i16> %tmp4 to <8 x i8>
   %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
   %tmp7 = bitcast <8 x i8> %tmp5 to <4 x i16>
   %vext = shufflevector <4 x i16> %tmp6, <4 x i16> %tmp7, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
-  store <4 x i16> %vext, <4 x i16>* %xS16x4, align 8
+  store <4 x i16> %vext, ptr %xS16x4, align 8
   ret void
 }
 
@@ -78,18 +78,18 @@ define void @test_vext_u16() nounwind ssp {
   %xU16x4 = alloca <4 x i16>, align 8
   %__a = alloca <4 x i16>, align 8
   %__b = alloca <4 x i16>, align 8
-  %tmp = load <4 x i16>, <4 x i16>* %xU16x4, align 8
-  store <4 x i16> %tmp, <4 x i16>* %__a, align 8
-  %tmp1 = load <4 x i16>, <4 x i16>* %xU16x4, align 8
-  store <4 x i16> %tmp1, <4 x i16>* %__b, align 8
-  %tmp2 = load <4 x i16>, <4 x i16>* %__a, align 8
+  %tmp = load <4 x i16>, ptr %xU16x4, align 8
+  store <4 x i16> %tmp, ptr %__a, align 8
+  %tmp1 = load <4 x i16>, ptr %xU16x4, align 8
+  store <4 x i16> %tmp1, ptr %__b, align 8
+  %tmp2 = load <4 x i16>, ptr %__a, align 8
   %tmp3 = bitcast <4 x i16> %tmp2 to <8 x i8>
-  %tmp4 = load <4 x i16>, <4 x i16>* %__b, align 8
+  %tmp4 = load <4 x i16>, ptr %__b, align 8
   %tmp5 = bitcast <4 x i16> %tmp4 to <8 x i8>
   %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
   %tmp7 = bitcast <8 x i8> %tmp5 to <4 x i16>
   %vext = shufflevector <4 x i16> %tmp6, <4 x i16> %tmp7, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
-  store <4 x i16> %vext, <4 x i16>* %xU16x4, align 8
+  store <4 x i16> %vext, ptr %xU16x4, align 8
   ret void
 }
 
@@ -99,18 +99,18 @@ define void @test_vext_p16() nounwind ssp {
   %xP16x4 = alloca <4 x i16>, align 8
   %__a = alloca <4 x i16>, align 8
   %__b = alloca <4 x i16>, align 8
-  %tmp = load <4 x i16>, <4 x i16>* %xP16x4, align 8
-  store <4 x i16> %tmp, <4 x i16>* %__a, align 8
-  %tmp1 = load <4 x i16>, <4 x i16>* %xP16x4, align 8
-  store <4 x i16> %tmp1, <4 x i16>* %__b, align 8
-  %tmp2 = load <4 x i16>, <4 x i16>* %__a, align 8
+  %tmp = load <4 x i16>, ptr %xP16x4, align 8
+  store <4 x i16> %tmp, ptr %__a, align 8
+  %tmp1 = load <4 x i16>, ptr %xP16x4, align 8
+  store <4 x i16> %tmp1, ptr %__b, align 8
+  %tmp2 = load <4 x i16>, ptr %__a, align 8
   %tmp3 = bitcast <4 x i16> %tmp2 to <8 x i8>
-  %tmp4 = load <4 x i16>, <4 x i16>* %__b, align 8
+  %tmp4 = load <4 x i16>, ptr %__b, align 8
   %tmp5 = bitcast <4 x i16> %tmp4 to <8 x i8>
   %tmp6 = bitcast <8 x i8> %tmp3 to <4 x i16>
   %tmp7 = bitcast <8 x i8> %tmp5 to <4 x i16>
   %vext = shufflevector <4 x i16> %tmp6, <4 x i16> %tmp7, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-  store <4 x i16> %vext, <4 x i16>* %xP16x4, align 8
+  store <4 x i16> %vext, ptr %xP16x4, align 8
   ret void
 }
 
@@ -120,18 +120,18 @@ define void @test_vext_s32() nounwind ssp {
   %xS32x2 = alloca <2 x i32>, align 8
   %__a = alloca <2 x i32>, align 8
   %__b = alloca <2 x i32>, align 8
-  %tmp = load <2 x i32>, <2 x i32>* %xS32x2, align 8
-  store <2 x i32> %tmp, <2 x i32>* %__a, align 8
-  %tmp1 = load <2 x i32>, <2 x i32>* %xS32x2, align 8
-  store <2 x i32> %tmp1, <2 x i32>* %__b, align 8
-  %tmp2 = load <2 x i32>, <2 x i32>* %__a, align 8
+  %tmp = load <2 x i32>, ptr %xS32x2, align 8
+  store <2 x i32> %tmp, ptr %__a, align 8
+  %tmp1 = load <2 x i32>, ptr %xS32x2, align 8
+  store <2 x i32> %tmp1, ptr %__b, align 8
+  %tmp2 = load <2 x i32>, ptr %__a, align 8
   %tmp3 = bitcast <2 x i32> %tmp2 to <8 x i8>
-  %tmp4 = load <2 x i32>, <2 x i32>* %__b, align 8
+  %tmp4 = load <2 x i32>, ptr %__b, align 8
   %tmp5 = bitcast <2 x i32> %tmp4 to <8 x i8>
   %tmp6 = bitcast <8 x i8> %tmp3 to <2 x i32>
   %tmp7 = bitcast <8 x i8> %tmp5 to <2 x i32>
   %vext = shufflevector <2 x i32> %tmp6, <2 x i32> %tmp7, <2 x i32> <i32 1, i32 2>
-  store <2 x i32> %vext, <2 x i32>* %xS32x2, align 8
+  store <2 x i32> %vext, ptr %xS32x2, align 8
   ret void
 }
 
@@ -141,18 +141,18 @@ define void @test_vext_u32() nounwind ssp {
   %xU32x2 = alloca <2 x i32>, align 8
   %__a = alloca <2 x i32>, align 8
   %__b = alloca <2 x i32>, align 8
-  %tmp = load <2 x i32>, <2 x i32>* %xU32x2, align 8
-  store <2 x i32> %tmp, <2 x i32>* %__a, align 8
-  %tmp1 = load <2 x i32>, <2 x i32>* %xU32x2, align 8
-  store <2 x i32> %tmp1, <2 x i32>* %__b, align 8
-  %tmp2 = load <2 x i32>, <2 x i32>* %__a, align 8
+  %tmp = load <2 x i32>, ptr %xU32x2, align 8
+  store <2 x i32> %tmp, ptr %__a, align 8
+  %tmp1 = load <2 x i32>, ptr %xU32x2, align 8
+  store <2 x i32> %tmp1, ptr %__b, align 8
+  %tmp2 = load <2 x i32>, ptr %__a, align 8
   %tmp3 = bitcast <2 x i32> %tmp2 to <8 x i8>
-  %tmp4 = load <2 x i32>, <2 x i32>* %__b, align 8
+  %tmp4 = load <2 x i32>, ptr %__b, align 8
   %tmp5 = bitcast <2 x i32> %tmp4 to <8 x i8>
   %tmp6 = bitcast <8 x i8> %tmp3 to <2 x i32>
   %tmp7 = bitcast <8 x i8> %tmp5 to <2 x i32>
   %vext = shufflevector <2 x i32> %tmp6, <2 x i32> %tmp7, <2 x i32> <i32 1, i32 2>
-  store <2 x i32> %vext, <2 x i32>* %xU32x2, align 8
+  store <2 x i32> %vext, ptr %xU32x2, align 8
   ret void
 }
 
@@ -162,18 +162,18 @@ define void @test_vext_f32() nounwind ssp {
   %xF32x2 = alloca <2 x float>, align 8
   %__a = alloca <2 x float>, align 8
   %__b = alloca <2 x float>, align 8
-  %tmp = load <2 x float>, <2 x float>* %xF32x2, align 8
-  store <2 x float> %tmp, <2 x float>* %__a, align 8
-  %tmp1 = load <2 x float>, <2 x float>* %xF32x2, align 8
-  store <2 x float> %tmp1, <2 x float>* %__b, align 8
-  %tmp2 = load <2 x float>, <2 x float>* %__a, align 8
+  %tmp = load <2 x float>, ptr %xF32x2, align 8
+  store <2 x float> %tmp, ptr %__a, align 8
+  %tmp1 = load <2 x float>, ptr %xF32x2, align 8
+  store <2 x float> %tmp1, ptr %__b, align 8
+  %tmp2 = load <2 x float>, ptr %__a, align 8
   %tmp3 = bitcast <2 x float> %tmp2 to <8 x i8>
-  %tmp4 = load <2 x float>, <2 x float>* %__b, align 8
+  %tmp4 = load <2 x float>, ptr %__b, align 8
   %tmp5 = bitcast <2 x float> %tmp4 to <8 x i8>
   %tmp6 = bitcast <8 x i8> %tmp3 to <2 x float>
   %tmp7 = bitcast <8 x i8> %tmp5 to <2 x float>
   %vext = shufflevector <2 x float> %tmp6, <2 x float> %tmp7, <2 x i32> <i32 1, i32 2>
-  store <2 x float> %vext, <2 x float>* %xF32x2, align 8
+  store <2 x float> %vext, ptr %xF32x2, align 8
   ret void
 }
 
@@ -184,18 +184,18 @@ define void @test_vext_s64() nounwind ssp {
   %xS64x1 = alloca <1 x i64>, align 8
   %__a = alloca <1 x i64>, align 8
   %__b = alloca <1 x i64>, align 8
-  %tmp = load <1 x i64>, <1 x i64>* %xS64x1, align 8
-  store <1 x i64> %tmp, <1 x i64>* %__a, align 8
-  %tmp1 = load <1 x i64>, <1 x i64>* %xS64x1, align 8
-  store <1 x i64> %tmp1, <1 x i64>* %__b, align 8
-  %tmp2 = load <1 x i64>, <1 x i64>* %__a, align 8
+  %tmp = load <1 x i64>, ptr %xS64x1, align 8
+  store <1 x i64> %tmp, ptr %__a, align 8
+  %tmp1 = load <1 x i64>, ptr %xS64x1, align 8
+  store <1 x i64> %tmp1, ptr %__b, align 8
+  %tmp2 = load <1 x i64>, ptr %__a, align 8
   %tmp3 = bitcast <1 x i64> %tmp2 to <8 x i8>
-  %tmp4 = load <1 x i64>, <1 x i64>* %__b, align 8
+  %tmp4 = load <1 x i64>, ptr %__b, align 8
   %tmp5 = bitcast <1 x i64> %tmp4 to <8 x i8>
   %tmp6 = bitcast <8 x i8> %tmp3 to <1 x i64>
   %tmp7 = bitcast <8 x i8> %tmp5 to <1 x i64>
   %vext = shufflevector <1 x i64> %tmp6, <1 x i64> %tmp7, <1 x i32> <i32 1>
-  store <1 x i64> %vext, <1 x i64>* %xS64x1, align 8
+  store <1 x i64> %vext, ptr %xS64x1, align 8
   ret void
 }
 
@@ -206,18 +206,18 @@ define void @test_vext_u64() nounwind ssp {
   %xU64x1 = alloca <1 x i64>, align 8
   %__a = alloca <1 x i64>, align 8
   %__b = alloca <1 x i64>, align 8
-  %tmp = load <1 x i64>, <1 x i64>* %xU64x1, align 8
-  store <1 x i64> %tmp, <1 x i64>* %__a, align 8
-  %tmp1 = load <1 x i64>, <1 x i64>* %xU64x1, align 8
-  store <1 x i64> %tmp1, <1 x i64>* %__b, align 8
-  %tmp2 = load <1 x i64>, <1 x i64>* %__a, align 8
+  %tmp = load <1 x i64>, ptr %xU64x1, align 8
+  store <1 x i64> %tmp, ptr %__a, align 8
+  %tmp1 = load <1 x i64>, ptr %xU64x1, align 8
+  store <1 x i64> %tmp1, ptr %__b, align 8
+  %tmp2 = load <1 x i64>, ptr %__a, align 8
   %tmp3 = bitcast <1 x i64> %tmp2 to <8 x i8>
-  %tmp4 = load <1 x i64>, <1 x i64>* %__b, align 8
+  %tmp4 = load <1 x i64>, ptr %__b, align 8
   %tmp5 = bitcast <1 x i64> %tmp4 to <8 x i8>
   %tmp6 = bitcast <8 x i8> %tmp3 to <1 x i64>
   %tmp7 = bitcast <8 x i8> %tmp5 to <1 x i64>
   %vext = shufflevector <1 x i64> %tmp6, <1 x i64> %tmp7, <1 x i32> <i32 1>
-  store <1 x i64> %vext, <1 x i64>* %xU64x1, align 8
+  store <1 x i64> %vext, ptr %xU64x1, align 8
   ret void
 }
 
@@ -227,14 +227,14 @@ define void @test_vextq_s8() nounwind ssp {
   %xS8x16 = alloca <16 x i8>, align 16
   %__a = alloca <16 x i8>, align 16
   %__b = alloca <16 x i8>, align 16
-  %tmp = load <16 x i8>, <16 x i8>* %xS8x16, align 16
-  store <16 x i8> %tmp, <16 x i8>* %__a, align 16
-  %tmp1 = load <16 x i8>, <16 x i8>* %xS8x16, align 16
-  store <16 x i8> %tmp1, <16 x i8>* %__b, align 16
-  %tmp2 = load <16 x i8>, <16 x i8>* %__a, align 16
-  %tmp3 = load <16 x i8>, <16 x i8>* %__b, align 16
+  %tmp = load <16 x i8>, ptr %xS8x16, align 16
+  store <16 x i8> %tmp, ptr %__a, align 16
+  %tmp1 = load <16 x i8>, ptr %xS8x16, align 16
+  store <16 x i8> %tmp1, ptr %__b, align 16
+  %tmp2 = load <16 x i8>, ptr %__a, align 16
+  %tmp3 = load <16 x i8>, ptr %__b, align 16
   %vext = shufflevector <16 x i8> %tmp2, <16 x i8> %tmp3, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
-  store <16 x i8> %vext, <16 x i8>* %xS8x16, align 16
+  store <16 x i8> %vext, ptr %xS8x16, align 16
   ret void
 }
 
@@ -244,14 +244,14 @@ define void @test_vextq_u8() nounwind ssp {
   %xU8x16 = alloca <16 x i8>, align 16
   %__a = alloca <16 x i8>, align 16
   %__b = alloca <16 x i8>, align 16
-  %tmp = load <16 x i8>, <16 x i8>* %xU8x16, align 16
-  store <16 x i8> %tmp, <16 x i8>* %__a, align 16
-  %tmp1 = load <16 x i8>, <16 x i8>* %xU8x16, align 16
-  store <16 x i8> %tmp1, <16 x i8>* %__b, align 16
-  %tmp2 = load <16 x i8>, <16 x i8>* %__a, align 16
-  %tmp3 = load <16 x i8>, <16 x i8>* %__b, align 16
+  %tmp = load <16 x i8>, ptr %xU8x16, align 16
+  store <16 x i8> %tmp, ptr %__a, align 16
+  %tmp1 = load <16 x i8>, ptr %xU8x16, align 16
+  store <16 x i8> %tmp1, ptr %__b, align 16
+  %tmp2 = load <16 x i8>, ptr %__a, align 16
+  %tmp3 = load <16 x i8>, ptr %__b, align 16
   %vext = shufflevector <16 x i8> %tmp2, <16 x i8> %tmp3, <16 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20>
-  store <16 x i8> %vext, <16 x i8>* %xU8x16, align 16
+  store <16 x i8> %vext, ptr %xU8x16, align 16
   ret void
 }
 
@@ -261,14 +261,14 @@ define void @test_vextq_p8() nounwind ssp {
   %xP8x16 = alloca <16 x i8>, align 16
   %__a = alloca <16 x i8>, align 16
   %__b = alloca <16 x i8>, align 16
-  %tmp = load <16 x i8>, <16 x i8>* %xP8x16, align 16
-  store <16 x i8> %tmp, <16 x i8>* %__a, align 16
-  %tmp1 = load <16 x i8>, <16 x i8>* %xP8x16, align 16
-  store <16 x i8> %tmp1, <16 x i8>* %__b, align 16
-  %tmp2 = load <16 x i8>, <16 x i8>* %__a, align 16
-  %tmp3 = load <16 x i8>, <16 x i8>* %__b, align 16
+  %tmp = load <16 x i8>, ptr %xP8x16, align 16
+  store <16 x i8> %tmp, ptr %__a, align 16
+  %tmp1 = load <16 x i8>, ptr %xP8x16, align 16
+  store <16 x i8> %tmp1, ptr %__b, align 16
+  %tmp2 = load <16 x i8>, ptr %__a, align 16
+  %tmp3 = load <16 x i8>, ptr %__b, align 16
   %vext = shufflevector <16 x i8> %tmp2, <16 x i8> %tmp3, <16 x i32> <i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21>
-  store <16 x i8> %vext, <16 x i8>* %xP8x16, align 16
+  store <16 x i8> %vext, ptr %xP8x16, align 16
   ret void
 }
 
@@ -278,18 +278,18 @@ define void @test_vextq_s16() nounwind ssp {
   %xS16x8 = alloca <8 x i16>, align 16
   %__a = alloca <8 x i16>, align 16
   %__b = alloca <8 x i16>, align 16
-  %tmp = load <8 x i16>, <8 x i16>* %xS16x8, align 16
-  store <8 x i16> %tmp, <8 x i16>* %__a, align 16
-  %tmp1 = load <8 x i16>, <8 x i16>* %xS16x8, align 16
-  store <8 x i16> %tmp1, <8 x i16>* %__b, align 16
-  %tmp2 = load <8 x i16>, <8 x i16>* %__a, align 16
+  %tmp = load <8 x i16>, ptr %xS16x8, align 16
+  store <8 x i16> %tmp, ptr %__a, align 16
+  %tmp1 = load <8 x i16>, ptr %xS16x8, align 16
+  store <8 x i16> %tmp1, ptr %__b, align 16
+  %tmp2 = load <8 x i16>, ptr %__a, align 16
   %tmp3 = bitcast <8 x i16> %tmp2 to <16 x i8>
-  %tmp4 = load <8 x i16>, <8 x i16>* %__b, align 16
+  %tmp4 = load <8 x i16>, ptr %__b, align 16
   %tmp5 = bitcast <8 x i16> %tmp4 to <16 x i8>
   %tmp6 = bitcast <16 x i8> %tmp3 to <8 x i16>
   %tmp7 = bitcast <16 x i8> %tmp5 to <8 x i16>
   %vext = shufflevector <8 x i16> %tmp6, <8 x i16> %tmp7, <8 x i32> <i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>
-  store <8 x i16> %vext, <8 x i16>* %xS16x8, align 16
+  store <8 x i16> %vext, ptr %xS16x8, align 16
   ret void
 }
 
@@ -299,18 +299,18 @@ define void @test_vextq_u16() nounwind ssp {
   %xU16x8 = alloca <8 x i16>, align 16
   %__a = alloca <8 x i16>, align 16
   %__b = alloca <8 x i16>, align 16
-  %tmp = load <8 x i16>, <8 x i16>* %xU16x8, align 16
-  store <8 x i16> %tmp, <8 x i16>* %__a, align 16
-  %tmp1 = load <8 x i16>, <8 x i16>* %xU16x8, align 16
-  store <8 x i16> %tmp1, <8 x i16>* %__b, align 16
-  %tmp2 = load <8 x i16>, <8 x i16>* %__a, align 16
+  %tmp = load <8 x i16>, ptr %xU16x8, align 16
+  store <8 x i16> %tmp, ptr %__a, align 16
+  %tmp1 = load <8 x i16>, ptr %xU16x8, align 16
+  store <8 x i16> %tmp1, ptr %__b, align 16
+  %tmp2 = load <8 x i16>, ptr %__a, align 16
   %tmp3 = bitcast <8 x i16> %tmp2 to <16 x i8>
-  %tmp4 = load <8 x i16>, <8 x i16>* %__b, align 16
+  %tmp4 = load <8 x i16>, ptr %__b, align 16
   %tmp5 = bitcast <8 x i16> %tmp4 to <16 x i8>
   %tmp6 = bitcast <16 x i8> %tmp3 to <8 x i16>
   %tmp7 = bitcast <16 x i8> %tmp5 to <8 x i16>
   %vext = shufflevector <8 x i16> %tmp6, <8 x i16> %tmp7, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
-  store <8 x i16> %vext, <8 x i16>* %xU16x8, align 16
+  store <8 x i16> %vext, ptr %xU16x8, align 16
   ret void
 }
 
@@ -320,18 +320,18 @@ define void @test_vextq_p16() nounwind ssp {
   %xP16x8 = alloca <8 x i16>, align 16
   %__a = alloca <8 x i16>, align 16
   %__b = alloca <8 x i16>, align 16
-  %tmp = load <8 x i16>, <8 x i16>* %xP16x8, align 16
-  store <8 x i16> %tmp, <8 x i16>* %__a, align 16
-  %tmp1 = load <8 x i16>, <8 x i16>* %xP16x8, align 16
-  store <8 x i16> %tmp1, <8 x i16>* %__b, align 16
-  %tmp2 = load <8 x i16>, <8 x i16>* %__a, align 16
+  %tmp = load <8 x i16>, ptr %xP16x8, align 16
+  store <8 x i16> %tmp, ptr %__a, align 16
+  %tmp1 = load <8 x i16>, ptr %xP16x8, align 16
+  store <8 x i16> %tmp1, ptr %__b, align 16
+  %tmp2 = load <8 x i16>, ptr %__a, align 16
   %tmp3 = bitcast <8 x i16> %tmp2 to <16 x i8>
-  %tmp4 = load <8 x i16>, <8 x i16>* %__b, align 16
+  %tmp4 = load <8 x i16>, ptr %__b, align 16
   %tmp5 = bitcast <8 x i16> %tmp4 to <16 x i8>
   %tmp6 = bitcast <16 x i8> %tmp3 to <8 x i16>
   %tmp7 = bitcast <16 x i8> %tmp5 to <8 x i16>
   %vext = shufflevector <8 x i16> %tmp6, <8 x i16> %tmp7, <8 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12>
-  store <8 x i16> %vext, <8 x i16>* %xP16x8, align 16
+  store <8 x i16> %vext, ptr %xP16x8, align 16
   ret void
 }
 
@@ -341,18 +341,18 @@ define void @test_vextq_s32() nounwind ssp {
   %xS32x4 = alloca <4 x i32>, align 16
   %__a = alloca <4 x i32>, align 16
   %__b = alloca <4 x i32>, align 16
-  %tmp = load <4 x i32>, <4 x i32>* %xS32x4, align 16
-  store <4 x i32> %tmp, <4 x i32>* %__a, align 16
-  %tmp1 = load <4 x i32>, <4 x i32>* %xS32x4, align 16
-  store <4 x i32> %tmp1, <4 x i32>* %__b, align 16
-  %tmp2 = load <4 x i32>, <4 x i32>* %__a, align 16
+  %tmp = load <4 x i32>, ptr %xS32x4, align 16
+  store <4 x i32> %tmp, ptr %__a, align 16
+  %tmp1 = load <4 x i32>, ptr %xS32x4, align 16
+  store <4 x i32> %tmp1, ptr %__b, align 16
+  %tmp2 = load <4 x i32>, ptr %__a, align 16
   %tmp3 = bitcast <4 x i32> %tmp2 to <16 x i8>
-  %tmp4 = load <4 x i32>, <4 x i32>* %__b, align 16
+  %tmp4 = load <4 x i32>, ptr %__b, align 16
   %tmp5 = bitcast <4 x i32> %tmp4 to <16 x i8>
   %tmp6 = bitcast <16 x i8> %tmp3 to <4 x i32>
   %tmp7 = bitcast <16 x i8> %tmp5 to <4 x i32>
   %vext = shufflevector <4 x i32> %tmp6, <4 x i32> %tmp7, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
-  store <4 x i32> %vext, <4 x i32>* %xS32x4, align 16
+  store <4 x i32> %vext, ptr %xS32x4, align 16
   ret void
 }
 
@@ -362,18 +362,18 @@ define void @test_vextq_u32() nounwind ssp {
   %xU32x4 = alloca <4 x i32>, align 16
   %__a = alloca <4 x i32>, align 16
   %__b = alloca <4 x i32>, align 16
-  %tmp = load <4 x i32>, <4 x i32>* %xU32x4, align 16
-  store <4 x i32> %tmp, <4 x i32>* %__a, align 16
-  %tmp1 = load <4 x i32>, <4 x i32>* %xU32x4, align 16
-  store <4 x i32> %tmp1, <4 x i32>* %__b, align 16
-  %tmp2 = load <4 x i32>, <4 x i32>* %__a, align 16
+  %tmp = load <4 x i32>, ptr %xU32x4, align 16
+  store <4 x i32> %tmp, ptr %__a, align 16
+  %tmp1 = load <4 x i32>, ptr %xU32x4, align 16
+  store <4 x i32> %tmp1, ptr %__b, align 16
+  %tmp2 = load <4 x i32>, ptr %__a, align 16
   %tmp3 = bitcast <4 x i32> %tmp2 to <16 x i8>
-  %tmp4 = load <4 x i32>, <4 x i32>* %__b, align 16
+  %tmp4 = load <4 x i32>, ptr %__b, align 16
   %tmp5 = bitcast <4 x i32> %tmp4 to <16 x i8>
   %tmp6 = bitcast <16 x i8> %tmp3 to <4 x i32>
   %tmp7 = bitcast <16 x i8> %tmp5 to <4 x i32>
   %vext = shufflevector <4 x i32> %tmp6, <4 x i32> %tmp7, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
-  store <4 x i32> %vext, <4 x i32>* %xU32x4, align 16
+  store <4 x i32> %vext, ptr %xU32x4, align 16
   ret void
 }
 
@@ -383,18 +383,18 @@ define void @test_vextq_f32() nounwind ssp {
   %xF32x4 = alloca <4 x float>, align 16
   %__a = alloca <4 x float>, align 16
   %__b = alloca <4 x float>, align 16
-  %tmp = load <4 x float>, <4 x float>* %xF32x4, align 16
-  store <4 x float> %tmp, <4 x float>* %__a, align 16
-  %tmp1 = load <4 x float>, <4 x float>* %xF32x4, align 16
-  store <4 x float> %tmp1, <4 x float>* %__b, align 16
-  %tmp2 = load <4 x float>, <4 x float>* %__a, align 16
+  %tmp = load <4 x float>, ptr %xF32x4, align 16
+  store <4 x float> %tmp, ptr %__a, align 16
+  %tmp1 = load <4 x float>, ptr %xF32x4, align 16
+  store <4 x float> %tmp1, ptr %__b, align 16
+  %tmp2 = load <4 x float>, ptr %__a, align 16
   %tmp3 = bitcast <4 x float> %tmp2 to <16 x i8>
-  %tmp4 = load <4 x float>, <4 x float>* %__b, align 16
+  %tmp4 = load <4 x float>, ptr %__b, align 16
   %tmp5 = bitcast <4 x float> %tmp4 to <16 x i8>
   %tmp6 = bitcast <16 x i8> %tmp3 to <4 x float>
   %tmp7 = bitcast <16 x i8> %tmp5 to <4 x float>
   %vext = shufflevector <4 x float> %tmp6, <4 x float> %tmp7, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-  store <4 x float> %vext, <4 x float>* %xF32x4, align 16
+  store <4 x float> %vext, ptr %xF32x4, align 16
   ret void
 }
 
@@ -404,18 +404,18 @@ define void @test_vextq_s64() nounwind ssp {
   %xS64x2 = alloca <2 x i64>, align 16
   %__a = alloca <2 x i64>, align 16
   %__b = alloca <2 x i64>, align 16
-  %tmp = load <2 x i64>, <2 x i64>* %xS64x2, align 16
-  store <2 x i64> %tmp, <2 x i64>* %__a, align 16
-  %tmp1 = load <2 x i64>, <2 x i64>* %xS64x2, align 16
-  store <2 x i64> %tmp1, <2 x i64>* %__b, align 16
-  %tmp2 = load <2 x i64>, <2 x i64>* %__a, align 16
+  %tmp = load <2 x i64>, ptr %xS64x2, align 16
+  store <2 x i64> %tmp, ptr %__a, align 16
+  %tmp1 = load <2 x i64>, ptr %xS64x2, align 16
+  store <2 x i64> %tmp1, ptr %__b, align 16
+  %tmp2 = load <2 x i64>, ptr %__a, align 16
   %tmp3 = bitcast <2 x i64> %tmp2 to <16 x i8>
-  %tmp4 = load <2 x i64>, <2 x i64>* %__b, align 16
+  %tmp4 = load <2 x i64>, ptr %__b, align 16
   %tmp5 = bitcast <2 x i64> %tmp4 to <16 x i8>
   %tmp6 = bitcast <16 x i8> %tmp3 to <2 x i64>
   %tmp7 = bitcast <16 x i8> %tmp5 to <2 x i64>
   %vext = shufflevector <2 x i64> %tmp6, <2 x i64> %tmp7, <2 x i32> <i32 1, i32 2>
-  store <2 x i64> %vext, <2 x i64>* %xS64x2, align 16
+  store <2 x i64> %vext, ptr %xS64x2, align 16
   ret void
 }
 
@@ -425,18 +425,18 @@ define void @test_vextq_u64() nounwind ssp {
   %xU64x2 = alloca <2 x i64>, align 16
   %__a = alloca <2 x i64>, align 16
   %__b = alloca <2 x i64>, align 16
-  %tmp = load <2 x i64>, <2 x i64>* %xU64x2, align 16
-  store <2 x i64> %tmp, <2 x i64>* %__a, align 16
-  %tmp1 = load <2 x i64>, <2 x i64>* %xU64x2, align 16
-  store <2 x i64> %tmp1, <2 x i64>* %__b, align 16
-  %tmp2 = load <2 x i64>, <2 x i64>* %__a, align 16
+  %tmp = load <2 x i64>, ptr %xU64x2, align 16
+  store <2 x i64> %tmp, ptr %__a, align 16
+  %tmp1 = load <2 x i64>, ptr %xU64x2, align 16
+  store <2 x i64> %tmp1, ptr %__b, align 16
+  %tmp2 = load <2 x i64>, ptr %__a, align 16
   %tmp3 = bitcast <2 x i64> %tmp2 to <16 x i8>
-  %tmp4 = load <2 x i64>, <2 x i64>* %__b, align 16
+  %tmp4 = load <2 x i64>, ptr %__b, align 16
   %tmp5 = bitcast <2 x i64> %tmp4 to <16 x i8>
   %tmp6 = bitcast <16 x i8> %tmp3 to <2 x i64>
   %tmp7 = bitcast <16 x i8> %tmp5 to <2 x i64>
   %vext = shufflevector <2 x i64> %tmp6, <2 x i64> %tmp7, <2 x i32> <i32 1, i32 2>
-  store <2 x i64> %vext, <2 x i64>* %xU64x2, align 16
+  store <2 x i64> %vext, ptr %xU64x2, align 16
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-vhadd.ll b/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
index 396d9efe4566d..bfb1cfbf6cf8c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
@@ -1,158 +1,158 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @shadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @shadd8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: shadd8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    shadd.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.shadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @shadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @shadd16b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: shadd16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    shadd.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.shadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @shadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @shadd4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: shadd4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    shadd.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.shadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @shadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @shadd8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: shadd8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    shadd.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.shadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @shadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @shadd2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: shadd2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    shadd.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.shadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @shadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @shadd4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: shadd4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    shadd.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.shadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
 
-define <8 x i8> @uhadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @uhadd8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uhadd8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    uhadd.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.uhadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @uhadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @uhadd16b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uhadd16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    uhadd.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @uhadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @uhadd4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uhadd4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    uhadd.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.uhadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @uhadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @uhadd8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uhadd8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    uhadd.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @uhadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @uhadd2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uhadd2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    uhadd.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.uhadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @uhadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @uhadd4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uhadd4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    uhadd.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.uhadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -173,163 +173,163 @@ declare <16 x i8> @llvm.aarch64.neon.uhadd.v16i8(<16 x i8>, <16 x i8>) nounwind
 declare <8 x i16> @llvm.aarch64.neon.uhadd.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 declare <4 x i32> @llvm.aarch64.neon.uhadd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 
-define <8 x i8> @srhadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @srhadd8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: srhadd8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    srhadd.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.srhadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @srhadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @srhadd16b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: srhadd16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    srhadd.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.srhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @srhadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @srhadd4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: srhadd4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    srhadd.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.srhadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @srhadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @srhadd8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: srhadd8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    srhadd.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.srhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @srhadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @srhadd2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: srhadd2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    srhadd.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.srhadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @srhadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @srhadd4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: srhadd4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    srhadd.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.srhadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
 
-define <8 x i8> @urhadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @urhadd8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: urhadd8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    urhadd.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.urhadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @urhadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @urhadd16b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: urhadd16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    urhadd.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.urhadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @urhadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @urhadd4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: urhadd4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    urhadd.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.urhadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @urhadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @urhadd8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: urhadd8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    urhadd.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.urhadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @urhadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @urhadd2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: urhadd2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    urhadd.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.urhadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @urhadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @urhadd4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: urhadd4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    urhadd.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.urhadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
 
-define void @testLowerToSRHADD8b(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) nounwind {
+define void @testLowerToSRHADD8b(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSRHADD8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    srhadd.8b v0, v0, v1
@@ -341,11 +341,11 @@ define void @testLowerToSRHADD8b(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest
   %add2 = add <8 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %resulti16 = lshr <8 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <8 x i16> %resulti16 to <8 x i8>
-  store <8 x i8> %result, <8 x i8>* %dest, align 8
+  store <8 x i8> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSRHADD4h(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %dest) nounwind {
+define void @testLowerToSRHADD4h(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSRHADD4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    srhadd.4h v0, v0, v1
@@ -357,11 +357,11 @@ define void @testLowerToSRHADD4h(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %d
   %add2 = add <4 x i32> %add1, <i32 1, i32 1, i32 1, i32 1>
   %resulti16 = lshr <4 x i32> %add2, <i32 1, i32 1, i32 1, i32 1>
   %result = trunc <4 x i32> %resulti16 to <4 x i16>
-  store <4 x i16> %result, <4 x i16>* %dest, align 8
+  store <4 x i16> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSRHADD2s(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %dest) nounwind {
+define void @testLowerToSRHADD2s(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSRHADD2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    srhadd.2s v0, v0, v1
@@ -373,11 +373,11 @@ define void @testLowerToSRHADD2s(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %d
   %add2 = add <2 x i64> %add1, <i64 1, i64 1>
   %resulti16 = lshr <2 x i64> %add2, <i64 1, i64 1>
   %result = trunc <2 x i64> %resulti16 to <2 x i32>
-  store <2 x i32> %result, <2 x i32>* %dest, align 8
+  store <2 x i32> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSRHADD16b(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
+define void @testLowerToSRHADD16b(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSRHADD16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    srhadd.16b v0, v0, v1
@@ -389,11 +389,11 @@ define void @testLowerToSRHADD16b(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %
   %add2 = add <16 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %resulti16 = lshr <16 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <16 x i16> %resulti16 to <16 x i8>
-  store <16 x i8> %result, <16 x i8>* %dest, align 16
+  store <16 x i8> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToSRHADD8h(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %dest) nounwind {
+define void @testLowerToSRHADD8h(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSRHADD8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    srhadd.8h v0, v0, v1
@@ -405,11 +405,11 @@ define void @testLowerToSRHADD8h(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %d
   %add2 = add <8 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %resulti16 = lshr <8 x i32> %add2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %result = trunc <8 x i32> %resulti16 to <8 x i16>
-  store <8 x i16> %result, <8 x i16>* %dest, align 16
+  store <8 x i16> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToSRHADD4s(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %dest) nounwind {
+define void @testLowerToSRHADD4s(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSRHADD4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    srhadd.4s v0, v0, v1
@@ -421,11 +421,11 @@ define void @testLowerToSRHADD4s(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %d
   %add2 = add <4 x i64> %add1, <i64 1, i64 1, i64 1, i64 1>
   %resulti16 = lshr <4 x i64> %add2, <i64 1, i64 1, i64 1, i64 1>
   %result = trunc <4 x i64> %resulti16 to <4 x i32>
-  store <4 x i32> %result, <4 x i32>* %dest, align 16
+  store <4 x i32> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToSHADD8b(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) nounwind {
+define void @testLowerToSHADD8b(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSHADD8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shadd.8b v0, v0, v1
@@ -436,11 +436,11 @@ define void @testLowerToSHADD8b(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest)
   %add = add <8 x i16> %sextsrc1, %sextsrc2
   %resulti16 = lshr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <8 x i16> %resulti16 to <8 x i8>
-  store <8 x i8> %result, <8 x i8>* %dest, align 8
+  store <8 x i8> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSHADD4h(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %dest) nounwind {
+define void @testLowerToSHADD4h(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSHADD4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shadd.4h v0, v0, v1
@@ -451,11 +451,11 @@ define void @testLowerToSHADD4h(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %de
   %add = add <4 x i32> %sextsrc1, %sextsrc2
   %resulti16 = lshr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
   %result = trunc <4 x i32> %resulti16 to <4 x i16>
-  store <4 x i16> %result, <4 x i16>* %dest, align 8
+  store <4 x i16> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSHADD2s(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %dest) nounwind {
+define void @testLowerToSHADD2s(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSHADD2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shadd.2s v0, v0, v1
@@ -466,11 +466,11 @@ define void @testLowerToSHADD2s(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %de
   %add = add <2 x i64> %sextsrc1, %sextsrc2
   %resulti16 = lshr <2 x i64> %add, <i64 1, i64 1>
   %result = trunc <2 x i64> %resulti16 to <2 x i32>
-  store <2 x i32> %result, <2 x i32>* %dest, align 8
+  store <2 x i32> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSHADD16b(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
+define void @testLowerToSHADD16b(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSHADD16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shadd.16b v0, v0, v1
@@ -481,11 +481,11 @@ define void @testLowerToSHADD16b(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %d
   %add = add <16 x i16> %sextsrc1, %sextsrc2
   %resulti16 = lshr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <16 x i16> %resulti16 to <16 x i8>
-  store <16 x i8> %result, <16 x i8>* %dest, align 16
+  store <16 x i8> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToSHADD8h(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %dest) nounwind {
+define void @testLowerToSHADD8h(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSHADD8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shadd.8h v0, v0, v1
@@ -496,11 +496,11 @@ define void @testLowerToSHADD8h(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %de
   %add = add <8 x i32> %sextsrc1, %sextsrc2
   %resulti16 = lshr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %result = trunc <8 x i32> %resulti16 to <8 x i16>
-  store <8 x i16> %result, <8 x i16>* %dest, align 16
+  store <8 x i16> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToSHADD4s(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %dest) nounwind {
+define void @testLowerToSHADD4s(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSHADD4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shadd.4s v0, v0, v1
@@ -511,11 +511,11 @@ define void @testLowerToSHADD4s(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %de
   %add = add <4 x i64> %sextsrc1, %sextsrc2
   %resulti16 = lshr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
   %result = trunc <4 x i64> %resulti16 to <4 x i32>
-  store <4 x i32> %result, <4 x i32>* %dest, align 16
+  store <4 x i32> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToURHADD8b(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) nounwind {
+define void @testLowerToURHADD8b(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToURHADD8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    urhadd.8b v0, v0, v1
@@ -527,11 +527,11 @@ define void @testLowerToURHADD8b(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest
   %add2 = add <8 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %resulti16 = lshr <8 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <8 x i16> %resulti16 to <8 x i8>
-  store <8 x i8> %result, <8 x i8>* %dest, align 8
+  store <8 x i8> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToURHADD4h(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %dest) nounwind {
+define void @testLowerToURHADD4h(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToURHADD4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    urhadd.4h v0, v0, v1
@@ -543,11 +543,11 @@ define void @testLowerToURHADD4h(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %d
   %add2 = add <4 x i32> %add1, <i32 1, i32 1, i32 1, i32 1>
   %resulti16 = lshr <4 x i32> %add2, <i32 1, i32 1, i32 1, i32 1>
   %result = trunc <4 x i32> %resulti16 to <4 x i16>
-  store <4 x i16> %result, <4 x i16>* %dest, align 8
+  store <4 x i16> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToURHADD2s(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %dest) nounwind {
+define void @testLowerToURHADD2s(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToURHADD2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    urhadd.2s v0, v0, v1
@@ -559,11 +559,11 @@ define void @testLowerToURHADD2s(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %d
   %add2 = add <2 x i64> %add1, <i64 1, i64 1>
   %resulti16 = lshr <2 x i64> %add2, <i64 1, i64 1>
   %result = trunc <2 x i64> %resulti16 to <2 x i32>
-  store <2 x i32> %result, <2 x i32>* %dest, align 8
+  store <2 x i32> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToURHADD16b(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
+define void @testLowerToURHADD16b(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToURHADD16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    urhadd.16b v0, v0, v1
@@ -575,11 +575,11 @@ define void @testLowerToURHADD16b(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %
   %add2 = add <16 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %resulti16 = lshr <16 x i16> %add2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <16 x i16> %resulti16 to <16 x i8>
-  store <16 x i8> %result, <16 x i8>* %dest, align 16
+  store <16 x i8> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToURHADD8h(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %dest) nounwind {
+define void @testLowerToURHADD8h(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToURHADD8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    urhadd.8h v0, v0, v1
@@ -591,11 +591,11 @@ define void @testLowerToURHADD8h(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %d
   %add2 = add <8 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %resulti16 = lshr <8 x i32> %add2, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %result = trunc <8 x i32> %resulti16 to <8 x i16>
-  store <8 x i16> %result, <8 x i16>* %dest, align 16
+  store <8 x i16> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToURHADD4s(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %dest) nounwind {
+define void @testLowerToURHADD4s(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToURHADD4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    urhadd.4s v0, v0, v1
@@ -607,11 +607,11 @@ define void @testLowerToURHADD4s(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %d
   %add2 = add <4 x i64> %add1, <i64 1, i64 1, i64 1, i64 1>
   %resulti16 = lshr <4 x i64> %add2, <i64 1, i64 1, i64 1, i64 1>
   %result = trunc <4 x i64> %resulti16 to <4 x i32>
-  store <4 x i32> %result, <4 x i32>* %dest, align 16
+  store <4 x i32> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToUHADD8b(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest) nounwind {
+define void @testLowerToUHADD8b(<8 x i8> %src1, <8 x i8> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToUHADD8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uhadd.8b v0, v0, v1
@@ -622,11 +622,11 @@ define void @testLowerToUHADD8b(<8 x i8> %src1, <8 x i8> %src2, <8 x i8>* %dest)
   %add = add <8 x i16> %zextsrc1, %zextsrc2
   %resulti16 = lshr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <8 x i16> %resulti16 to <8 x i8>
-  store <8 x i8> %result, <8 x i8>* %dest, align 8
+  store <8 x i8> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToUHADD4h(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %dest) nounwind {
+define void @testLowerToUHADD4h(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToUHADD4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uhadd.4h v0, v0, v1
@@ -637,11 +637,11 @@ define void @testLowerToUHADD4h(<4 x i16> %src1, <4 x i16> %src2, <4 x i16>* %de
   %add = add <4 x i32> %zextsrc1, %zextsrc2
   %resulti16 = lshr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
   %result = trunc <4 x i32> %resulti16 to <4 x i16>
-  store <4 x i16> %result, <4 x i16>* %dest, align 8
+  store <4 x i16> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToUHADD2s(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %dest) nounwind {
+define void @testLowerToUHADD2s(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToUHADD2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uhadd.2s v0, v0, v1
@@ -652,11 +652,11 @@ define void @testLowerToUHADD2s(<2 x i32> %src1, <2 x i32> %src2, <2 x i32>* %de
   %add = add <2 x i64> %zextsrc1, %zextsrc2
   %resulti16 = lshr <2 x i64> %add, <i64 1, i64 1>
   %result = trunc <2 x i64> %resulti16 to <2 x i32>
-  store <2 x i32> %result, <2 x i32>* %dest, align 8
+  store <2 x i32> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToUHADD16b(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %dest) nounwind {
+define void @testLowerToUHADD16b(<16 x i8> %src1, <16 x i8> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToUHADD16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uhadd.16b v0, v0, v1
@@ -667,11 +667,11 @@ define void @testLowerToUHADD16b(<16 x i8> %src1, <16 x i8> %src2, <16 x i8>* %d
   %add = add <16 x i16> %zextsrc1, %zextsrc2
   %resulti16 = lshr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <16 x i16> %resulti16 to <16 x i8>
-  store <16 x i8> %result, <16 x i8>* %dest, align 16
+  store <16 x i8> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToUHADD8h(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %dest) nounwind {
+define void @testLowerToUHADD8h(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToUHADD8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uhadd.8h v0, v0, v1
@@ -682,11 +682,11 @@ define void @testLowerToUHADD8h(<8 x i16> %src1, <8 x i16> %src2, <8 x i16>* %de
   %add = add <8 x i32> %zextsrc1, %zextsrc2
   %resulti16 = lshr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %result = trunc <8 x i32> %resulti16 to <8 x i16>
-  store <8 x i16> %result, <8 x i16>* %dest, align 16
+  store <8 x i16> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToUHADD4s(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %dest) nounwind {
+define void @testLowerToUHADD4s(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToUHADD4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uhadd.4s v0, v0, v1
@@ -697,7 +697,7 @@ define void @testLowerToUHADD4s(<4 x i32> %src1, <4 x i32> %src2, <4 x i32>* %de
   %add = add <4 x i64> %zextsrc1, %zextsrc2
   %resulti16 = lshr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
   %result = trunc <4 x i64> %resulti16 to <4 x i32>
-  store <4 x i32> %result, <4 x i32>* %dest, align 16
+  store <4 x i32> %result, ptr %dest, align 16
   ret void
 }
 
@@ -878,7 +878,7 @@ define <4 x i16> @hadd8_zext_lsr(<4 x i8> %src1, <4 x i8> %src2) nounwind {
 
 
 
-define void @testLowerToSHADD8b_c(<8 x i8> %src1, <8 x i8>* %dest) nounwind {
+define void @testLowerToSHADD8b_c(<8 x i8> %src1, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSHADD8b_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.8b v1, #10
@@ -889,11 +889,11 @@ define void @testLowerToSHADD8b_c(<8 x i8> %src1, <8 x i8>* %dest) nounwind {
   %add = add <8 x i16> %sextsrc1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
   %resulti16 = lshr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <8 x i16> %resulti16 to <8 x i8>
-  store <8 x i8> %result, <8 x i8>* %dest, align 8
+  store <8 x i8> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSHADD4h_c(<4 x i16> %src1, <4 x i16>* %dest) nounwind {
+define void @testLowerToSHADD4h_c(<4 x i16> %src1, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSHADD4h_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.4h v1, #10
@@ -904,11 +904,11 @@ define void @testLowerToSHADD4h_c(<4 x i16> %src1, <4 x i16>* %dest) nounwind {
   %add = add <4 x i32> %sextsrc1, <i32 10, i32 10, i32 10, i32 10>
   %resulti16 = lshr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
   %result = trunc <4 x i32> %resulti16 to <4 x i16>
-  store <4 x i16> %result, <4 x i16>* %dest, align 8
+  store <4 x i16> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSHADD2s_c(<2 x i32> %src1, <2 x i32>* %dest) nounwind {
+define void @testLowerToSHADD2s_c(<2 x i32> %src1, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSHADD2s_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.2s v1, #10
@@ -919,11 +919,11 @@ define void @testLowerToSHADD2s_c(<2 x i32> %src1, <2 x i32>* %dest) nounwind {
   %add = add <2 x i64> %sextsrc1, <i64 10, i64 10>
   %resulti16 = lshr <2 x i64> %add, <i64 1, i64 1>
   %result = trunc <2 x i64> %resulti16 to <2 x i32>
-  store <2 x i32> %result, <2 x i32>* %dest, align 8
+  store <2 x i32> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToSHADD16b_c(<16 x i8> %src1, <16 x i8>* %dest) nounwind {
+define void @testLowerToSHADD16b_c(<16 x i8> %src1, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSHADD16b_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.16b v1, #10
@@ -934,11 +934,11 @@ define void @testLowerToSHADD16b_c(<16 x i8> %src1, <16 x i8>* %dest) nounwind {
   %add = add <16 x i16> %sextsrc1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
   %resulti16 = lshr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <16 x i16> %resulti16 to <16 x i8>
-  store <16 x i8> %result, <16 x i8>* %dest, align 16
+  store <16 x i8> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToSHADD8h_c(<8 x i16> %src1, <8 x i16>* %dest) nounwind {
+define void @testLowerToSHADD8h_c(<8 x i16> %src1, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSHADD8h_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.8h v1, #10
@@ -949,11 +949,11 @@ define void @testLowerToSHADD8h_c(<8 x i16> %src1, <8 x i16>* %dest) nounwind {
   %add = add <8 x i32> %sextsrc1, <i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10>
   %resulti16 = lshr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %result = trunc <8 x i32> %resulti16 to <8 x i16>
-  store <8 x i16> %result, <8 x i16>* %dest, align 16
+  store <8 x i16> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToSHADD4s_c(<4 x i32> %src1, <4 x i32>* %dest) nounwind {
+define void @testLowerToSHADD4s_c(<4 x i32> %src1, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToSHADD4s_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.4s v1, #10
@@ -964,11 +964,11 @@ define void @testLowerToSHADD4s_c(<4 x i32> %src1, <4 x i32>* %dest) nounwind {
   %add = add <4 x i64> %sextsrc1, <i64 10, i64 10, i64 10, i64 10>
   %resulti16 = lshr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
   %result = trunc <4 x i64> %resulti16 to <4 x i32>
-  store <4 x i32> %result, <4 x i32>* %dest, align 16
+  store <4 x i32> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToUHADD8b_c(<8 x i8> %src1, <8 x i8>* %dest) nounwind {
+define void @testLowerToUHADD8b_c(<8 x i8> %src1, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToUHADD8b_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.8b v1, #10
@@ -979,11 +979,11 @@ define void @testLowerToUHADD8b_c(<8 x i8> %src1, <8 x i8>* %dest) nounwind {
   %add = add <8 x i16> %zextsrc1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
   %resulti16 = lshr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <8 x i16> %resulti16 to <8 x i8>
-  store <8 x i8> %result, <8 x i8>* %dest, align 8
+  store <8 x i8> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToUHADD4h_c(<4 x i16> %src1, <4 x i16>* %dest) nounwind {
+define void @testLowerToUHADD4h_c(<4 x i16> %src1, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToUHADD4h_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.4h v1, #10
@@ -994,11 +994,11 @@ define void @testLowerToUHADD4h_c(<4 x i16> %src1, <4 x i16>* %dest) nounwind {
   %add = add <4 x i32> %zextsrc1, <i32 10, i32 10, i32 10, i32 10>
   %resulti16 = lshr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
   %result = trunc <4 x i32> %resulti16 to <4 x i16>
-  store <4 x i16> %result, <4 x i16>* %dest, align 8
+  store <4 x i16> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToUHADD2s_c(<2 x i32> %src1, <2 x i32>* %dest) nounwind {
+define void @testLowerToUHADD2s_c(<2 x i32> %src1, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToUHADD2s_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.2s v1, #10
@@ -1009,11 +1009,11 @@ define void @testLowerToUHADD2s_c(<2 x i32> %src1, <2 x i32>* %dest) nounwind {
   %add = add <2 x i64> %zextsrc1, <i64 10, i64 10>
   %resulti16 = lshr <2 x i64> %add, <i64 1, i64 1>
   %result = trunc <2 x i64> %resulti16 to <2 x i32>
-  store <2 x i32> %result, <2 x i32>* %dest, align 8
+  store <2 x i32> %result, ptr %dest, align 8
   ret void
 }
 
-define void @testLowerToUHADD16b_c(<16 x i8> %src1, <16 x i8>* %dest) nounwind {
+define void @testLowerToUHADD16b_c(<16 x i8> %src1, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToUHADD16b_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.16b v1, #10
@@ -1024,11 +1024,11 @@ define void @testLowerToUHADD16b_c(<16 x i8> %src1, <16 x i8>* %dest) nounwind {
   %add = add <16 x i16> %zextsrc1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
   %resulti16 = lshr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %result = trunc <16 x i16> %resulti16 to <16 x i8>
-  store <16 x i8> %result, <16 x i8>* %dest, align 16
+  store <16 x i8> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToUHADD8h_c(<8 x i16> %src1, <8 x i16>* %dest) nounwind {
+define void @testLowerToUHADD8h_c(<8 x i16> %src1, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToUHADD8h_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.8h v1, #10
@@ -1039,11 +1039,11 @@ define void @testLowerToUHADD8h_c(<8 x i16> %src1, <8 x i16>* %dest) nounwind {
   %add = add <8 x i32> %zextsrc1, <i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10>
   %resulti16 = lshr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %result = trunc <8 x i32> %resulti16 to <8 x i16>
-  store <8 x i16> %result, <8 x i16>* %dest, align 16
+  store <8 x i16> %result, ptr %dest, align 16
   ret void
 }
 
-define void @testLowerToUHADD4s_c(<4 x i32> %src1, <4 x i32>* %dest) nounwind {
+define void @testLowerToUHADD4s_c(<4 x i32> %src1, ptr %dest) nounwind {
 ; CHECK-LABEL: testLowerToUHADD4s_c:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.4s v1, #10
@@ -1054,7 +1054,7 @@ define void @testLowerToUHADD4s_c(<4 x i32> %src1, <4 x i32>* %dest) nounwind {
   %add = add <4 x i64> %zextsrc1, <i64 10, i64 10, i64 10, i64 10>
   %resulti16 = lshr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
   %result = trunc <4 x i64> %resulti16 to <4 x i32>
-  store <4 x i32> %result, <4 x i32>* %dest, align 16
+  store <4 x i32> %result, ptr %dest, align 16
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-vhsub.ll b/llvm/test/CodeGen/AArch64/arm64-vhsub.ll
index b2ee87f1e3fbf..b4e39d0b72497 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vhsub.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vhsub.ll
@@ -1,109 +1,109 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @shsub8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @shsub8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shsub8b:
 ;CHECK: shsub.8b
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.shsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @shsub16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @shsub16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shsub16b:
 ;CHECK: shsub.16b
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.shsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @shsub4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @shsub4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shsub4h:
 ;CHECK: shsub.4h
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.shsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @shsub8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @shsub8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shsub8h:
 ;CHECK: shsub.8h
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.shsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @shsub2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @shsub2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shsub2s:
 ;CHECK: shsub.2s
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.shsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @shsub4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @shsub4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shsub4s:
 ;CHECK: shsub.4s
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.shsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
 
-define <8 x i8> @uhsub8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @uhsub8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uhsub8b:
 ;CHECK: uhsub.8b
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.uhsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @uhsub16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @uhsub16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uhsub16b:
 ;CHECK: uhsub.16b
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.uhsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @uhsub4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @uhsub4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uhsub4h:
 ;CHECK: uhsub.4h
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.uhsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @uhsub8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @uhsub8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uhsub8h:
 ;CHECK: uhsub.8h
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.uhsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @uhsub2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @uhsub2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uhsub2s:
 ;CHECK: uhsub.2s
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.uhsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @uhsub4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @uhsub4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uhsub4s:
 ;CHECK: uhsub.4s
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.uhsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-virtual_base.ll b/llvm/test/CodeGen/AArch64/arm64-virtual_base.ll
index 26470bf0c1187..01d778bfd6af7 100644
--- a/llvm/test/CodeGen/AArch64/arm64-virtual_base.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-virtual_base.ll
@@ -2,36 +2,36 @@
 ; <rdar://13463602>
 
 %struct.Counter_Struct = type { i64, i64 }
-%struct.Bicubic_Patch_Struct = type { %struct.Method_Struct*, i32, %struct.Object_Struct*, %struct.Texture_Struct*, %struct.Interior_Struct*, %struct.Object_Struct*, %struct.Object_Struct*, %struct.Bounding_Box_Struct, i64, i32, i32, i32, [4 x [4 x [3 x double]]], [3 x double], double, double, %struct.Bezier_Node_Struct* }
-%struct.Method_Struct = type { i32 (%struct.Object_Struct*, %struct.Ray_Struct*, %struct.istack_struct*)*, i32 (double*, %struct.Object_Struct*)*, void (double*, %struct.Object_Struct*, %struct.istk_entry*)*, i8* (%struct.Object_Struct*)*, void (%struct.Object_Struct*, double*, %struct.Transform_Struct*)*, void (%struct.Object_Struct*, double*, %struct.Transform_Struct*)*, void (%struct.Object_Struct*, double*, %struct.Transform_Struct*)*, void (%struct.Object_Struct*, %struct.Transform_Struct*)*, void (%struct.Object_Struct*)*, void (%struct.Object_Struct*)* }
-%struct.Object_Struct = type { %struct.Method_Struct*, i32, %struct.Object_Struct*, %struct.Texture_Struct*, %struct.Interior_Struct*, %struct.Object_Struct*, %struct.Object_Struct*, %struct.Bounding_Box_Struct, i64 }
-%struct.Texture_Struct = type { i16, i16, i16, i32, float, float, float, %struct.Warps_Struct*, %struct.Pattern_Struct*, %struct.Blend_Map_Struct*, %union.anon.9, %struct.Texture_Struct*, %struct.Pigment_Struct*, %struct.Tnormal_Struct*, %struct.Finish_Struct*, %struct.Texture_Struct*, i32 }
-%struct.Warps_Struct = type { i16, %struct.Warps_Struct* }
-%struct.Pattern_Struct = type { i16, i16, i16, i32, float, float, float, %struct.Warps_Struct*, %struct.Pattern_Struct*, %struct.Blend_Map_Struct*, %union.anon.6 }
-%struct.Blend_Map_Struct = type { i16, i16, i16, i64, %struct.Blend_Map_Entry* }
+%struct.Bicubic_Patch_Struct = type { ptr, i32, ptr, ptr, ptr, ptr, ptr, %struct.Bounding_Box_Struct, i64, i32, i32, i32, [4 x [4 x [3 x double]]], [3 x double], double, double, ptr }
+%struct.Method_Struct = type { ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
+%struct.Object_Struct = type { ptr, i32, ptr, ptr, ptr, ptr, ptr, %struct.Bounding_Box_Struct, i64 }
+%struct.Texture_Struct = type { i16, i16, i16, i32, float, float, float, ptr, ptr, ptr, %union.anon.9, ptr, ptr, ptr, ptr, ptr, i32 }
+%struct.Warps_Struct = type { i16, ptr }
+%struct.Pattern_Struct = type { i16, i16, i16, i32, float, float, float, ptr, ptr, ptr, %union.anon.6 }
+%struct.Blend_Map_Struct = type { i16, i16, i16, i64, ptr }
 %struct.Blend_Map_Entry = type { float, i8, %union.anon }
 %union.anon = type { [2 x double], [8 x i8] }
 %union.anon.6 = type { %struct.anon.7 }
 %struct.anon.7 = type { float, [3 x double] }
 %union.anon.9 = type { %struct.anon.10 }
 %struct.anon.10 = type { float, [3 x double] }
-%struct.Pigment_Struct = type { i16, i16, i16, i32, float, float, float, %struct.Warps_Struct*, %struct.Pattern_Struct*, %struct.Blend_Map_Struct*, %union.anon.0, [5 x float] }
+%struct.Pigment_Struct = type { i16, i16, i16, i32, float, float, float, ptr, ptr, ptr, %union.anon.0, [5 x float] }
 %union.anon.0 = type { %struct.anon }
 %struct.anon = type { float, [3 x double] }
-%struct.Tnormal_Struct = type { i16, i16, i16, i32, float, float, float, %struct.Warps_Struct*, %struct.Pattern_Struct*, %struct.Blend_Map_Struct*, %union.anon.3, float }
+%struct.Tnormal_Struct = type { i16, i16, i16, i32, float, float, float, ptr, ptr, ptr, %union.anon.3, float }
 %union.anon.3 = type { %struct.anon.4 }
 %struct.anon.4 = type { float, [3 x double] }
 %struct.Finish_Struct = type { float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, [3 x float], [3 x float] }
-%struct.Interior_Struct = type { i64, i32, float, float, float, float, float, %struct.Media_Struct* }
-%struct.Media_Struct = type { i32, i32, i32, i32, i32, double, double, i32, i32, i32, i32, [5 x float], [5 x float], [5 x float], [5 x float], double, double, double, double*, %struct.Pigment_Struct*, %struct.Media_Struct* }
+%struct.Interior_Struct = type { i64, i32, float, float, float, float, float, ptr }
+%struct.Media_Struct = type { i32, i32, i32, i32, i32, double, double, i32, i32, i32, i32, [5 x float], [5 x float], [5 x float], [5 x float], double, double, double, ptr, ptr, ptr }
 %struct.Bounding_Box_Struct = type { [3 x float], [3 x float] }
-%struct.Ray_Struct = type { [3 x double], [3 x double], i32, [100 x %struct.Interior_Struct*] }
-%struct.istack_struct = type { %struct.istack_struct*, %struct.istk_entry*, i32 }
-%struct.istk_entry = type { double, [3 x double], [3 x double], %struct.Object_Struct*, i32, i32, double, double, i8* }
+%struct.Ray_Struct = type { [3 x double], [3 x double], i32, [100 x ptr] }
+%struct.istack_struct = type { ptr, ptr, i32 }
+%struct.istk_entry = type { double, [3 x double], [3 x double], ptr, i32, i32, double, double, ptr }
 %struct.Transform_Struct = type { [4 x [4 x double]], [4 x [4 x double]] }
-%struct.Bezier_Node_Struct = type { i32, [3 x double], double, i32, i8* }
+%struct.Bezier_Node_Struct = type { i32, [3 x double], double, i32, ptr }
 
-define void @Precompute_Patch_Values(%struct.Bicubic_Patch_Struct* %Shape) {
+define void @Precompute_Patch_Values(ptr %Shape) {
 ; CHECK: Precompute_Patch_Values
 ; CHECK: ldr [[VAL2:q[0-9]+]], [x0, #272]
 ; CHECK-NEXT: ldr [[VAL:x[0-9]+]], [x0, #288]
@@ -39,13 +39,11 @@ define void @Precompute_Patch_Values(%struct.Bicubic_Patch_Struct* %Shape) {
 ; CHECK-NEXT: str [[VAL]], [sp, #232]
 entry:
   %Control_Points = alloca [16 x [3 x double]], align 8
-  %arraydecay5.3.1 = getelementptr inbounds [16 x [3 x double]], [16 x [3 x double]]* %Control_Points, i64 0, i64 9, i64 0
-  %tmp14 = bitcast double* %arraydecay5.3.1 to i8*
-  %arraydecay11.3.1 = getelementptr inbounds %struct.Bicubic_Patch_Struct, %struct.Bicubic_Patch_Struct* %Shape, i64 0, i32 12, i64 1, i64 3, i64 0
-  %tmp15 = bitcast double* %arraydecay11.3.1 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp14, i8* %tmp15, i64 24, i1 false)
+  %arraydecay5.3.1 = getelementptr inbounds [16 x [3 x double]], ptr %Control_Points, i64 0, i64 9, i64 0
+  %arraydecay11.3.1 = getelementptr inbounds %struct.Bicubic_Patch_Struct, ptr %Shape, i64 0, i32 12, i64 1, i64 3, i64 0
+  call void @llvm.memcpy.p0.p0.i64(ptr %arraydecay5.3.1, ptr %arraydecay11.3.1, i64 24, i1 false)
   ret void
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1)

diff  --git a/llvm/test/CodeGen/AArch64/arm64-vmax.ll b/llvm/test/CodeGen/AArch64/arm64-vmax.ll
index e8a302047a0f2..de24544f9270b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vmax.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vmax.ll
@@ -1,80 +1,80 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @smax_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @smax_8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smax_8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    smax.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.smax.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @smax_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @smax_16b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smax_16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    smax.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.smax.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @smax_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @smax_4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smax_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    smax.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.smax.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @smax_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @smax_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smax_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    smax.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.smax.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @smax_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @smax_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smax_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    smax.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.smax.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @smax_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @smax_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smax_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    smax.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.smax.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -86,80 +86,80 @@ declare <8 x i16> @llvm.aarch64.neon.smax.v8i16(<8 x i16>, <8 x i16>) nounwind r
 declare <2 x i32> @llvm.aarch64.neon.smax.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 declare <4 x i32> @llvm.aarch64.neon.smax.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 
-define <8 x i8> @umax_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @umax_8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umax_8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    umax.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.umax.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @umax_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @umax_16b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umax_16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    umax.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.umax.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @umax_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @umax_4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umax_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    umax.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.umax.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @umax_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @umax_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umax_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    umax.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.umax.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @umax_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @umax_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umax_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    umax.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.umax.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @umax_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @umax_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umax_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    umax.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.umax.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -171,80 +171,80 @@ declare <8 x i16> @llvm.aarch64.neon.umax.v8i16(<8 x i16>, <8 x i16>) nounwind r
 declare <2 x i32> @llvm.aarch64.neon.umax.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 declare <4 x i32> @llvm.aarch64.neon.umax.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 
-define <8 x i8> @smin_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @smin_8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smin_8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    smin.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @smin_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @smin_16b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smin_16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    smin.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @smin_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @smin_4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smin_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    smin.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @smin_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @smin_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smin_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    smin.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @smin_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @smin_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smin_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    smin.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @smin_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @smin_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smin_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    smin.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -256,80 +256,80 @@ declare <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16>, <8 x i16>) nounwind r
 declare <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 declare <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 
-define <8 x i8> @umin_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @umin_8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umin_8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    umin.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @umin_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @umin_16b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umin_16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    umin.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @umin_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @umin_4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umin_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    umin.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @umin_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @umin_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umin_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    umin.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @umin_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @umin_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umin_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    umin.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @umin_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @umin_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umin_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    umin.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -343,80 +343,80 @@ declare <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32>, <4 x i32>) nounwind r
 
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @smaxp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @smaxp_8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smaxp_8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    smaxp.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.smaxp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @smaxp_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @smaxp_16b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smaxp_16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    smaxp.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.smaxp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @smaxp_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @smaxp_4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smaxp_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    smaxp.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.smaxp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @smaxp_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @smaxp_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smaxp_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    smaxp.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.smaxp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @smaxp_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @smaxp_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smaxp_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    smaxp.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.smaxp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @smaxp_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @smaxp_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smaxp_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    smaxp.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.smaxp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -428,80 +428,80 @@ declare <8 x i16> @llvm.aarch64.neon.smaxp.v8i16(<8 x i16>, <8 x i16>) nounwind
 declare <2 x i32> @llvm.aarch64.neon.smaxp.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 declare <4 x i32> @llvm.aarch64.neon.smaxp.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 
-define <8 x i8> @umaxp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @umaxp_8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umaxp_8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    umaxp.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.umaxp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @umaxp_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @umaxp_16b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umaxp_16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    umaxp.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.umaxp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @umaxp_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @umaxp_4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umaxp_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    umaxp.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.umaxp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @umaxp_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @umaxp_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umaxp_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    umaxp.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.umaxp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @umaxp_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @umaxp_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umaxp_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    umaxp.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.umaxp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @umaxp_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @umaxp_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umaxp_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    umaxp.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.umaxp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -515,80 +515,80 @@ declare <4 x i32> @llvm.aarch64.neon.umaxp.v4i32(<4 x i32>, <4 x i32>) nounwind
 
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @sminp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @sminp_8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sminp_8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sminp.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.sminp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @sminp_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @sminp_16b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sminp_16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    sminp.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.sminp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @sminp_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @sminp_4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sminp_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sminp.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.sminp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @sminp_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @sminp_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sminp_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    sminp.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.sminp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @sminp_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @sminp_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sminp_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sminp.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.sminp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @sminp_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @sminp_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sminp_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    sminp.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.sminp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -600,80 +600,80 @@ declare <8 x i16> @llvm.aarch64.neon.sminp.v8i16(<8 x i16>, <8 x i16>) nounwind
 declare <2 x i32> @llvm.aarch64.neon.sminp.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 declare <4 x i32> @llvm.aarch64.neon.sminp.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 
-define <8 x i8> @uminp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @uminp_8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uminp_8b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    uminp.8b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.uminp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <16 x i8> @uminp_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @uminp_16b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uminp_16b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    uminp.16b v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.uminp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <4 x i16> @uminp_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @uminp_4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uminp_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    uminp.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.uminp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @uminp_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @uminp_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uminp_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    uminp.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.uminp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @uminp_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @uminp_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uminp_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    uminp.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.uminp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @uminp_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @uminp_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: uminp_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    uminp.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.uminp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
@@ -685,41 +685,41 @@ declare <8 x i16> @llvm.aarch64.neon.uminp.v8i16(<8 x i16>, <8 x i16>) nounwind
 declare <2 x i32> @llvm.aarch64.neon.uminp.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
 declare <4 x i32> @llvm.aarch64.neon.uminp.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 
-define <2 x float> @fmax_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+define <2 x float> @fmax_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmax_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    fmax.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x float>, <2 x float>* %A
-	%tmp2 = load <2 x float>, <2 x float>* %B
+	%tmp1 = load <2 x float>, ptr %A
+	%tmp2 = load <2 x float>, ptr %B
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.fmax.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
 
-define <4 x float> @fmax_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @fmax_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmax_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fmax.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x float>, <4 x float>* %A
-	%tmp2 = load <4 x float>, <4 x float>* %B
+	%tmp1 = load <4 x float>, ptr %A
+	%tmp2 = load <4 x float>, ptr %B
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.fmax.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
 
-define <2 x double> @fmax_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+define <2 x double> @fmax_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmax_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fmax.2d v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x double>, <2 x double>* %A
-	%tmp2 = load <2 x double>, <2 x double>* %B
+	%tmp1 = load <2 x double>, ptr %A
+	%tmp2 = load <2 x double>, ptr %B
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.fmax.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x double> %tmp3
 }
@@ -728,41 +728,41 @@ declare <2 x float> @llvm.aarch64.neon.fmax.v2f32(<2 x float>, <2 x float>) noun
 declare <4 x float> @llvm.aarch64.neon.fmax.v4f32(<4 x float>, <4 x float>) nounwind readnone
 declare <2 x double> @llvm.aarch64.neon.fmax.v2f64(<2 x double>, <2 x double>) nounwind readnone
 
-define <2 x float> @fmaxp_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+define <2 x float> @fmaxp_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmaxp_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    fmaxp.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x float>, <2 x float>* %A
-	%tmp2 = load <2 x float>, <2 x float>* %B
+	%tmp1 = load <2 x float>, ptr %A
+	%tmp2 = load <2 x float>, ptr %B
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.fmaxp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
 
-define <4 x float> @fmaxp_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @fmaxp_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmaxp_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fmaxp.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x float>, <4 x float>* %A
-	%tmp2 = load <4 x float>, <4 x float>* %B
+	%tmp1 = load <4 x float>, ptr %A
+	%tmp2 = load <4 x float>, ptr %B
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.fmaxp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
 
-define <2 x double> @fmaxp_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+define <2 x double> @fmaxp_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmaxp_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fmaxp.2d v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x double>, <2 x double>* %A
-	%tmp2 = load <2 x double>, <2 x double>* %B
+	%tmp1 = load <2 x double>, ptr %A
+	%tmp2 = load <2 x double>, ptr %B
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.fmaxp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x double> %tmp3
 }
@@ -771,41 +771,41 @@ declare <2 x float> @llvm.aarch64.neon.fmaxp.v2f32(<2 x float>, <2 x float>) nou
 declare <4 x float> @llvm.aarch64.neon.fmaxp.v4f32(<4 x float>, <4 x float>) nounwind readnone
 declare <2 x double> @llvm.aarch64.neon.fmaxp.v2f64(<2 x double>, <2 x double>) nounwind readnone
 
-define <2 x float> @fmin_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+define <2 x float> @fmin_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmin_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    fmin.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x float>, <2 x float>* %A
-	%tmp2 = load <2 x float>, <2 x float>* %B
+	%tmp1 = load <2 x float>, ptr %A
+	%tmp2 = load <2 x float>, ptr %B
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
 
-define <4 x float> @fmin_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @fmin_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmin_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fmin.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x float>, <4 x float>* %A
-	%tmp2 = load <4 x float>, <4 x float>* %B
+	%tmp1 = load <4 x float>, ptr %A
+	%tmp2 = load <4 x float>, ptr %B
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
 
-define <2 x double> @fmin_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+define <2 x double> @fmin_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmin_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fmin.2d v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x double>, <2 x double>* %A
-	%tmp2 = load <2 x double>, <2 x double>* %B
+	%tmp1 = load <2 x double>, ptr %A
+	%tmp2 = load <2 x double>, ptr %B
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x double> %tmp3
 }
@@ -814,41 +814,41 @@ declare <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float>, <2 x float>) noun
 declare <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float>, <4 x float>) nounwind readnone
 declare <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double>, <2 x double>) nounwind readnone
 
-define <2 x float> @fminp_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+define <2 x float> @fminp_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fminp_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    fminp.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x float>, <2 x float>* %A
-	%tmp2 = load <2 x float>, <2 x float>* %B
+	%tmp1 = load <2 x float>, ptr %A
+	%tmp2 = load <2 x float>, ptr %B
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.fminp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
 
-define <4 x float> @fminp_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @fminp_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fminp_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fminp.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x float>, <4 x float>* %A
-	%tmp2 = load <4 x float>, <4 x float>* %B
+	%tmp1 = load <4 x float>, ptr %A
+	%tmp2 = load <4 x float>, ptr %B
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.fminp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
 
-define <2 x double> @fminp_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+define <2 x double> @fminp_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fminp_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fminp.2d v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x double>, <2 x double>* %A
-	%tmp2 = load <2 x double>, <2 x double>* %B
+	%tmp1 = load <2 x double>, ptr %A
+	%tmp2 = load <2 x double>, ptr %B
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.fminp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x double> %tmp3
 }
@@ -857,41 +857,41 @@ declare <2 x float> @llvm.aarch64.neon.fminp.v2f32(<2 x float>, <2 x float>) nou
 declare <4 x float> @llvm.aarch64.neon.fminp.v4f32(<4 x float>, <4 x float>) nounwind readnone
 declare <2 x double> @llvm.aarch64.neon.fminp.v2f64(<2 x double>, <2 x double>) nounwind readnone
 
-define <2 x float> @fminnmp_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+define <2 x float> @fminnmp_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fminnmp_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    fminnmp.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x float>, <2 x float>* %A
-	%tmp2 = load <2 x float>, <2 x float>* %B
+	%tmp1 = load <2 x float>, ptr %A
+	%tmp2 = load <2 x float>, ptr %B
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.fminnmp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
 
-define <4 x float> @fminnmp_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @fminnmp_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fminnmp_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fminnmp.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x float>, <4 x float>* %A
-	%tmp2 = load <4 x float>, <4 x float>* %B
+	%tmp1 = load <4 x float>, ptr %A
+	%tmp2 = load <4 x float>, ptr %B
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.fminnmp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
 
-define <2 x double> @fminnmp_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+define <2 x double> @fminnmp_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fminnmp_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fminnmp.2d v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x double>, <2 x double>* %A
-	%tmp2 = load <2 x double>, <2 x double>* %B
+	%tmp1 = load <2 x double>, ptr %A
+	%tmp2 = load <2 x double>, ptr %B
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.fminnmp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x double> %tmp3
 }
@@ -900,41 +900,41 @@ declare <2 x float> @llvm.aarch64.neon.fminnmp.v2f32(<2 x float>, <2 x float>) n
 declare <4 x float> @llvm.aarch64.neon.fminnmp.v4f32(<4 x float>, <4 x float>) nounwind readnone
 declare <2 x double> @llvm.aarch64.neon.fminnmp.v2f64(<2 x double>, <2 x double>) nounwind readnone
 
-define <2 x float> @fmaxnmp_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+define <2 x float> @fmaxnmp_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmaxnmp_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    fmaxnmp.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x float>, <2 x float>* %A
-	%tmp2 = load <2 x float>, <2 x float>* %B
+	%tmp1 = load <2 x float>, ptr %A
+	%tmp2 = load <2 x float>, ptr %B
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.fmaxnmp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
 
-define <4 x float> @fmaxnmp_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @fmaxnmp_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmaxnmp_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fmaxnmp.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <4 x float>, <4 x float>* %A
-	%tmp2 = load <4 x float>, <4 x float>* %B
+	%tmp1 = load <4 x float>, ptr %A
+	%tmp2 = load <4 x float>, ptr %B
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.fmaxnmp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
 
-define <2 x double> @fmaxnmp_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+define <2 x double> @fmaxnmp_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmaxnmp_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fmaxnmp.2d v0, v0, v1
 ; CHECK-NEXT:    ret
-	%tmp1 = load <2 x double>, <2 x double>* %A
-	%tmp2 = load <2 x double>, <2 x double>* %B
+	%tmp1 = load <2 x double>, ptr %A
+	%tmp2 = load <2 x double>, ptr %B
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.fmaxnmp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x double> %tmp3
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-vmul.ll b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
index c95d680ec6740..0a29d6b86659e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vmul.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
@@ -1,41 +1,41 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mattr=+aes | FileCheck %s
 
-define <8 x i16> @smull8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i16> @smull8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    smull.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
   ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @smull4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @smull4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    smull.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @smull2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @smull2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    smull.2d v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   ret <2 x i64> %tmp3
 }
@@ -44,41 +44,41 @@ declare <8 x i16>  @llvm.aarch64.neon.smull.v8i16(<8 x i8>, <8 x i8>) nounwind r
 declare <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
 declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
 
-define <8 x i16> @umull8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i16> @umull8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umull8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    umull.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
   ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @umull4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @umull4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umull4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    umull.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @umull2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @umull2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umull2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    umull.2d v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   ret <2 x i64> %tmp3
 }
@@ -87,56 +87,56 @@ declare <8 x i16>  @llvm.aarch64.neon.umull.v8i16(<8 x i8>, <8 x i8>) nounwind r
 declare <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
 declare <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
 
-define <4 x i32> @sqdmull4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @sqdmull4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqdmull4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sqdmull.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @sqdmull2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @sqdmull2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqdmull2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sqdmull.2d v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   ret <2 x i64> %tmp3
 }
 
-define <4 x i32> @sqdmull2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <4 x i32> @sqdmull2_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqdmull2_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #8]
 ; CHECK-NEXT:    ldr d1, [x1, #8]
 ; CHECK-NEXT:    sqdmull.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %load1 = load <8 x i16>, <8 x i16>* %A
-  %load2 = load <8 x i16>, <8 x i16>* %B
+  %load1 = load <8 x i16>, ptr %A
+  %load2 = load <8 x i16>, ptr %B
   %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @sqdmull2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <2 x i64> @sqdmull2_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqdmull2_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #8]
 ; CHECK-NEXT:    ldr d1, [x1, #8]
 ; CHECK-NEXT:    sqdmull.2d v0, v0, v1
 ; CHECK-NEXT:    ret
-  %load1 = load <4 x i32>, <4 x i32>* %A
-  %load2 = load <4 x i32>, <4 x i32>* %B
+  %load1 = load <4 x i32>, ptr %A
+  %load2 = load <4 x i32>, ptr %B
   %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -147,74 +147,74 @@ define <2 x i64> @sqdmull2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 declare <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
 declare <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
 
-define <8 x i16> @pmull8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i16> @pmull8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: pmull8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    pmull.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
   ret <8 x i16> %tmp3
 }
 
 declare <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
 
-define <4 x i16> @sqdmulh_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @sqdmulh_4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqdmulh_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sqdmulh.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
   ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @sqdmulh_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @sqdmulh_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqdmulh_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    sqdmulh.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i16>, <8 x i16>* %B
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
   ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @sqdmulh_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @sqdmulh_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqdmulh_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sqdmulh.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
   ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @sqdmulh_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @sqdmulh_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqdmulh_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    sqdmulh.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i32>, <4 x i32>* %B
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
   ret <4 x i32> %tmp3
 }
 
-define i32 @sqdmulh_1s(i32* %A, i32* %B) nounwind {
+define i32 @sqdmulh_1s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqdmulh_1s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -224,8 +224,8 @@ define i32 @sqdmulh_1s(i32* %A, i32* %B) nounwind {
 ; CHECK-NEXT:    sqdmulh s0, s0, s1
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %tmp1 = load i32, i32* %A
-  %tmp2 = load i32, i32* %B
+  %tmp1 = load i32, ptr %A
+  %tmp2 = load i32, ptr %B
   %tmp3 = call i32 @llvm.aarch64.neon.sqdmulh.i32(i32 %tmp1, i32 %tmp2)
   ret i32 %tmp3
 }
@@ -236,59 +236,59 @@ declare <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32>, <2 x i32>) nounwin
 declare <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 declare i32 @llvm.aarch64.neon.sqdmulh.i32(i32, i32) nounwind readnone
 
-define <4 x i16> @sqrdmulh_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @sqrdmulh_4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqrdmulh_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sqrdmulh.4h v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
   ret <4 x i16> %tmp3
 }
 
-define <8 x i16> @sqrdmulh_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @sqrdmulh_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqrdmulh_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    sqrdmulh.8h v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i16>, <8 x i16>* %B
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
   ret <8 x i16> %tmp3
 }
 
-define <2 x i32> @sqrdmulh_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @sqrdmulh_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqrdmulh_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sqrdmulh.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
   ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @sqrdmulh_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @sqrdmulh_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqrdmulh_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    sqrdmulh.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i32>, <4 x i32>* %B
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
   ret <4 x i32> %tmp3
 }
 
-define i32 @sqrdmulh_1s(i32* %A, i32* %B) nounwind {
+define i32 @sqrdmulh_1s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqrdmulh_1s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -298,8 +298,8 @@ define i32 @sqrdmulh_1s(i32* %A, i32* %B) nounwind {
 ; CHECK-NEXT:    sqrdmulh s0, s0, s1
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %tmp1 = load i32, i32* %A
-  %tmp2 = load i32, i32* %B
+  %tmp1 = load i32, ptr %A
+  %tmp2 = load i32, ptr %B
   %tmp3 = call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 %tmp1, i32 %tmp2)
   ret i32 %tmp3
 }
@@ -310,41 +310,41 @@ declare <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32>, <2 x i32>) nounwi
 declare <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 declare i32 @llvm.aarch64.neon.sqrdmulh.i32(i32, i32) nounwind readnone
 
-define <2 x float> @fmulx_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+define <2 x float> @fmulx_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmulx_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    fmulx.2s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x float>, <2 x float>* %A
-  %tmp2 = load <2 x float>, <2 x float>* %B
+  %tmp1 = load <2 x float>, ptr %A
+  %tmp2 = load <2 x float>, ptr %B
   %tmp3 = call <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
   ret <2 x float> %tmp3
 }
 
-define <4 x float> @fmulx_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @fmulx_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmulx_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fmulx.4s v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x float>, <4 x float>* %A
-  %tmp2 = load <4 x float>, <4 x float>* %B
+  %tmp1 = load <4 x float>, ptr %A
+  %tmp2 = load <4 x float>, ptr %B
   %tmp3 = call <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
   ret <4 x float> %tmp3
 }
 
-define <2 x double> @fmulx_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+define <2 x double> @fmulx_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmulx_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fmulx.2d v0, v0, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x double>, <2 x double>* %A
-  %tmp2 = load <2 x double>, <2 x double>* %B
+  %tmp1 = load <2 x double>, ptr %A
+  %tmp2 = load <2 x double>, ptr %B
   %tmp3 = call <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
   ret <2 x double> %tmp3
 }
@@ -353,7 +353,7 @@ declare <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float>, <2 x float>) nou
 declare <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float>, <4 x float>) nounwind readnone
 declare <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double>, <2 x double>) nounwind readnone
 
-define <4 x i32> @smlal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @smlal4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: smlal4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -361,15 +361,15 @@ define <4 x i32> @smlal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    smlal.4s v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   %tmp5 = add <4 x i32> %tmp3, %tmp4
   ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @smlal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @smlal2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: smlal2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -377,15 +377,15 @@ define <2 x i64> @smlal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    smlal.2d v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   %tmp5 = add <2 x i64> %tmp3, %tmp4
   ret <2 x i64> %tmp5
 }
 
-define void @smlal8h_chain_with_constant(<8 x i16>* %dst, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
+define void @smlal8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
 ; CHECK-LABEL: smlal8h_chain_with_constant:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.16b v3, #1
@@ -399,11 +399,11 @@ define void @smlal8h_chain_with_constant(<8 x i16>* %dst, <8 x i8> %v1, <8 x i8>
   %add.1 = add <8 x i16> %smull.1, <i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257>
   %smull.2 = tail call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %v2, <8 x i8> %xor)
   %add.2 = add <8 x i16> %add.1, %smull.2
-  store <8 x i16> %add.2, <8 x i16>* %dst
+  store <8 x i16> %add.2, ptr %dst
   ret void
 }
 
-define void @smlal2d_chain_with_constant(<2 x i64>* %dst, <2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
+define void @smlal2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
 ; CHECK-LABEL: smlal2d_chain_with_constant:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #257
@@ -418,11 +418,11 @@ define void @smlal2d_chain_with_constant(<2 x i64>* %dst, <2 x i32> %v1, <2 x i3
   %add.1 = add <2 x i64> %smull.1, <i64 257, i64 257>
   %smull.2 = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %v2, <2 x i32> %xor)
   %add.2 = add <2 x i64> %add.1, %smull.2
-  store <2 x i64> %add.2, <2 x i64>* %dst
+  store <2 x i64> %add.2, ptr %dst
   ret void
 }
 
-define <4 x i32> @smlsl4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @smlsl4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: smlsl4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -430,15 +430,15 @@ define <4 x i32> @smlsl4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    smlsl.4s v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   %tmp5 = sub <4 x i32> %tmp3, %tmp4
   ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @smlsl2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @smlsl2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: smlsl2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -446,15 +446,15 @@ define <2 x i64> @smlsl2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    smlsl.2d v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   %tmp5 = sub <2 x i64> %tmp3, %tmp4
   ret <2 x i64> %tmp5
 }
 
-define void @smlsl8h_chain_with_constant(<8 x i16>* %dst, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
+define void @smlsl8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
 ; CHECK-LABEL: smlsl8h_chain_with_constant:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smull.8h v0, v0, v2
@@ -469,11 +469,11 @@ define void @smlsl8h_chain_with_constant(<8 x i16>* %dst, <8 x i8> %v1, <8 x i8>
   %sub.1 = sub <8 x i16> <i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257>, %smull.1
   %smull.2 = tail call <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8> %v2, <8 x i8> %xor)
   %sub.2 = sub <8 x i16> %sub.1, %smull.2
-  store <8 x i16> %sub.2, <8 x i16>* %dst
+  store <8 x i16> %sub.2, ptr %dst
   ret void
 }
 
-define void @smlsl2d_chain_with_constant(<2 x i64>* %dst, <2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
+define void @smlsl2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
 ; CHECK-LABEL: smlsl2d_chain_with_constant:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    smull.2d v0, v0, v2
@@ -489,7 +489,7 @@ define void @smlsl2d_chain_with_constant(<2 x i64>* %dst, <2 x i32> %v1, <2 x i3
   %sub.1 = sub <2 x i64> <i64 257, i64 257>, %smull.1
   %smull.2 = tail call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %v2, <2 x i32> %xor)
   %sub.2 = sub <2 x i64> %sub.1, %smull.2
-  store <2 x i64> %sub.2, <2 x i64>* %dst
+  store <2 x i64> %sub.2, ptr %dst
   ret void
 }
 
@@ -498,7 +498,7 @@ declare <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64>, <2 x i64>)
 declare <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32>, <4 x i32>)
 declare <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64>, <2 x i64>)
 
-define <4 x i32> @sqdmlal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @sqdmlal4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sqdmlal4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -506,15 +506,15 @@ define <4 x i32> @sqdmlal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwin
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    sqdmlal.4s v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %tmp3, <4 x i32> %tmp4)
   ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @sqdmlal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @sqdmlal2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sqdmlal2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -522,15 +522,15 @@ define <2 x i64> @sqdmlal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwin
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    sqdmlal.2d v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %tmp3, <2 x i64> %tmp4)
   ret <2 x i64> %tmp5
 }
 
-define <4 x i32> @sqdmlal2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @sqdmlal2_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sqdmlal2_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x2]
@@ -538,9 +538,9 @@ define <4 x i32> @sqdmlal2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounw
 ; CHECK-NEXT:    ldr d2, [x1, #8]
 ; CHECK-NEXT:    sqdmlal.4s v0, v1, v2
 ; CHECK-NEXT:    ret
-  %load1 = load <8 x i16>, <8 x i16>* %A
-  %load2 = load <8 x i16>, <8 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %load1 = load <8 x i16>, ptr %A
+  %load2 = load <8 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -548,7 +548,7 @@ define <4 x i32> @sqdmlal2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounw
   ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @sqdmlal2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @sqdmlal2_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sqdmlal2_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x2]
@@ -556,9 +556,9 @@ define <2 x i64> @sqdmlal2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounw
 ; CHECK-NEXT:    ldr d2, [x1, #8]
 ; CHECK-NEXT:    sqdmlal.2d v0, v1, v2
 ; CHECK-NEXT:    ret
-  %load1 = load <4 x i32>, <4 x i32>* %A
-  %load2 = load <4 x i32>, <4 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %load1 = load <4 x i32>, ptr %A
+  %load2 = load <4 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -566,7 +566,7 @@ define <2 x i64> @sqdmlal2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounw
   ret <2 x i64> %tmp5
 }
 
-define <4 x i32> @sqdmlsl4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @sqdmlsl4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sqdmlsl4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -574,15 +574,15 @@ define <4 x i32> @sqdmlsl4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwin
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    sqdmlsl.4s v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %tmp3, <4 x i32> %tmp4)
   ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @sqdmlsl2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @sqdmlsl2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sqdmlsl2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -590,15 +590,15 @@ define <2 x i64> @sqdmlsl2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwin
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    sqdmlsl.2d v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %tmp3, <2 x i64> %tmp4)
   ret <2 x i64> %tmp5
 }
 
-define <4 x i32> @sqdmlsl2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @sqdmlsl2_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sqdmlsl2_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x2]
@@ -606,9 +606,9 @@ define <4 x i32> @sqdmlsl2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounw
 ; CHECK-NEXT:    ldr d2, [x1, #8]
 ; CHECK-NEXT:    sqdmlsl.4s v0, v1, v2
 ; CHECK-NEXT:    ret
-  %load1 = load <8 x i16>, <8 x i16>* %A
-  %load2 = load <8 x i16>, <8 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %load1 = load <8 x i16>, ptr %A
+  %load2 = load <8 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -616,7 +616,7 @@ define <4 x i32> @sqdmlsl2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounw
   ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @sqdmlsl2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @sqdmlsl2_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sqdmlsl2_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x2]
@@ -624,9 +624,9 @@ define <2 x i64> @sqdmlsl2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounw
 ; CHECK-NEXT:    ldr d2, [x1, #8]
 ; CHECK-NEXT:    sqdmlsl.2d v0, v1, v2
 ; CHECK-NEXT:    ret
-  %load1 = load <4 x i32>, <4 x i32>* %A
-  %load2 = load <4 x i32>, <4 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %load1 = load <4 x i32>, ptr %A
+  %load2 = load <4 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -634,7 +634,7 @@ define <2 x i64> @sqdmlsl2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounw
   ret <2 x i64> %tmp5
 }
 
-define <4 x i32> @umlal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @umlal4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: umlal4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -642,15 +642,15 @@ define <4 x i32> @umlal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    umlal.4s v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   %tmp5 = add <4 x i32> %tmp3, %tmp4
   ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @umlal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @umlal2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: umlal2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -658,15 +658,15 @@ define <2 x i64> @umlal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    umlal.2d v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   %tmp5 = add <2 x i64> %tmp3, %tmp4
   ret <2 x i64> %tmp5
 }
 
-define void @umlal8h_chain_with_constant(<8 x i16>* %dst, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
+define void @umlal8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
 ; CHECK-LABEL: umlal8h_chain_with_constant:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi.16b v3, #1
@@ -680,11 +680,11 @@ define void @umlal8h_chain_with_constant(<8 x i16>* %dst, <8 x i8> %v1, <8 x i8>
   %add.1 = add <8 x i16> %umull.1, <i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257>
   %umull.2 = tail call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %v2, <8 x i8> %xor)
   %add.2 = add <8 x i16> %add.1, %umull.2
-  store <8 x i16> %add.2, <8 x i16>* %dst
+  store <8 x i16> %add.2, ptr %dst
   ret void
 }
 
-define void @umlal2d_chain_with_constant(<2 x i64>* %dst, <2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
+define void @umlal2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
 ; CHECK-LABEL: umlal2d_chain_with_constant:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #257
@@ -699,11 +699,11 @@ define void @umlal2d_chain_with_constant(<2 x i64>* %dst, <2 x i32> %v1, <2 x i3
   %add.1 = add <2 x i64> %umull.1, <i64 257, i64 257>
   %umull.2 = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %v2, <2 x i32> %xor)
   %add.2 = add <2 x i64> %add.1, %umull.2
-  store <2 x i64> %add.2, <2 x i64>* %dst
+  store <2 x i64> %add.2, ptr %dst
   ret void
 }
 
-define <4 x i32> @umlsl4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @umlsl4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: umlsl4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -711,15 +711,15 @@ define <4 x i32> @umlsl4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    umlsl.4s v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   %tmp5 = sub <4 x i32> %tmp3, %tmp4
   ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @umlsl2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @umlsl2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: umlsl2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -727,15 +727,15 @@ define <2 x i64> @umlsl2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    umlsl.2d v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   %tmp5 = sub <2 x i64> %tmp3, %tmp4
   ret <2 x i64> %tmp5
 }
 
-define void @umlsl8h_chain_with_constant(<8 x i16>* %dst, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
+define void @umlsl8h_chain_with_constant(ptr %dst, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
 ; CHECK-LABEL: umlsl8h_chain_with_constant:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umull.8h v0, v0, v2
@@ -750,11 +750,11 @@ define void @umlsl8h_chain_with_constant(<8 x i16>* %dst, <8 x i8> %v1, <8 x i8>
   %add.1 = sub <8 x i16> <i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257>, %umull.1
   %umull.2 = tail call <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8> %v2, <8 x i8> %xor)
   %add.2 = sub <8 x i16> %add.1, %umull.2
-  store <8 x i16> %add.2, <8 x i16>* %dst
+  store <8 x i16> %add.2, ptr %dst
   ret void
 }
 
-define void @umlsl2d_chain_with_constant(<2 x i64>* %dst, <2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
+define void @umlsl2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
 ; CHECK-LABEL: umlsl2d_chain_with_constant:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umull.2d v0, v0, v2
@@ -770,11 +770,11 @@ define void @umlsl2d_chain_with_constant(<2 x i64>* %dst, <2 x i32> %v1, <2 x i3
   %add.1 = sub <2 x i64> <i64 257, i64 257>, %umull.1
   %umull.2 = tail call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %v2, <2 x i32> %xor)
   %add.2 = sub <2 x i64> %add.1, %umull.2
-  store <2 x i64> %add.2, <2 x i64>* %dst
+  store <2 x i64> %add.2, ptr %dst
   ret void
 }
 
-define <2 x float> @fmla_2s(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
+define <2 x float> @fmla_2s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: fmla_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -782,14 +782,14 @@ define <2 x float> @fmla_2s(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) n
 ; CHECK-NEXT:    ldr d0, [x2]
 ; CHECK-NEXT:    fmla.2s v0, v1, v2
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x float>, <2 x float>* %A
-  %tmp2 = load <2 x float>, <2 x float>* %B
-  %tmp3 = load <2 x float>, <2 x float>* %C
+  %tmp1 = load <2 x float>, ptr %A
+  %tmp2 = load <2 x float>, ptr %B
+  %tmp3 = load <2 x float>, ptr %C
   %tmp4 = call <2 x float> @llvm.fma.v2f32(<2 x float> %tmp1, <2 x float> %tmp2, <2 x float> %tmp3)
   ret <2 x float> %tmp4
 }
 
-define <4 x float> @fmla_4s(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
+define <4 x float> @fmla_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: fmla_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q1, [x1]
@@ -797,14 +797,14 @@ define <4 x float> @fmla_4s(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) n
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    fmla.4s v0, v1, v2
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x float>, <4 x float>* %A
-  %tmp2 = load <4 x float>, <4 x float>* %B
-  %tmp3 = load <4 x float>, <4 x float>* %C
+  %tmp1 = load <4 x float>, ptr %A
+  %tmp2 = load <4 x float>, ptr %B
+  %tmp3 = load <4 x float>, ptr %C
   %tmp4 = call <4 x float> @llvm.fma.v4f32(<4 x float> %tmp1, <4 x float> %tmp2, <4 x float> %tmp3)
   ret <4 x float> %tmp4
 }
 
-define <2 x double> @fmla_2d(<2 x double>* %A, <2 x double>* %B, <2 x double>* %C) nounwind {
+define <2 x double> @fmla_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: fmla_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q1, [x1]
@@ -812,9 +812,9 @@ define <2 x double> @fmla_2d(<2 x double>* %A, <2 x double>* %B, <2 x double>* %
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    fmla.2d v0, v1, v2
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x double>, <2 x double>* %A
-  %tmp2 = load <2 x double>, <2 x double>* %B
-  %tmp3 = load <2 x double>, <2 x double>* %C
+  %tmp1 = load <2 x double>, ptr %A
+  %tmp2 = load <2 x double>, ptr %B
+  %tmp3 = load <2 x double>, ptr %C
   %tmp4 = call <2 x double> @llvm.fma.v2f64(<2 x double> %tmp1, <2 x double> %tmp2, <2 x double> %tmp3)
   ret <2 x double> %tmp4
 }
@@ -823,7 +823,7 @@ declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>) nounw
 declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
 declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
 
-define <2 x float> @fmls_2s(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
+define <2 x float> @fmls_2s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: fmls_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -831,15 +831,15 @@ define <2 x float> @fmls_2s(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) n
 ; CHECK-NEXT:    ldr d0, [x2]
 ; CHECK-NEXT:    fmls.2s v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x float>, <2 x float>* %A
-  %tmp2 = load <2 x float>, <2 x float>* %B
-  %tmp3 = load <2 x float>, <2 x float>* %C
+  %tmp1 = load <2 x float>, ptr %A
+  %tmp2 = load <2 x float>, ptr %B
+  %tmp3 = load <2 x float>, ptr %C
   %tmp4 = fsub <2 x float> <float -0.0, float -0.0>, %tmp2
   %tmp5 = call <2 x float> @llvm.fma.v2f32(<2 x float> %tmp1, <2 x float> %tmp4, <2 x float> %tmp3)
   ret <2 x float> %tmp5
 }
 
-define <4 x float> @fmls_4s(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
+define <4 x float> @fmls_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: fmls_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q1, [x1]
@@ -847,15 +847,15 @@ define <4 x float> @fmls_4s(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) n
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    fmls.4s v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x float>, <4 x float>* %A
-  %tmp2 = load <4 x float>, <4 x float>* %B
-  %tmp3 = load <4 x float>, <4 x float>* %C
+  %tmp1 = load <4 x float>, ptr %A
+  %tmp2 = load <4 x float>, ptr %B
+  %tmp3 = load <4 x float>, ptr %C
   %tmp4 = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %tmp2
   %tmp5 = call <4 x float> @llvm.fma.v4f32(<4 x float> %tmp1, <4 x float> %tmp4, <4 x float> %tmp3)
   ret <4 x float> %tmp5
 }
 
-define <2 x double> @fmls_2d(<2 x double>* %A, <2 x double>* %B, <2 x double>* %C) nounwind {
+define <2 x double> @fmls_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: fmls_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q1, [x1]
@@ -863,15 +863,15 @@ define <2 x double> @fmls_2d(<2 x double>* %A, <2 x double>* %B, <2 x double>* %
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    fmls.2d v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x double>, <2 x double>* %A
-  %tmp2 = load <2 x double>, <2 x double>* %B
-  %tmp3 = load <2 x double>, <2 x double>* %C
+  %tmp1 = load <2 x double>, ptr %A
+  %tmp2 = load <2 x double>, ptr %B
+  %tmp3 = load <2 x double>, ptr %C
   %tmp4 = fsub <2 x double> <double -0.0, double -0.0>, %tmp2
   %tmp5 = call <2 x double> @llvm.fma.v2f64(<2 x double> %tmp1, <2 x double> %tmp4, <2 x double> %tmp3)
   ret <2 x double> %tmp5
 }
 
-define <2 x float> @fmls_commuted_neg_2s(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
+define <2 x float> @fmls_commuted_neg_2s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: fmls_commuted_neg_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -879,15 +879,15 @@ define <2 x float> @fmls_commuted_neg_2s(<2 x float>* %A, <2 x float>* %B, <2 x
 ; CHECK-NEXT:    ldr d0, [x2]
 ; CHECK-NEXT:    fmls.2s v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x float>, <2 x float>* %A
-  %tmp2 = load <2 x float>, <2 x float>* %B
-  %tmp3 = load <2 x float>, <2 x float>* %C
+  %tmp1 = load <2 x float>, ptr %A
+  %tmp2 = load <2 x float>, ptr %B
+  %tmp3 = load <2 x float>, ptr %C
   %tmp4 = fsub <2 x float> <float -0.0, float -0.0>, %tmp2
   %tmp5 = call <2 x float> @llvm.fma.v2f32(<2 x float> %tmp4, <2 x float> %tmp1, <2 x float> %tmp3)
   ret <2 x float> %tmp5
 }
 
-define <4 x float> @fmls_commuted_neg_4s(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
+define <4 x float> @fmls_commuted_neg_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: fmls_commuted_neg_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q1, [x1]
@@ -895,15 +895,15 @@ define <4 x float> @fmls_commuted_neg_4s(<4 x float>* %A, <4 x float>* %B, <4 x
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    fmls.4s v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x float>, <4 x float>* %A
-  %tmp2 = load <4 x float>, <4 x float>* %B
-  %tmp3 = load <4 x float>, <4 x float>* %C
+  %tmp1 = load <4 x float>, ptr %A
+  %tmp2 = load <4 x float>, ptr %B
+  %tmp3 = load <4 x float>, ptr %C
   %tmp4 = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %tmp2
   %tmp5 = call <4 x float> @llvm.fma.v4f32(<4 x float> %tmp4, <4 x float> %tmp1, <4 x float> %tmp3)
   ret <4 x float> %tmp5
 }
 
-define <2 x double> @fmls_commuted_neg_2d(<2 x double>* %A, <2 x double>* %B, <2 x double>* %C) nounwind {
+define <2 x double> @fmls_commuted_neg_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: fmls_commuted_neg_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q1, [x1]
@@ -911,9 +911,9 @@ define <2 x double> @fmls_commuted_neg_2d(<2 x double>* %A, <2 x double>* %B, <2
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    fmls.2d v0, v2, v1
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x double>, <2 x double>* %A
-  %tmp2 = load <2 x double>, <2 x double>* %B
-  %tmp3 = load <2 x double>, <2 x double>* %C
+  %tmp1 = load <2 x double>, ptr %A
+  %tmp2 = load <2 x double>, ptr %B
+  %tmp3 = load <2 x double>, ptr %C
   %tmp4 = fsub <2 x double> <double -0.0, double -0.0>, %tmp2
   %tmp5 = call <2 x double> @llvm.fma.v2f64(<2 x double> %tmp4, <2 x double> %tmp1, <2 x double> %tmp3)
   ret <2 x double> %tmp5
@@ -1081,57 +1081,57 @@ declare <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float>, <2 x f
 declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata)
 
-define <4 x i16> @mul_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @mul_4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: mul_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    mul.4h v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = mul <4 x i16> %tmp1, %tmp3
   ret <4 x i16> %tmp4
 }
 
-define <8 x i16> @mul_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @mul_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: mul_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    mul.8h v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i16>, <8 x i16>* %B
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
   %tmp3 = shufflevector <8 x i16> %tmp2, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %tmp4 = mul <8 x i16> %tmp1, %tmp3
   ret <8 x i16> %tmp4
 }
 
-define <2 x i32> @mul_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @mul_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: mul_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    mul.2s v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = mul <2 x i32> %tmp1, %tmp3
   ret <2 x i32> %tmp4
 }
 
-define <4 x i32> @mul_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @mul_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: mul_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    mul.4s v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i32>, <4 x i32>* %B
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
   %tmp3 = shufflevector <4 x i32> %tmp2, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = mul <4 x i32> %tmp1, %tmp3
   ret <4 x i32> %tmp4
@@ -1153,43 +1153,43 @@ define <2 x i64> @mul_2d(<2 x i64> %A, <2 x i64> %B) nounwind {
   ret <2 x i64> %tmp1
 }
 
-define <2 x float> @fmul_lane_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+define <2 x float> @fmul_lane_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmul_lane_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    fmul.2s v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x float>, <2 x float>* %A
-  %tmp2 = load <2 x float>, <2 x float>* %B
+  %tmp1 = load <2 x float>, ptr %A
+  %tmp2 = load <2 x float>, ptr %B
   %tmp3 = shufflevector <2 x float> %tmp2, <2 x float> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = fmul <2 x float> %tmp1, %tmp3
   ret <2 x float> %tmp4
 }
 
-define <4 x float> @fmul_lane_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @fmul_lane_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmul_lane_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fmul.4s v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x float>, <4 x float>* %A
-  %tmp2 = load <4 x float>, <4 x float>* %B
+  %tmp1 = load <4 x float>, ptr %A
+  %tmp2 = load <4 x float>, ptr %B
   %tmp3 = shufflevector <4 x float> %tmp2, <4 x float> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = fmul <4 x float> %tmp1, %tmp3
   ret <4 x float> %tmp4
 }
 
-define <2 x double> @fmul_lane_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+define <2 x double> @fmul_lane_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmul_lane_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fmul.2d v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x double>, <2 x double>* %A
-  %tmp2 = load <2 x double>, <2 x double>* %B
+  %tmp1 = load <2 x double>, ptr %A
+  %tmp2 = load <2 x double>, ptr %B
   %tmp3 = shufflevector <2 x double> %tmp2, <2 x double> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = fmul <2 x double> %tmp1, %tmp3
   ret <2 x double> %tmp4
@@ -1217,99 +1217,99 @@ define double @fmul_lane_d(double %A, <2 x double> %vec) nounwind {
 
 
 
-define <2 x float> @fmulx_lane_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+define <2 x float> @fmulx_lane_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmulx_lane_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    fmulx.2s v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x float>, <2 x float>* %A
-  %tmp2 = load <2 x float>, <2 x float>* %B
+  %tmp1 = load <2 x float>, ptr %A
+  %tmp2 = load <2 x float>, ptr %B
   %tmp3 = shufflevector <2 x float> %tmp2, <2 x float> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = call <2 x float> @llvm.aarch64.neon.fmulx.v2f32(<2 x float> %tmp1, <2 x float> %tmp3)
   ret <2 x float> %tmp4
 }
 
-define <4 x float> @fmulx_lane_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @fmulx_lane_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmulx_lane_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fmulx.4s v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x float>, <4 x float>* %A
-  %tmp2 = load <4 x float>, <4 x float>* %B
+  %tmp1 = load <4 x float>, ptr %A
+  %tmp2 = load <4 x float>, ptr %B
   %tmp3 = shufflevector <4 x float> %tmp2, <4 x float> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x float> @llvm.aarch64.neon.fmulx.v4f32(<4 x float> %tmp1, <4 x float> %tmp3)
   ret <4 x float> %tmp4
 }
 
-define <2 x double> @fmulx_lane_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+define <2 x double> @fmulx_lane_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: fmulx_lane_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    fmulx.2d v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x double>, <2 x double>* %A
-  %tmp2 = load <2 x double>, <2 x double>* %B
+  %tmp1 = load <2 x double>, ptr %A
+  %tmp2 = load <2 x double>, ptr %B
   %tmp3 = shufflevector <2 x double> %tmp2, <2 x double> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = call <2 x double> @llvm.aarch64.neon.fmulx.v2f64(<2 x double> %tmp1, <2 x double> %tmp3)
   ret <2 x double> %tmp4
 }
 
-define <4 x i16> @sqdmulh_lane_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @sqdmulh_lane_4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqdmulh_lane_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sqdmulh.4h v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp3)
   ret <4 x i16> %tmp4
 }
 
-define <8 x i16> @sqdmulh_lane_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @sqdmulh_lane_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqdmulh_lane_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    sqdmulh.8h v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i16>, <8 x i16>* %B
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
   %tmp3 = shufflevector <8 x i16> %tmp2, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp3)
   ret <8 x i16> %tmp4
 }
 
-define <2 x i32> @sqdmulh_lane_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @sqdmulh_lane_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqdmulh_lane_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sqdmulh.2s v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = call <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp3)
   ret <2 x i32> %tmp4
 }
 
-define <4 x i32> @sqdmulh_lane_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @sqdmulh_lane_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqdmulh_lane_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    sqdmulh.4s v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i32>, <4 x i32>* %B
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
   %tmp3 = shufflevector <4 x i32> %tmp2, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmulh.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp3)
   ret <4 x i32> %tmp4
@@ -1327,57 +1327,57 @@ define i32 @sqdmulh_lane_1s(i32 %A, <4 x i32> %B) nounwind {
   ret i32 %tmp2
 }
 
-define <4 x i16> @sqrdmulh_lane_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @sqrdmulh_lane_4h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqrdmulh_lane_4h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sqrdmulh.4h v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp3)
   ret <4 x i16> %tmp4
 }
 
-define <8 x i16> @sqrdmulh_lane_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @sqrdmulh_lane_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqrdmulh_lane_8h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    sqrdmulh.8h v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
-  %tmp2 = load <8 x i16>, <8 x i16>* %B
+  %tmp1 = load <8 x i16>, ptr %A
+  %tmp2 = load <8 x i16>, ptr %B
   %tmp3 = shufflevector <8 x i16> %tmp2, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp3)
   ret <8 x i16> %tmp4
 }
 
-define <2 x i32> @sqrdmulh_lane_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @sqrdmulh_lane_2s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqrdmulh_lane_2s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sqrdmulh.2s v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp3)
   ret <2 x i32> %tmp4
 }
 
-define <4 x i32> @sqrdmulh_lane_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @sqrdmulh_lane_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqrdmulh_lane_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    sqrdmulh.4s v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
-  %tmp2 = load <4 x i32>, <4 x i32>* %B
+  %tmp1 = load <4 x i32>, ptr %A
+  %tmp2 = load <4 x i32>, ptr %B
   %tmp3 = shufflevector <4 x i32> %tmp2, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp3)
   ret <4 x i32> %tmp4
@@ -1395,121 +1395,121 @@ define i32 @sqrdmulh_lane_1s(i32 %A, <4 x i32> %B) nounwind {
   ret i32 %tmp2
 }
 
-define <4 x i32> @sqdmull_lane_4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @sqdmull_lane_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqdmull_lane_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sqdmull.4s v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp3)
   ret <4 x i32> %tmp4
 }
 
-define <2 x i64> @sqdmull_lane_2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @sqdmull_lane_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqdmull_lane_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sqdmull.2d v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp3)
   ret <2 x i64> %tmp4
 }
 
-define <4 x i32> @sqdmull2_lane_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <4 x i32> @sqdmull2_lane_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqdmull2_lane_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #8]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sqdmull.4s v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %load1 = load <8 x i16>, <8 x i16>* %A
-  %load2 = load <8 x i16>, <8 x i16>* %B
+  %load1 = load <8 x i16>, ptr %A
+  %load2 = load <8 x i16>, ptr %B
   %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
   ret <4 x i32> %tmp4
 }
 
-define <2 x i64> @sqdmull2_lane_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <2 x i64> @sqdmull2_lane_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sqdmull2_lane_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #8]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    sqdmull.2d v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %load1 = load <4 x i32>, <4 x i32>* %A
-  %load2 = load <4 x i32>, <4 x i32>* %B
+  %load1 = load <4 x i32>, ptr %A
+  %load2 = load <4 x i32>, ptr %B
   %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
   ret <2 x i64> %tmp4
 }
 
-define <4 x i32> @umull_lane_4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @umull_lane_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umull_lane_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    umull.4s v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp3)
   ret <4 x i32> %tmp4
 }
 
-define <2 x i64> @umull_lane_2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @umull_lane_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: umull_lane_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    umull.2d v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp3)
   ret <2 x i64> %tmp4
 }
 
-define <4 x i32> @smull_lane_4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @smull_lane_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull_lane_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    smull.4s v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp4 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp3)
   ret <4 x i32> %tmp4
 }
 
-define <2 x i64> @smull_lane_2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @smull_lane_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull_lane_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    smull.2d v0, v0, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp4 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp3)
   ret <2 x i64> %tmp4
 }
 
-define <4 x i32> @smlal_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @smlal_lane_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: smlal_lane_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -1517,16 +1517,16 @@ define <4 x i32> @smlal_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nou
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    smlal.4s v0, v2, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
   %tmp6 = add <4 x i32> %tmp3, %tmp5
   ret <4 x i32> %tmp6
 }
 
-define <2 x i64> @smlal_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @smlal_lane_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: smlal_lane_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -1534,16 +1534,16 @@ define <2 x i64> @smlal_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nou
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    smlal.2d v0, v2, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
   %tmp6 = add <2 x i64> %tmp3, %tmp5
   ret <2 x i64> %tmp6
 }
 
-define <4 x i32> @sqdmlal_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @sqdmlal_lane_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sqdmlal_lane_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -1551,16 +1551,16 @@ define <4 x i32> @sqdmlal_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) n
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    sqdmlal.4s v0, v2, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
   %tmp6 = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %tmp3, <4 x i32> %tmp5)
   ret <4 x i32> %tmp6
 }
 
-define <2 x i64> @sqdmlal_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @sqdmlal_lane_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sqdmlal_lane_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -1568,16 +1568,16 @@ define <2 x i64> @sqdmlal_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) n
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    sqdmlal.2d v0, v2, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
   %tmp6 = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %tmp3, <2 x i64> %tmp5)
   ret <2 x i64> %tmp6
 }
 
-define <4 x i32> @sqdmlal2_lane_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @sqdmlal2_lane_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sqdmlal2_lane_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x2]
@@ -1585,9 +1585,9 @@ define <4 x i32> @sqdmlal2_lane_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C)
 ; CHECK-NEXT:    ldr d2, [x1]
 ; CHECK-NEXT:    sqdmlal.4s v0, v1, v2[1]
 ; CHECK-NEXT:    ret
-  %load1 = load <8 x i16>, <8 x i16>* %A
-  %load2 = load <8 x i16>, <8 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %load1 = load <8 x i16>, ptr %A
+  %load2 = load <8 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -1595,7 +1595,7 @@ define <4 x i32> @sqdmlal2_lane_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C)
   ret <4 x i32> %tmp6
 }
 
-define <2 x i64> @sqdmlal2_lane_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @sqdmlal2_lane_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sqdmlal2_lane_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x2]
@@ -1603,9 +1603,9 @@ define <2 x i64> @sqdmlal2_lane_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C)
 ; CHECK-NEXT:    ldr d2, [x1]
 ; CHECK-NEXT:    sqdmlal.2d v0, v1, v2[1]
 ; CHECK-NEXT:    ret
-  %load1 = load <4 x i32>, <4 x i32>* %A
-  %load2 = load <4 x i32>, <4 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %load1 = load <4 x i32>, ptr %A
+  %load2 = load <4 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -1715,7 +1715,7 @@ define i64 @sqdmlsl_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind {
 declare i64 @llvm.aarch64.neon.sqsub.i64(i64, i64)
 
 
-define <4 x i32> @umlal_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @umlal_lane_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: umlal_lane_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -1723,16 +1723,16 @@ define <4 x i32> @umlal_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nou
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    umlal.4s v0, v2, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
   %tmp6 = add <4 x i32> %tmp3, %tmp5
   ret <4 x i32> %tmp6
 }
 
-define <2 x i64> @umlal_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @umlal_lane_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: umlal_lane_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -1740,9 +1740,9 @@ define <2 x i64> @umlal_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nou
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    umlal.2d v0, v2, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
   %tmp6 = add <2 x i64> %tmp3, %tmp5
@@ -1750,7 +1750,7 @@ define <2 x i64> @umlal_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nou
 }
 
 
-define <4 x i32> @smlsl_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @smlsl_lane_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: smlsl_lane_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -1758,16 +1758,16 @@ define <4 x i32> @smlsl_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nou
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    smlsl.4s v0, v2, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
   %tmp6 = sub <4 x i32> %tmp3, %tmp5
   ret <4 x i32> %tmp6
 }
 
-define <2 x i64> @smlsl_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @smlsl_lane_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: smlsl_lane_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -1775,16 +1775,16 @@ define <2 x i64> @smlsl_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nou
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    smlsl.2d v0, v2, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
   %tmp6 = sub <2 x i64> %tmp3, %tmp5
   ret <2 x i64> %tmp6
 }
 
-define <4 x i32> @sqdmlsl_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @sqdmlsl_lane_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sqdmlsl_lane_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -1792,16 +1792,16 @@ define <4 x i32> @sqdmlsl_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) n
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    sqdmlsl.4s v0, v2, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
   %tmp6 = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %tmp3, <4 x i32> %tmp5)
   ret <4 x i32> %tmp6
 }
 
-define <2 x i64> @sqdmlsl_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @sqdmlsl_lane_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sqdmlsl_lane_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -1809,16 +1809,16 @@ define <2 x i64> @sqdmlsl_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) n
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    sqdmlsl.2d v0, v2, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
   %tmp6 = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %tmp3, <2 x i64> %tmp5)
   ret <2 x i64> %tmp6
 }
 
-define <4 x i32> @sqdmlsl2_lane_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @sqdmlsl2_lane_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sqdmlsl2_lane_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x2]
@@ -1826,9 +1826,9 @@ define <4 x i32> @sqdmlsl2_lane_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C)
 ; CHECK-NEXT:    ldr d2, [x1]
 ; CHECK-NEXT:    sqdmlsl.4s v0, v1, v2[1]
 ; CHECK-NEXT:    ret
-  %load1 = load <8 x i16>, <8 x i16>* %A
-  %load2 = load <8 x i16>, <8 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %load1 = load <8 x i16>, ptr %A
+  %load2 = load <8 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -1836,7 +1836,7 @@ define <4 x i32> @sqdmlsl2_lane_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C)
   ret <4 x i32> %tmp6
 }
 
-define <2 x i64> @sqdmlsl2_lane_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @sqdmlsl2_lane_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: sqdmlsl2_lane_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x2]
@@ -1844,9 +1844,9 @@ define <2 x i64> @sqdmlsl2_lane_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C)
 ; CHECK-NEXT:    ldr d2, [x1]
 ; CHECK-NEXT:    sqdmlsl.2d v0, v1, v2[1]
 ; CHECK-NEXT:    ret
-  %load1 = load <4 x i32>, <4 x i32>* %A
-  %load2 = load <4 x i32>, <4 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %load1 = load <4 x i32>, ptr %A
+  %load2 = load <4 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -1854,7 +1854,7 @@ define <2 x i64> @sqdmlsl2_lane_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C)
   ret <2 x i64> %tmp6
 }
 
-define <4 x i32> @umlsl_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
+define <4 x i32> @umlsl_lane_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: umlsl_lane_4s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -1862,16 +1862,16 @@ define <4 x i32> @umlsl_lane_4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nou
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    umlsl.4s v0, v2, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
-  %tmp3 = load <4 x i32>, <4 x i32>* %C
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
+  %tmp3 = load <4 x i32>, ptr %C
   %tmp4 = shufflevector <4 x i16> %tmp2, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   %tmp5 = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp4)
   %tmp6 = sub <4 x i32> %tmp3, %tmp5
   ret <4 x i32> %tmp6
 }
 
-define <2 x i64> @umlsl_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
+define <2 x i64> @umlsl_lane_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-LABEL: umlsl_lane_2d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x1]
@@ -1879,9 +1879,9 @@ define <2 x i64> @umlsl_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nou
 ; CHECK-NEXT:    ldr q0, [x2]
 ; CHECK-NEXT:    umlsl.2d v0, v2, v1[1]
 ; CHECK-NEXT:    ret
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
-  %tmp3 = load <2 x i64>, <2 x i64>* %C
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
+  %tmp3 = load <2 x i64>, ptr %C
   %tmp4 = shufflevector <2 x i32> %tmp2, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 1>
   %tmp5 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp4)
   %tmp6 = sub <2 x i64> %tmp3, %tmp5

diff --git a/llvm/test/CodeGen/AArch64/arm64-volatile.ll b/llvm/test/CodeGen/AArch64/arm64-volatile.ll
index 66ecd6a3583d6..e20a15dd6904a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-volatile.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-volatile.ll
@@ -1,27 +1,27 @@
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
-define i64 @normal_load(i64* nocapture %bar) nounwind readonly {
+define i64 @normal_load(ptr nocapture %bar) nounwind readonly {
 ; CHECK: normal_load
 ; CHECK: ldp
 ; CHECK-NEXT: add
 ; CHECK-NEXT: ret
-  %add.ptr = getelementptr inbounds i64, i64* %bar, i64 1
-  %tmp = load i64, i64* %add.ptr, align 8
-  %add.ptr1 = getelementptr inbounds i64, i64* %bar, i64 2
-  %tmp1 = load i64, i64* %add.ptr1, align 8
+  %add.ptr = getelementptr inbounds i64, ptr %bar, i64 1
+  %tmp = load i64, ptr %add.ptr, align 8
+  %add.ptr1 = getelementptr inbounds i64, ptr %bar, i64 2
+  %tmp1 = load i64, ptr %add.ptr1, align 8
   %add = add nsw i64 %tmp1, %tmp
   ret i64 %add
 }
 
-define i64 @volatile_load(i64* nocapture %bar) nounwind {
+define i64 @volatile_load(ptr nocapture %bar) nounwind {
 ; CHECK: volatile_load
 ; CHECK: ldr
 ; CHECK-NEXT: ldr
 ; CHECK-NEXT: add
 ; CHECK-NEXT: ret
-  %add.ptr = getelementptr inbounds i64, i64* %bar, i64 1
-  %tmp = load volatile i64, i64* %add.ptr, align 8
-  %add.ptr1 = getelementptr inbounds i64, i64* %bar, i64 2
-  %tmp1 = load volatile i64, i64* %add.ptr1, align 8
+  %add.ptr = getelementptr inbounds i64, ptr %bar, i64 1
+  %tmp = load volatile i64, ptr %add.ptr, align 8
+  %add.ptr1 = getelementptr inbounds i64, ptr %bar, i64 2
+  %tmp1 = load volatile i64, ptr %add.ptr1, align 8
   %add = add nsw i64 %tmp1, %tmp
   ret i64 %add
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-vqadd.ll b/llvm/test/CodeGen/AArch64/arm64-vqadd.ll
index b7d61056ad9b4..df8864fe6ea32 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vqadd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vqadd.ll
@@ -1,127 +1,127 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @sqadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @sqadd8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqadd8b:
 ;CHECK: sqadd.8b
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.sqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @sqadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @sqadd4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqadd4h:
 ;CHECK: sqadd.4h
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @sqadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @sqadd2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqadd2s:
 ;CHECK: sqadd.2s
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <8 x i8> @uqadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @uqadd8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqadd8b:
 ;CHECK: uqadd.8b
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.uqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @uqadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @uqadd4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqadd4h:
 ;CHECK: uqadd.4h
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.uqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @uqadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @uqadd2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqadd2s:
 ;CHECK: uqadd.2s
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.uqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @sqadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @sqadd16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqadd16b:
 ;CHECK: sqadd.16b
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.sqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @sqadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @sqadd8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqadd8h:
 ;CHECK: sqadd.8h
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.sqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @sqadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @sqadd4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqadd4s:
 ;CHECK: sqadd.4s
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @sqadd2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @sqadd2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqadd2d:
 ;CHECK: sqadd.2d
-	%tmp1 = load <2 x i64>, <2 x i64>* %A
-	%tmp2 = load <2 x i64>, <2 x i64>* %B
+	%tmp1 = load <2 x i64>, ptr %A
+	%tmp2 = load <2 x i64>, ptr %B
 	%tmp3 = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }
 
-define <16 x i8> @uqadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @uqadd16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqadd16b:
 ;CHECK: uqadd.16b
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.uqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @uqadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @uqadd8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqadd8h:
 ;CHECK: uqadd.8h
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.uqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @uqadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @uqadd4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqadd4s:
 ;CHECK: uqadd.4s
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.uqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @uqadd2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @uqadd2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqadd2d:
 ;CHECK: uqadd.2d
-	%tmp1 = load <2 x i64>, <2 x i64>* %A
-	%tmp2 = load <2 x i64>, <2 x i64>* %B
+	%tmp1 = load <2 x i64>, ptr %A
+	%tmp2 = load <2 x i64>, ptr %B
 	%tmp3 = call <2 x i64> @llvm.aarch64.neon.uqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -146,65 +146,65 @@ declare <8 x i16> @llvm.aarch64.neon.uqadd.v8i16(<8 x i16>, <8 x i16>) nounwind
 declare <4 x i32> @llvm.aarch64.neon.uqadd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 declare <2 x i64> @llvm.aarch64.neon.uqadd.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
 
-define <8 x i8> @usqadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @usqadd8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usqadd8b:
 ;CHECK: usqadd.8b
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.usqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @usqadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @usqadd4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usqadd4h:
 ;CHECK: usqadd.4h
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.usqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @usqadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @usqadd2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usqadd2s:
 ;CHECK: usqadd.2s
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.usqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @usqadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @usqadd16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usqadd16b:
 ;CHECK: usqadd.16b
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.usqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @usqadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @usqadd8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usqadd8h:
 ;CHECK: usqadd.8h
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.usqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @usqadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @usqadd4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usqadd4s:
 ;CHECK: usqadd.4s
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.usqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @usqadd2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @usqadd2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usqadd2d:
 ;CHECK: usqadd.2d
-	%tmp1 = load <2 x i64>, <2 x i64>* %A
-	%tmp2 = load <2 x i64>, <2 x i64>* %B
+	%tmp1 = load <2 x i64>, ptr %A
+	%tmp2 = load <2 x i64>, ptr %B
 	%tmp3 = call <2 x i64> @llvm.aarch64.neon.usqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }
@@ -235,65 +235,65 @@ declare <8 x i16> @llvm.aarch64.neon.usqadd.v8i16(<8 x i16>, <8 x i16>) nounwind
 declare <4 x i32> @llvm.aarch64.neon.usqadd.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 declare <2 x i64> @llvm.aarch64.neon.usqadd.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
 
-define <8 x i8> @suqadd8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @suqadd8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: suqadd8b:
 ;CHECK: suqadd.8b
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.suqadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @suqadd4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @suqadd4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: suqadd4h:
 ;CHECK: suqadd.4h
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.suqadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @suqadd2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @suqadd2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: suqadd2s:
 ;CHECK: suqadd.2s
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.suqadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @suqadd16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @suqadd16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: suqadd16b:
 ;CHECK: suqadd.16b
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.suqadd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @suqadd8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @suqadd8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: suqadd8h:
 ;CHECK: suqadd.8h
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.suqadd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @suqadd4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @suqadd4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: suqadd4s:
 ;CHECK: suqadd.4s
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.suqadd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @suqadd2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @suqadd2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: suqadd2d:
 ;CHECK: suqadd.2d
-	%tmp1 = load <2 x i64>, <2 x i64>* %A
-	%tmp2 = load <2 x i64>, <2 x i64>* %B
+	%tmp1 = load <2 x i64>, ptr %A
+	%tmp2 = load <2 x i64>, ptr %B
 	%tmp3 = call <2 x i64> @llvm.aarch64.neon.suqadd.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-vqsub.ll b/llvm/test/CodeGen/AArch64/arm64-vqsub.ll
index 77aac59d14196..dee21291fa149 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vqsub.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vqsub.ll
@@ -1,127 +1,127 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @sqsub8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @sqsub8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqsub8b:
 ;CHECK: sqsub.8b
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.sqsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @sqsub4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @sqsub4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqsub4h:
 ;CHECK: sqsub.4h
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @sqsub2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @sqsub2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqsub2s:
 ;CHECK: sqsub.2s
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <8 x i8> @uqsub8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @uqsub8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqsub8b:
 ;CHECK: uqsub.8b
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.uqsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
 	ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @uqsub4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @uqsub4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqsub4h:
 ;CHECK: uqsub.4h
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.uqsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
 	ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @uqsub2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @uqsub2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqsub2s:
 ;CHECK: uqsub.2s
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.uqsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
 	ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @sqsub16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @sqsub16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqsub16b:
 ;CHECK: sqsub.16b
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.sqsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @sqsub8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @sqsub8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqsub8h:
 ;CHECK: sqsub.8h
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @sqsub4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @sqsub4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqsub4s:
 ;CHECK: sqsub.4s
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @sqsub2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @sqsub2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqsub2d:
 ;CHECK: sqsub.2d
-	%tmp1 = load <2 x i64>, <2 x i64>* %A
-	%tmp2 = load <2 x i64>, <2 x i64>* %B
+	%tmp1 = load <2 x i64>, ptr %A
+	%tmp2 = load <2 x i64>, ptr %B
 	%tmp3 = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }
 
-define <16 x i8> @uqsub16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @uqsub16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqsub16b:
 ;CHECK: uqsub.16b
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.uqsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
 	ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @uqsub8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @uqsub8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqsub8h:
 ;CHECK: uqsub.8h
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.uqsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
 	ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @uqsub4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @uqsub4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqsub4s:
 ;CHECK: uqsub.4s
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.uqsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
 	ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @uqsub2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @uqsub2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqsub2d:
 ;CHECK: uqsub.2d
-	%tmp1 = load <2 x i64>, <2 x i64>* %A
-	%tmp2 = load <2 x i64>, <2 x i64>* %B
+	%tmp1 = load <2 x i64>, ptr %A
+	%tmp2 = load <2 x i64>, ptr %B
 	%tmp3 = call <2 x i64> @llvm.aarch64.neon.uqsub.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
 	ret <2 x i64> %tmp3
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-vselect.ll b/llvm/test/CodeGen/AArch64/arm64-vselect.ll
index b843b8393ebda..45fe47363fc6d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vselect.ll
@@ -4,7 +4,7 @@
 %T0_63 = type <4 x i16>
 %T1_63 = type <4 x i32>
 %T2_63 = type <4 x i1>
-define void @func63(%T1_63* %out, %T0_63 %v0, %T0_63 %v1, %T1_63 %v2, %T1_63 %v3) {
+define void @func63(ptr %out, %T0_63 %v0, %T0_63 %v1, %T1_63 %v2, %T1_63 %v3) {
 ; CHECK-LABEL: func63:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmeq.4h v0, v0, v1
@@ -14,6 +14,6 @@ define void @func63(%T1_63* %out, %T0_63 %v0, %T0_63 %v1, %T1_63 %v2, %T1_63 %v3
 ; CHECK-NEXT:    ret
   %cond = icmp eq %T0_63 %v0, %v1
   %r = select %T2_63 %cond, %T1_63 %v2, %T1_63 %v3
-  store %T1_63 %r, %T1_63* %out
+  store %T1_63 %r, ptr %out
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index 07b257043426d..7805f2917715b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -1,195 +1,195 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -enable-misched=false | FileCheck %s
 
-define <8 x i8> @sqshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @sqshl8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqshl8b:
 ;CHECK: sqshl.8b
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
-        %tmp2 = load <8 x i8>, <8 x i8>* %B
+        %tmp1 = load <8 x i8>, ptr %A
+        %tmp2 = load <8 x i8>, ptr %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @sqshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @sqshl4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqshl4h:
 ;CHECK: sqshl.4h
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
-        %tmp2 = load <4 x i16>, <4 x i16>* %B
+        %tmp1 = load <4 x i16>, ptr %A
+        %tmp2 = load <4 x i16>, ptr %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @sqshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @sqshl2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqshl2s:
 ;CHECK: sqshl.2s
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
-        %tmp2 = load <2 x i32>, <2 x i32>* %B
+        %tmp1 = load <2 x i32>, ptr %A
+        %tmp2 = load <2 x i32>, ptr %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
         ret <2 x i32> %tmp3
 }
 
-define <1 x i64> @sqshl1d(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+define <1 x i64> @sqshl1d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqshl1d:
 ;CHECK: sqshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
-        %tmp2 = load <1 x i64>, <1 x i64>* %B
+        %tmp1 = load <1 x i64>, ptr %A
+        %tmp2 = load <1 x i64>, ptr %B
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
         ret <1 x i64> %tmp3
 }
 
-define <1 x i64> @sqshl1d_constant(<1 x i64>* %A) nounwind {
+define <1 x i64> @sqshl1d_constant(ptr %A) nounwind {
 ;CHECK-LABEL: sqshl1d_constant:
 ;CHECK: sqshl {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
+        %tmp1 = load <1 x i64>, ptr %A
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
         ret <1 x i64> %tmp3
 }
 
-define i64 @sqshl_scalar(i64* %A, i64* %B) nounwind {
+define i64 @sqshl_scalar(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqshl_scalar:
 ;CHECK: sqshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load i64, i64* %A
-        %tmp2 = load i64, i64* %B
+        %tmp1 = load i64, ptr %A
+        %tmp2 = load i64, ptr %B
         %tmp3 = call i64 @llvm.aarch64.neon.sqshl.i64(i64 %tmp1, i64 %tmp2)
         ret i64 %tmp3
 }
 
-define i64 @sqshl_scalar_constant(i64* %A) nounwind {
+define i64 @sqshl_scalar_constant(ptr %A) nounwind {
 ;CHECK-LABEL: sqshl_scalar_constant:
 ;CHECK: sqshl {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load i64, i64* %A
+        %tmp1 = load i64, ptr %A
         %tmp3 = call i64 @llvm.aarch64.neon.sqshl.i64(i64 %tmp1, i64 1)
         ret i64 %tmp3
 }
 
-define <8 x i8> @uqshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @uqshl8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqshl8b:
 ;CHECK: uqshl.8b
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
-        %tmp2 = load <8 x i8>, <8 x i8>* %B
+        %tmp1 = load <8 x i8>, ptr %A
+        %tmp2 = load <8 x i8>, ptr %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @uqshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @uqshl4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqshl4h:
 ;CHECK: uqshl.4h
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
-        %tmp2 = load <4 x i16>, <4 x i16>* %B
+        %tmp1 = load <4 x i16>, ptr %A
+        %tmp2 = load <4 x i16>, ptr %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @uqshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @uqshl2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqshl2s:
 ;CHECK: uqshl.2s
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
-        %tmp2 = load <2 x i32>, <2 x i32>* %B
+        %tmp1 = load <2 x i32>, ptr %A
+        %tmp2 = load <2 x i32>, ptr %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
         ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @sqshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @sqshl16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqshl16b:
 ;CHECK: sqshl.16b
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp1 = load <16 x i8>, ptr %A
+        %tmp2 = load <16 x i8>, ptr %B
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
         ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @sqshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @sqshl8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqshl8h:
 ;CHECK: sqshl.8h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i16>, ptr %B
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @sqshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @sqshl4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqshl4s:
 ;CHECK: sqshl.4s
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i32>, ptr %B
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @sqshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @sqshl2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqshl2d:
 ;CHECK: sqshl.2d
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i64>, ptr %B
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i64> %tmp3
 }
 
-define <16 x i8> @uqshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @uqshl16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqshl16b:
 ;CHECK: uqshl.16b
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp1 = load <16 x i8>, ptr %A
+        %tmp2 = load <16 x i8>, ptr %B
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
         ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @uqshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @uqshl8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqshl8h:
 ;CHECK: uqshl.8h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i16>, ptr %B
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @uqshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @uqshl4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqshl4s:
 ;CHECK: uqshl.4s
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i32>, ptr %B
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @uqshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @uqshl2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqshl2d:
 ;CHECK: uqshl.2d
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i64>, ptr %B
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i64> %tmp3
 }
 
-define <1 x i64> @uqshl1d(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+define <1 x i64> @uqshl1d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqshl1d:
 ;CHECK: uqshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
-        %tmp2 = load <1 x i64>, <1 x i64>* %B
+        %tmp1 = load <1 x i64>, ptr %A
+        %tmp2 = load <1 x i64>, ptr %B
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.uqshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
         ret <1 x i64> %tmp3
 }
 
-define <1 x i64> @uqshl1d_constant(<1 x i64>* %A) nounwind {
+define <1 x i64> @uqshl1d_constant(ptr %A) nounwind {
 ;CHECK-LABEL: uqshl1d_constant:
 ;CHECK: uqshl {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
+        %tmp1 = load <1 x i64>, ptr %A
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.uqshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
         ret <1 x i64> %tmp3
 }
 
-define i64 @uqshl_scalar(i64* %A, i64* %B) nounwind {
+define i64 @uqshl_scalar(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqshl_scalar:
 ;CHECK: uqshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load i64, i64* %A
-        %tmp2 = load i64, i64* %B
+        %tmp1 = load i64, ptr %A
+        %tmp2 = load i64, ptr %B
         %tmp3 = call i64 @llvm.aarch64.neon.uqshl.i64(i64 %tmp1, i64 %tmp2)
         ret i64 %tmp3
 }
 
-define i64 @uqshl_scalar_constant(i64* %A) nounwind {
+define i64 @uqshl_scalar_constant(ptr %A) nounwind {
 ;CHECK-LABEL: uqshl_scalar_constant:
 ;CHECK: uqshl {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load i64, i64* %A
+        %tmp1 = load i64, ptr %A
         %tmp3 = call i64 @llvm.aarch64.neon.uqshl.i64(i64 %tmp1, i64 1)
         ret i64 %tmp3
 }
@@ -217,204 +217,204 @@ declare <8 x i16> @llvm.aarch64.neon.uqshl.v8i16(<8 x i16>, <8 x i16>) nounwind
 declare <4 x i32> @llvm.aarch64.neon.uqshl.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 declare <2 x i64> @llvm.aarch64.neon.uqshl.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
 
-define <8 x i8> @srshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @srshl8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srshl8b:
 ;CHECK: srshl.8b
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
-        %tmp2 = load <8 x i8>, <8 x i8>* %B
+        %tmp1 = load <8 x i8>, ptr %A
+        %tmp2 = load <8 x i8>, ptr %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @srshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @srshl4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srshl4h:
 ;CHECK: srshl.4h
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
-        %tmp2 = load <4 x i16>, <4 x i16>* %B
+        %tmp1 = load <4 x i16>, ptr %A
+        %tmp2 = load <4 x i16>, ptr %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @srshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @srshl2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srshl2s:
 ;CHECK: srshl.2s
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
-        %tmp2 = load <2 x i32>, <2 x i32>* %B
+        %tmp1 = load <2 x i32>, ptr %A
+        %tmp2 = load <2 x i32>, ptr %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
         ret <2 x i32> %tmp3
 }
 
-define <1 x i64> @srshl1d(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+define <1 x i64> @srshl1d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srshl1d:
 ;CHECK: srshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
-        %tmp2 = load <1 x i64>, <1 x i64>* %B
+        %tmp1 = load <1 x i64>, ptr %A
+        %tmp2 = load <1 x i64>, ptr %B
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
         ret <1 x i64> %tmp3
 }
 
-define <1 x i64> @srshl1d_constant(<1 x i64>* %A) nounwind {
+define <1 x i64> @srshl1d_constant(ptr %A) nounwind {
 ;CHECK-LABEL: srshl1d_constant:
 ;CHECK: mov w[[GCONST:[0-9]+]], #1
 ;CHECK: fmov d[[DCONST:[0-9]+]], x[[GCONST]]
 ;CHECK: srshl {{d[0-9]+}}, {{d[0-9]+}}, d[[DCONST]]
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
+        %tmp1 = load <1 x i64>, ptr %A
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
         ret <1 x i64> %tmp3
 }
 
-define i64 @srshl_scalar(i64* %A, i64* %B) nounwind {
+define i64 @srshl_scalar(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srshl_scalar:
 ;CHECK: srshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load i64, i64* %A
-        %tmp2 = load i64, i64* %B
+        %tmp1 = load i64, ptr %A
+        %tmp2 = load i64, ptr %B
         %tmp3 = call i64 @llvm.aarch64.neon.srshl.i64(i64 %tmp1, i64 %tmp2)
         ret i64 %tmp3
 }
 
-define i64 @srshl_scalar_constant(i64* %A) nounwind {
+define i64 @srshl_scalar_constant(ptr %A) nounwind {
 ;CHECK-LABEL: srshl_scalar_constant:
 ;CHECK: mov w[[GCONST:[0-9]+]], #1
 ;CHECK: fmov d[[DCONST:[0-9]+]], x[[GCONST]]
 ;CHECK: srshl {{d[0-9]+}}, {{d[0-9]+}}, d[[DCONST]]
-        %tmp1 = load i64, i64* %A
+        %tmp1 = load i64, ptr %A
         %tmp3 = call i64 @llvm.aarch64.neon.srshl.i64(i64 %tmp1, i64 1)
         ret i64 %tmp3
 }
 
-define <8 x i8> @urshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @urshl8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: urshl8b:
 ;CHECK: urshl.8b
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
-        %tmp2 = load <8 x i8>, <8 x i8>* %B
+        %tmp1 = load <8 x i8>, ptr %A
+        %tmp2 = load <8 x i8>, ptr %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @urshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @urshl4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: urshl4h:
 ;CHECK: urshl.4h
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
-        %tmp2 = load <4 x i16>, <4 x i16>* %B
+        %tmp1 = load <4 x i16>, ptr %A
+        %tmp2 = load <4 x i16>, ptr %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @urshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @urshl2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: urshl2s:
 ;CHECK: urshl.2s
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
-        %tmp2 = load <2 x i32>, <2 x i32>* %B
+        %tmp1 = load <2 x i32>, ptr %A
+        %tmp2 = load <2 x i32>, ptr %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
         ret <2 x i32> %tmp3
 }
 
-define <1 x i64> @urshl1d(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+define <1 x i64> @urshl1d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: urshl1d:
 ;CHECK: urshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
-        %tmp2 = load <1 x i64>, <1 x i64>* %B
+        %tmp1 = load <1 x i64>, ptr %A
+        %tmp2 = load <1 x i64>, ptr %B
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
         ret <1 x i64> %tmp3
 }
 
-define <1 x i64> @urshl1d_constant(<1 x i64>* %A) nounwind {
+define <1 x i64> @urshl1d_constant(ptr %A) nounwind {
 ;CHECK-LABEL: urshl1d_constant:
 ;CHECK: mov w[[GCONST:[0-9]+]], #1
 ;CHECK: fmov d[[DCONST:[0-9]+]], x[[GCONST]]
 ;CHECK: urshl {{d[0-9]+}}, {{d[0-9]+}}, d[[DCONST]]
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
+        %tmp1 = load <1 x i64>, ptr %A
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
         ret <1 x i64> %tmp3
 }
 
-define i64 @urshl_scalar(i64* %A, i64* %B) nounwind {
+define i64 @urshl_scalar(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: urshl_scalar:
 ;CHECK: urshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load i64, i64* %A
-        %tmp2 = load i64, i64* %B
+        %tmp1 = load i64, ptr %A
+        %tmp2 = load i64, ptr %B
         %tmp3 = call i64 @llvm.aarch64.neon.urshl.i64(i64 %tmp1, i64 %tmp2)
         ret i64 %tmp3
 }
 
-define i64 @urshl_scalar_constant(i64* %A) nounwind {
+define i64 @urshl_scalar_constant(ptr %A) nounwind {
 ;CHECK-LABEL: urshl_scalar_constant:
 ;CHECK: mov w[[GCONST:[0-9]+]], #1
 ;CHECK: fmov d[[DCONST:[0-9]+]], x[[GCONST]]
 ;CHECK: urshl {{d[0-9]+}}, {{d[0-9]+}}, d[[DCONST]]
-        %tmp1 = load i64, i64* %A
+        %tmp1 = load i64, ptr %A
         %tmp3 = call i64 @llvm.aarch64.neon.urshl.i64(i64 %tmp1, i64 1)
         ret i64 %tmp3
 }
 
-define <16 x i8> @srshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @srshl16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srshl16b:
 ;CHECK: srshl.16b
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp1 = load <16 x i8>, ptr %A
+        %tmp2 = load <16 x i8>, ptr %B
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
         ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @srshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @srshl8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srshl8h:
 ;CHECK: srshl.8h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i16>, ptr %B
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @srshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @srshl4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srshl4s:
 ;CHECK: srshl.4s
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i32>, ptr %B
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @srshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @srshl2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srshl2d:
 ;CHECK: srshl.2d
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i64>, ptr %B
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i64> %tmp3
 }
 
-define <16 x i8> @urshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @urshl16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: urshl16b:
 ;CHECK: urshl.16b
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp1 = load <16 x i8>, ptr %A
+        %tmp2 = load <16 x i8>, ptr %B
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
         ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @urshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @urshl8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: urshl8h:
 ;CHECK: urshl.8h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i16>, ptr %B
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @urshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @urshl4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: urshl4s:
 ;CHECK: urshl.4s
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i32>, ptr %B
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @urshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @urshl2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: urshl2d:
 ;CHECK: urshl.2d
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i64>, ptr %B
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i64> %tmp3
 }
@@ -441,204 +441,204 @@ declare <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16>, <8 x i16>) nounwind
 declare <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 declare <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
 
-define <8 x i8> @sqrshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @sqrshl8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqrshl8b:
 ;CHECK: sqrshl.8b
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
-        %tmp2 = load <8 x i8>, <8 x i8>* %B
+        %tmp1 = load <8 x i8>, ptr %A
+        %tmp2 = load <8 x i8>, ptr %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @sqrshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @sqrshl4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqrshl4h:
 ;CHECK: sqrshl.4h
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
-        %tmp2 = load <4 x i16>, <4 x i16>* %B
+        %tmp1 = load <4 x i16>, ptr %A
+        %tmp2 = load <4 x i16>, ptr %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @sqrshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @sqrshl2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqrshl2s:
 ;CHECK: sqrshl.2s
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
-        %tmp2 = load <2 x i32>, <2 x i32>* %B
+        %tmp1 = load <2 x i32>, ptr %A
+        %tmp2 = load <2 x i32>, ptr %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
         ret <2 x i32> %tmp3
 }
 
-define <8 x i8> @uqrshl8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @uqrshl8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqrshl8b:
 ;CHECK: uqrshl.8b
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
-        %tmp2 = load <8 x i8>, <8 x i8>* %B
+        %tmp1 = load <8 x i8>, ptr %A
+        %tmp2 = load <8 x i8>, ptr %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqrshl.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @uqrshl4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @uqrshl4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqrshl4h:
 ;CHECK: uqrshl.4h
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
-        %tmp2 = load <4 x i16>, <4 x i16>* %B
+        %tmp1 = load <4 x i16>, ptr %A
+        %tmp2 = load <4 x i16>, ptr %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqrshl.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @uqrshl2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @uqrshl2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqrshl2s:
 ;CHECK: uqrshl.2s
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
-        %tmp2 = load <2 x i32>, <2 x i32>* %B
+        %tmp1 = load <2 x i32>, ptr %A
+        %tmp2 = load <2 x i32>, ptr %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqrshl.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
         ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @sqrshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @sqrshl16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqrshl16b:
 ;CHECK: sqrshl.16b
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp1 = load <16 x i8>, ptr %A
+        %tmp2 = load <16 x i8>, ptr %B
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqrshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
         ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @sqrshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @sqrshl8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqrshl8h:
 ;CHECK: sqrshl.8h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i16>, ptr %B
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqrshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @sqrshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @sqrshl4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqrshl4s:
 ;CHECK: sqrshl.4s
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i32>, ptr %B
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqrshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @sqrshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @sqrshl2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqrshl2d:
 ;CHECK: sqrshl.2d
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i64>, ptr %B
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqrshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i64> %tmp3
 }
 
-define <1 x i64> @sqrshl1d(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+define <1 x i64> @sqrshl1d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqrshl1d:
 ;CHECK: sqrshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
-        %tmp2 = load <1 x i64>, <1 x i64>* %B
+        %tmp1 = load <1 x i64>, ptr %A
+        %tmp2 = load <1 x i64>, ptr %B
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqrshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
         ret <1 x i64> %tmp3
 }
 
-define <1 x i64> @sqrshl1d_constant(<1 x i64>* %A) nounwind {
+define <1 x i64> @sqrshl1d_constant(ptr %A) nounwind {
 ;CHECK-LABEL: sqrshl1d_constant:
 ;CHECK: mov w[[GCONST:[0-9]+]], #1
 ;CHECK: fmov d[[DCONST:[0-9]+]], x[[GCONST]]
 ;CHECK: sqrshl {{d[0-9]+}}, {{d[0-9]+}}, d[[DCONST]]
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
+        %tmp1 = load <1 x i64>, ptr %A
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqrshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
         ret <1 x i64> %tmp3
 }
 
-define i64 @sqrshl_scalar(i64* %A, i64* %B) nounwind {
+define i64 @sqrshl_scalar(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sqrshl_scalar:
 ;CHECK: sqrshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load i64, i64* %A
-        %tmp2 = load i64, i64* %B
+        %tmp1 = load i64, ptr %A
+        %tmp2 = load i64, ptr %B
         %tmp3 = call i64 @llvm.aarch64.neon.sqrshl.i64(i64 %tmp1, i64 %tmp2)
         ret i64 %tmp3
 }
 
-define i64 @sqrshl_scalar_constant(i64* %A) nounwind {
+define i64 @sqrshl_scalar_constant(ptr %A) nounwind {
 ;CHECK-LABEL: sqrshl_scalar_constant:
 ;CHECK: mov w[[GCONST:[0-9]+]], #1
 ;CHECK: fmov d[[DCONST:[0-9]+]], x[[GCONST]]
 ;CHECK: sqrshl {{d[0-9]+}}, {{d[0-9]+}}, d[[DCONST]]
-        %tmp1 = load i64, i64* %A
+        %tmp1 = load i64, ptr %A
         %tmp3 = call i64 @llvm.aarch64.neon.sqrshl.i64(i64 %tmp1, i64 1)
         ret i64 %tmp3
 }
 
-define <16 x i8> @uqrshl16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @uqrshl16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqrshl16b:
 ;CHECK: uqrshl.16b
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp1 = load <16 x i8>, ptr %A
+        %tmp2 = load <16 x i8>, ptr %B
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqrshl.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
         ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @uqrshl8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @uqrshl8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqrshl8h:
 ;CHECK: uqrshl.8h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i16>, ptr %B
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqrshl.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @uqrshl4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @uqrshl4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqrshl4s:
 ;CHECK: uqrshl.4s
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i32>, ptr %B
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqrshl.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @uqrshl2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @uqrshl2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqrshl2d:
 ;CHECK: uqrshl.2d
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i64>, ptr %B
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqrshl.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i64> %tmp3
 }
 
-define <1 x i64> @uqrshl1d(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+define <1 x i64> @uqrshl1d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqrshl1d:
 ;CHECK: uqrshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
-        %tmp2 = load <1 x i64>, <1 x i64>* %B
+        %tmp1 = load <1 x i64>, ptr %A
+        %tmp2 = load <1 x i64>, ptr %B
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.uqrshl.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
         ret <1 x i64> %tmp3
 }
 
-define <1 x i64> @uqrshl1d_constant(<1 x i64>* %A) nounwind {
+define <1 x i64> @uqrshl1d_constant(ptr %A) nounwind {
 ;CHECK-LABEL: uqrshl1d_constant:
 ;CHECK: mov w[[GCONST:[0-9]+]], #1
 ;CHECK: fmov d[[DCONST:[0-9]+]], x[[GCONST]]
 ;CHECK: uqrshl {{d[0-9]+}}, {{d[0-9]+}}, d[[DCONST]]
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
+        %tmp1 = load <1 x i64>, ptr %A
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.uqrshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
         ret <1 x i64> %tmp3
 }
 
-define i64 @uqrshl_scalar(i64* %A, i64* %B) nounwind {
+define i64 @uqrshl_scalar(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: uqrshl_scalar:
 ;CHECK: uqrshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-        %tmp1 = load i64, i64* %A
-        %tmp2 = load i64, i64* %B
+        %tmp1 = load i64, ptr %A
+        %tmp2 = load i64, ptr %B
         %tmp3 = call i64 @llvm.aarch64.neon.uqrshl.i64(i64 %tmp1, i64 %tmp2)
         ret i64 %tmp3
 }
 
-define i64 @uqrshl_scalar_constant(i64* %A) nounwind {
+define i64 @uqrshl_scalar_constant(ptr %A) nounwind {
 ;CHECK-LABEL: uqrshl_scalar_constant:
 ;CHECK: mov w[[GCONST:[0-9]+]], #1
 ;CHECK: fmov d[[DCONST:[0-9]+]], x[[GCONST]]
 ;CHECK: uqrshl {{d[0-9]+}}, {{d[0-9]+}}, d[[DCONST]]
-        %tmp1 = load i64, i64* %A
+        %tmp1 = load i64, ptr %A
         %tmp3 = call i64 @llvm.aarch64.neon.uqrshl.i64(i64 %tmp1, i64 1)
         ret i64 %tmp3
 }
@@ -665,218 +665,218 @@ declare <8 x i16> @llvm.aarch64.neon.uqrshl.v8i16(<8 x i16>, <8 x i16>) nounwind
 declare <4 x i32> @llvm.aarch64.neon.uqrshl.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 declare <2 x i64> @llvm.aarch64.neon.uqrshl.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
 
-define <8 x i8> @urshr8b(<8 x i8>* %A) nounwind {
+define <8 x i8> @urshr8b(ptr %A) nounwind {
 ;CHECK-LABEL: urshr8b:
 ;CHECK: urshr.8b
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp1 = load <8 x i8>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @urshr4h(<4 x i16>* %A) nounwind {
+define <4 x i16> @urshr4h(ptr %A) nounwind {
 ;CHECK-LABEL: urshr4h:
 ;CHECK: urshr.4h
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp1 = load <4 x i16>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @urshr2s(<2 x i32>* %A) nounwind {
+define <2 x i32> @urshr2s(ptr %A) nounwind {
 ;CHECK-LABEL: urshr2s:
 ;CHECK: urshr.2s
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp1 = load <2 x i32>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
         ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @urshr16b(<16 x i8>* %A) nounwind {
+define <16 x i8> @urshr16b(ptr %A) nounwind {
 ;CHECK-LABEL: urshr16b:
 ;CHECK: urshr.16b
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp1 = load <16 x i8>, ptr %A
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
         ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @urshr8h(<8 x i16>* %A) nounwind {
+define <8 x i16> @urshr8h(ptr %A) nounwind {
 ;CHECK-LABEL: urshr8h:
 ;CHECK: urshr.8h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @urshr4s(<4 x i32>* %A) nounwind {
+define <4 x i32> @urshr4s(ptr %A) nounwind {
 ;CHECK-LABEL: urshr4s:
 ;CHECK: urshr.4s
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @urshr2d(<2 x i64>* %A) nounwind {
+define <2 x i64> @urshr2d(ptr %A) nounwind {
 ;CHECK-LABEL: urshr2d:
 ;CHECK: urshr.2d
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
         ret <2 x i64> %tmp3
 }
 
-define <1 x i64> @urshr1d(<1 x i64>* %A) nounwind {
+define <1 x i64> @urshr1d(ptr %A) nounwind {
 ;CHECK-LABEL: urshr1d:
 ;CHECK: urshr {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
+        %tmp1 = load <1 x i64>, ptr %A
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 -1>)
         ret <1 x i64> %tmp3
 }
 
-define i64 @urshr_scalar(i64* %A) nounwind {
+define i64 @urshr_scalar(ptr %A) nounwind {
 ;CHECK-LABEL: urshr_scalar:
 ;CHECK: urshr {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load i64, i64* %A
+        %tmp1 = load i64, ptr %A
         %tmp3 = call i64 @llvm.aarch64.neon.urshl.i64(i64 %tmp1, i64 -1)
         ret i64 %tmp3
 }
 
-define <8 x i8> @srshr8b(<8 x i8>* %A) nounwind {
+define <8 x i8> @srshr8b(ptr %A) nounwind {
 ;CHECK-LABEL: srshr8b:
 ;CHECK: srshr.8b
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp1 = load <8 x i8>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @srshr4h(<4 x i16>* %A) nounwind {
+define <4 x i16> @srshr4h(ptr %A) nounwind {
 ;CHECK-LABEL: srshr4h:
 ;CHECK: srshr.4h
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp1 = load <4 x i16>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @srshr2s(<2 x i32>* %A) nounwind {
+define <2 x i32> @srshr2s(ptr %A) nounwind {
 ;CHECK-LABEL: srshr2s:
 ;CHECK: srshr.2s
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp1 = load <2 x i32>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
         ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @srshr16b(<16 x i8>* %A) nounwind {
+define <16 x i8> @srshr16b(ptr %A) nounwind {
 ;CHECK-LABEL: srshr16b:
 ;CHECK: srshr.16b
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp1 = load <16 x i8>, ptr %A
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
         ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @srshr8h(<8 x i16>* %A) nounwind {
+define <8 x i16> @srshr8h(ptr %A) nounwind {
 ;CHECK-LABEL: srshr8h:
 ;CHECK: srshr.8h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @srshr4s(<4 x i32>* %A) nounwind {
+define <4 x i32> @srshr4s(ptr %A) nounwind {
 ;CHECK-LABEL: srshr4s:
 ;CHECK: srshr.4s
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @srshr2d(<2 x i64>* %A) nounwind {
+define <2 x i64> @srshr2d(ptr %A) nounwind {
 ;CHECK-LABEL: srshr2d:
 ;CHECK: srshr.2d
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
         ret <2 x i64> %tmp3
 }
 
-define <1 x i64> @srshr1d(<1 x i64>* %A) nounwind {
+define <1 x i64> @srshr1d(ptr %A) nounwind {
 ;CHECK-LABEL: srshr1d:
 ;CHECK: srshr {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
+        %tmp1 = load <1 x i64>, ptr %A
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 -1>)
         ret <1 x i64> %tmp3
 }
 
-define i64 @srshr_scalar(i64* %A) nounwind {
+define i64 @srshr_scalar(ptr %A) nounwind {
 ;CHECK-LABEL: srshr_scalar:
 ;CHECK: srshr {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load i64, i64* %A
+        %tmp1 = load i64, ptr %A
         %tmp3 = call i64 @llvm.aarch64.neon.srshl.i64(i64 %tmp1, i64 -1)
         ret i64 %tmp3
 }
 
-define <8 x i8> @sqshlu8b(<8 x i8>* %A) nounwind {
+define <8 x i8> @sqshlu8b(ptr %A) nounwind {
 ;CHECK-LABEL: sqshlu8b:
 ;CHECK: sqshlu.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp1 = load <8 x i8>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshlu.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @sqshlu4h(<4 x i16>* %A) nounwind {
+define <4 x i16> @sqshlu4h(ptr %A) nounwind {
 ;CHECK-LABEL: sqshlu4h:
 ;CHECK: sqshlu.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp1 = load <4 x i16>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshlu.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @sqshlu2s(<2 x i32>* %A) nounwind {
+define <2 x i32> @sqshlu2s(ptr %A) nounwind {
 ;CHECK-LABEL: sqshlu2s:
 ;CHECK: sqshlu.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp1 = load <2 x i32>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshlu.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 1, i32 1>)
         ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @sqshlu16b(<16 x i8>* %A) nounwind {
+define <16 x i8> @sqshlu16b(ptr %A) nounwind {
 ;CHECK-LABEL: sqshlu16b:
 ;CHECK: sqshlu.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp1 = load <16 x i8>, ptr %A
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqshlu.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
         ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @sqshlu8h(<8 x i16>* %A) nounwind {
+define <8 x i16> @sqshlu8h(ptr %A) nounwind {
 ;CHECK-LABEL: sqshlu8h:
 ;CHECK: sqshlu.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqshlu.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @sqshlu4s(<4 x i32>* %A) nounwind {
+define <4 x i32> @sqshlu4s(ptr %A) nounwind {
 ;CHECK-LABEL: sqshlu4s:
 ;CHECK: sqshlu.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqshlu.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @sqshlu2d(<2 x i64>* %A) nounwind {
+define <2 x i64> @sqshlu2d(ptr %A) nounwind {
 ;CHECK-LABEL: sqshlu2d:
 ;CHECK: sqshlu.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqshlu.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 1, i64 1>)
         ret <2 x i64> %tmp3
 }
 
-define <1 x i64> @sqshlu1d_constant(<1 x i64>* %A) nounwind {
+define <1 x i64> @sqshlu1d_constant(ptr %A) nounwind {
 ;CHECK-LABEL: sqshlu1d_constant:
 ;CHECK: sqshlu {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
+        %tmp1 = load <1 x i64>, ptr %A
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.sqshlu.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 1>)
         ret <1 x i64> %tmp3
 }
 
-define i64 @sqshlu_scalar_constant(i64* %A) nounwind {
+define i64 @sqshlu_scalar_constant(ptr %A) nounwind {
 ;CHECK-LABEL: sqshlu_scalar_constant:
 ;CHECK: sqshlu {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load i64, i64* %A
+        %tmp1 = load i64, ptr %A
         %tmp3 = call i64 @llvm.aarch64.neon.sqshlu.i64(i64 %tmp1, i64 1)
         ret i64 %tmp3
 }
@@ -892,55 +892,55 @@ declare <8 x i16> @llvm.aarch64.neon.sqshlu.v8i16(<8 x i16>, <8 x i16>) nounwind
 declare <4 x i32> @llvm.aarch64.neon.sqshlu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 declare <2 x i64> @llvm.aarch64.neon.sqshlu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
 
-define <8 x i8> @rshrn8b(<8 x i16>* %A) nounwind {
+define <8 x i8> @rshrn8b(ptr %A) nounwind {
 ;CHECK-LABEL: rshrn8b:
 ;CHECK: rshrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> %tmp1, i32 1)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @rshrn4h(<4 x i32>* %A) nounwind {
+define <4 x i16> @rshrn4h(ptr %A) nounwind {
 ;CHECK-LABEL: rshrn4h:
 ;CHECK: rshrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> %tmp1, i32 1)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @rshrn2s(<2 x i64>* %A) nounwind {
+define <2 x i32> @rshrn2s(ptr %A) nounwind {
 ;CHECK-LABEL: rshrn2s:
 ;CHECK: rshrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> %tmp1, i32 1)
         ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @rshrn16b(<8 x i8> *%ret, <8 x i16>* %A) nounwind {
+define <16 x i8> @rshrn16b(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: rshrn16b:
 ;CHECK: rshrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>, <8 x i8>* %ret
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %out = load <8 x i8>, ptr %ret
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.rshrn.v8i8(<8 x i16> %tmp1, i32 1)
         %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %tmp4
 }
 
-define <8 x i16> @rshrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
+define <8 x i16> @rshrn8h(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: rshrn8h:
 ;CHECK: rshrn2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>, <4 x i16>* %ret
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %out = load <4 x i16>, ptr %ret
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32> %tmp1, i32 1)
         %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
         ret <8 x i16> %tmp4
 }
 
-define <4 x i32> @rshrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
+define <4 x i32> @rshrn4s(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: rshrn4s:
 ;CHECK: rshrn2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>, <2 x i32>* %ret
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %out = load <2 x i32>, ptr %ret
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64> %tmp1, i32 1)
         %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
         ret <4 x i32> %tmp4
@@ -950,60 +950,60 @@ declare <8 x i8>  @llvm.aarch64.neon.rshrn.v8i8(<8 x i16>, i32) nounwind readnon
 declare <4 x i16> @llvm.aarch64.neon.rshrn.v4i16(<4 x i32>, i32) nounwind readnone
 declare <2 x i32> @llvm.aarch64.neon.rshrn.v2i32(<2 x i64>, i32) nounwind readnone
 
-define <8 x i8> @shrn8b(<8 x i16>* %A) nounwind {
+define <8 x i8> @shrn8b(ptr %A) nounwind {
 ;CHECK-LABEL: shrn8b:
 ;CHECK: shrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp2 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
         %tmp3 = trunc <8 x i16> %tmp2 to <8 x i8>
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @shrn4h(<4 x i32>* %A) nounwind {
+define <4 x i16> @shrn4h(ptr %A) nounwind {
 ;CHECK-LABEL: shrn4h:
 ;CHECK: shrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp2 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
         %tmp3 = trunc <4 x i32> %tmp2 to <4 x i16>
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @shrn2s(<2 x i64>* %A) nounwind {
+define <2 x i32> @shrn2s(ptr %A) nounwind {
 ;CHECK-LABEL: shrn2s:
 ;CHECK: shrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp2 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
         %tmp3 = trunc <2 x i64> %tmp2 to <2 x i32>
         ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @shrn16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
+define <16 x i8> @shrn16b(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: shrn16b:
 ;CHECK: shrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>, <8 x i8>* %ret
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %out = load <8 x i8>, ptr %ret
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp2 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
         %tmp3 = trunc <8 x i16> %tmp2 to <8 x i8>
         %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %tmp4
 }
 
-define <8 x i16> @shrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
+define <8 x i16> @shrn8h(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: shrn8h:
 ;CHECK: shrn2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>, <4 x i16>* %ret
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %out = load <4 x i16>, ptr %ret
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp2 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
         %tmp3 = trunc <4 x i32> %tmp2 to <4 x i16>
         %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
         ret <8 x i16> %tmp4
 }
 
-define <4 x i32> @shrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
+define <4 x i32> @shrn4s(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: shrn4s:
 ;CHECK: shrn2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>, <2 x i32>* %ret
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %out = load <2 x i32>, ptr %ret
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp2 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
         %tmp3 = trunc <2 x i64> %tmp2 to <2 x i32>
         %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1021,56 +1021,56 @@ define i32 @sqshrn1s(i64 %A) nounwind {
   ret i32 %tmp
 }
 
-define <8 x i8> @sqshrn8b(<8 x i16>* %A) nounwind {
+define <8 x i8> @sqshrn8b(ptr %A) nounwind {
 ;CHECK-LABEL: sqshrn8b:
 ;CHECK: sqshrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16> %tmp1, i32 1)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @sqshrn4h(<4 x i32>* %A) nounwind {
+define <4 x i16> @sqshrn4h(ptr %A) nounwind {
 ;CHECK-LABEL: sqshrn4h:
 ;CHECK: sqshrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> %tmp1, i32 1)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @sqshrn2s(<2 x i64>* %A) nounwind {
+define <2 x i32> @sqshrn2s(ptr %A) nounwind {
 ;CHECK-LABEL: sqshrn2s:
 ;CHECK: sqshrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64> %tmp1, i32 1)
         ret <2 x i32> %tmp3
 }
 
 
-define <16 x i8> @sqshrn16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
+define <16 x i8> @sqshrn16b(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: sqshrn16b:
 ;CHECK: sqshrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>, <8 x i8>* %ret
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %out = load <8 x i8>, ptr %ret
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrn.v8i8(<8 x i16> %tmp1, i32 1)
         %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %tmp4
 }
 
-define <8 x i16> @sqshrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
+define <8 x i16> @sqshrn8h(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: sqshrn8h:
 ;CHECK: sqshrn2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>, <4 x i16>* %ret
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %out = load <4 x i16>, ptr %ret
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrn.v4i16(<4 x i32> %tmp1, i32 1)
         %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
         ret <8 x i16> %tmp4
 }
 
-define <4 x i32> @sqshrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
+define <4 x i32> @sqshrn4s(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: sqshrn4s:
 ;CHECK: sqshrn2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>, <2 x i32>* %ret
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %out = load <2 x i32>, ptr %ret
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrn.v2i32(<2 x i64> %tmp1, i32 1)
         %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
         ret <4 x i32> %tmp4
@@ -1088,55 +1088,55 @@ define i32 @sqshrun1s(i64 %A) nounwind {
   ret i32 %tmp
 }
 
-define <8 x i8> @sqshrun8b(<8 x i16>* %A) nounwind {
+define <8 x i8> @sqshrun8b(ptr %A) nounwind {
 ;CHECK-LABEL: sqshrun8b:
 ;CHECK: sqshrun.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16> %tmp1, i32 1)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @sqshrun4h(<4 x i32>* %A) nounwind {
+define <4 x i16> @sqshrun4h(ptr %A) nounwind {
 ;CHECK-LABEL: sqshrun4h:
 ;CHECK: sqshrun.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32> %tmp1, i32 1)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @sqshrun2s(<2 x i64>* %A) nounwind {
+define <2 x i32> @sqshrun2s(ptr %A) nounwind {
 ;CHECK-LABEL: sqshrun2s:
 ;CHECK: sqshrun.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64> %tmp1, i32 1)
         ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @sqshrun16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
+define <16 x i8> @sqshrun16b(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: sqshrun16b:
 ;CHECK: sqshrun2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>, <8 x i8>* %ret
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %out = load <8 x i8>, ptr %ret
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshrun.v8i8(<8 x i16> %tmp1, i32 1)
         %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %tmp4
 }
 
-define <8 x i16> @sqshrun8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
+define <8 x i16> @sqshrun8h(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: sqshrun8h:
 ;CHECK: sqshrun2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>, <4 x i16>* %ret
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %out = load <4 x i16>, ptr %ret
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshrun.v4i16(<4 x i32> %tmp1, i32 1)
         %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
         ret <8 x i16> %tmp4
 }
 
-define <4 x i32> @sqshrun4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
+define <4 x i32> @sqshrun4s(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: sqshrun4s:
 ;CHECK: sqshrun2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>, <2 x i32>* %ret
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %out = load <2 x i32>, ptr %ret
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshrun.v2i32(<2 x i64> %tmp1, i32 1)
         %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
         ret <4 x i32> %tmp4
@@ -1154,55 +1154,55 @@ define i32 @sqrshrn1s(i64 %A) nounwind {
   ret i32 %tmp
 }
 
-define <8 x i8> @sqrshrn8b(<8 x i16>* %A) nounwind {
+define <8 x i8> @sqrshrn8b(ptr %A) nounwind {
 ;CHECK-LABEL: sqrshrn8b:
 ;CHECK: sqrshrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @sqrshrn4h(<4 x i32>* %A) nounwind {
+define <4 x i16> @sqrshrn4h(ptr %A) nounwind {
 ;CHECK-LABEL: sqrshrn4h:
 ;CHECK: sqrshrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @sqrshrn2s(<2 x i64>* %A) nounwind {
+define <2 x i32> @sqrshrn2s(ptr %A) nounwind {
 ;CHECK-LABEL: sqrshrn2s:
 ;CHECK: sqrshrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
         ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @sqrshrn16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
+define <16 x i8> @sqrshrn16b(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: sqrshrn16b:
 ;CHECK: sqrshrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>, <8 x i8>* %ret
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %out = load <8 x i8>, ptr %ret
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
         %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %tmp4
 }
 
-define <8 x i16> @sqrshrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
+define <8 x i16> @sqrshrn8h(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: sqrshrn8h:
 ;CHECK: sqrshrn2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>, <4 x i16>* %ret
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %out = load <4 x i16>, ptr %ret
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
         %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
         ret <8 x i16> %tmp4
 }
 
-define <4 x i32> @sqrshrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
+define <4 x i32> @sqrshrn4s(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: sqrshrn4s:
 ;CHECK: sqrshrn2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>, <2 x i32>* %ret
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %out = load <2 x i32>, ptr %ret
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
         %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
         ret <4 x i32> %tmp4
@@ -1220,55 +1220,55 @@ define i32 @sqrshrun1s(i64 %A) nounwind {
   ret i32 %tmp
 }
 
-define <8 x i8> @sqrshrun8b(<8 x i16>* %A) nounwind {
+define <8 x i8> @sqrshrun8b(ptr %A) nounwind {
 ;CHECK-LABEL: sqrshrun8b:
 ;CHECK: sqrshrun.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> %tmp1, i32 1)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @sqrshrun4h(<4 x i32>* %A) nounwind {
+define <4 x i16> @sqrshrun4h(ptr %A) nounwind {
 ;CHECK-LABEL: sqrshrun4h:
 ;CHECK: sqrshrun.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> %tmp1, i32 1)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @sqrshrun2s(<2 x i64>* %A) nounwind {
+define <2 x i32> @sqrshrun2s(ptr %A) nounwind {
 ;CHECK-LABEL: sqrshrun2s:
 ;CHECK: sqrshrun.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> %tmp1, i32 1)
         ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @sqrshrun16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
+define <16 x i8> @sqrshrun16b(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: sqrshrun16b:
 ;CHECK: sqrshrun2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>, <8 x i8>* %ret
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %out = load <8 x i8>, ptr %ret
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqrshrun.v8i8(<8 x i16> %tmp1, i32 1)
         %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %tmp4
 }
 
-define <8 x i16> @sqrshrun8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
+define <8 x i16> @sqrshrun8h(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: sqrshrun8h:
 ;CHECK: sqrshrun2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>, <4 x i16>* %ret
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %out = load <4 x i16>, ptr %ret
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqrshrun.v4i16(<4 x i32> %tmp1, i32 1)
         %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
         ret <8 x i16> %tmp4
 }
 
-define <4 x i32> @sqrshrun4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
+define <4 x i32> @sqrshrun4s(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: sqrshrun4s:
 ;CHECK: sqrshrun2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>, <2 x i32>* %ret
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %out = load <2 x i32>, ptr %ret
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqrshrun.v2i32(<2 x i64> %tmp1, i32 1)
         %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
         ret <4 x i32> %tmp4
@@ -1286,55 +1286,55 @@ define i32 @uqrshrn1s(i64 %A) nounwind {
   ret i32 %tmp
 }
 
-define <8 x i8> @uqrshrn8b(<8 x i16>* %A) nounwind {
+define <8 x i8> @uqrshrn8b(ptr %A) nounwind {
 ;CHECK-LABEL: uqrshrn8b:
 ;CHECK: uqrshrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @uqrshrn4h(<4 x i32>* %A) nounwind {
+define <4 x i16> @uqrshrn4h(ptr %A) nounwind {
 ;CHECK-LABEL: uqrshrn4h:
 ;CHECK: uqrshrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @uqrshrn2s(<2 x i64>* %A) nounwind {
+define <2 x i32> @uqrshrn2s(ptr %A) nounwind {
 ;CHECK-LABEL: uqrshrn2s:
 ;CHECK: uqrshrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
         ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @uqrshrn16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
+define <16 x i8> @uqrshrn16b(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: uqrshrn16b:
 ;CHECK: uqrshrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>, <8 x i8>* %ret
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %out = load <8 x i8>, ptr %ret
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqrshrn.v8i8(<8 x i16> %tmp1, i32 1)
         %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %tmp4
 }
 
-define <8 x i16> @uqrshrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
+define <8 x i16> @uqrshrn8h(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: uqrshrn8h:
 ;CHECK: uqrshrn2.8h v0, {{v[0-9]+}}, #1
-        %out = load <4 x i16>, <4 x i16>* %ret
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %out = load <4 x i16>, ptr %ret
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqrshrn.v4i16(<4 x i32> %tmp1, i32 1)
         %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
         ret <8 x i16> %tmp4
 }
 
-define <4 x i32> @uqrshrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
+define <4 x i32> @uqrshrn4s(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: uqrshrn4s:
 ;CHECK: uqrshrn2.4s v0, {{v[0-9]+}}, #1
-        %out = load <2 x i32>, <2 x i32>* %ret
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %out = load <2 x i32>, ptr %ret
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqrshrn.v2i32(<2 x i64> %tmp1, i32 1)
         %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
         ret <4 x i32> %tmp4
@@ -1352,55 +1352,55 @@ define i32 @uqshrn1s(i64 %A) nounwind {
   ret i32 %tmp
 }
 
-define <8 x i8> @uqshrn8b(<8 x i16>* %A) nounwind {
+define <8 x i8> @uqshrn8b(ptr %A) nounwind {
 ;CHECK-LABEL: uqshrn8b:
 ;CHECK: uqshrn.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16> %tmp1, i32 1)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @uqshrn4h(<4 x i32>* %A) nounwind {
+define <4 x i16> @uqshrn4h(ptr %A) nounwind {
 ;CHECK-LABEL: uqshrn4h:
 ;CHECK: uqshrn.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32> %tmp1, i32 1)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @uqshrn2s(<2 x i64>* %A) nounwind {
+define <2 x i32> @uqshrn2s(ptr %A) nounwind {
 ;CHECK-LABEL: uqshrn2s:
 ;CHECK: uqshrn.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64> %tmp1, i32 1)
         ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @uqshrn16b(<8 x i8>* %ret, <8 x i16>* %A) nounwind {
+define <16 x i8> @uqshrn16b(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: uqshrn16b:
 ;CHECK: uqshrn2.16b v0, {{v[0-9]+}}, #1
-        %out = load <8 x i8>, <8 x i8>* %ret
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %out = load <8 x i8>, ptr %ret
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16> %tmp1, i32 1)
         %tmp4 = shufflevector <8 x i8> %out, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %tmp4
 }
 
-define <8 x i16> @uqshrn8h(<4 x i16>* %ret, <4 x i32>* %A) nounwind {
+define <8 x i16> @uqshrn8h(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: uqshrn8h:
 ;CHECK: uqshrn2.8h v0, {{v[0-9]+}}, #1
-  %out = load <4 x i16>, <4 x i16>* %ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
+  %out = load <4 x i16>, ptr %ret
+  %tmp1 = load <4 x i32>, ptr %A
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32> %tmp1, i32 1)
   %tmp4 = shufflevector <4 x i16> %out, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   ret <8 x i16> %tmp4
 }
 
-define <4 x i32> @uqshrn4s(<2 x i32>* %ret, <2 x i64>* %A) nounwind {
+define <4 x i32> @uqshrn4s(ptr %ret, ptr %A) nounwind {
 ;CHECK-LABEL: uqshrn4s:
 ;CHECK: uqshrn2.4s v0, {{v[0-9]+}}, #1
-  %out = load <2 x i32>, <2 x i32>* %ret
-  %tmp1 = load <2 x i64>, <2 x i64>* %A
+  %out = load <2 x i32>, ptr %ret
+  %tmp1 = load <2 x i64>, ptr %A
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64> %tmp1, i32 1)
   %tmp4 = shufflevector <2 x i32> %out, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x i32> %tmp4
@@ -1411,57 +1411,57 @@ declare <8 x i8>  @llvm.aarch64.neon.uqshrn.v8i8(<8 x i16>, i32) nounwind readno
 declare <4 x i16> @llvm.aarch64.neon.uqshrn.v4i16(<4 x i32>, i32) nounwind readnone
 declare <2 x i32> @llvm.aarch64.neon.uqshrn.v2i32(<2 x i64>, i32) nounwind readnone
 
-define <8 x i16> @ushll8h(<8 x i8>* %A) nounwind {
+define <8 x i16> @ushll8h(ptr %A) nounwind {
 ;CHECK-LABEL: ushll8h:
 ;CHECK: ushll.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp1 = load <8 x i8>, ptr %A
         %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
         %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @ushll4s(<4 x i16>* %A) nounwind {
+define <4 x i32> @ushll4s(ptr %A) nounwind {
 ;CHECK-LABEL: ushll4s:
 ;CHECK: ushll.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp1 = load <4 x i16>, ptr %A
         %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
         %tmp3 = shl <4 x i32> %tmp2, <i32 1, i32 1, i32 1, i32 1>
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @ushll2d(<2 x i32>* %A) nounwind {
+define <2 x i64> @ushll2d(ptr %A) nounwind {
 ;CHECK-LABEL: ushll2d:
 ;CHECK: ushll.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp1 = load <2 x i32>, ptr %A
         %tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
         %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
         ret <2 x i64> %tmp3
 }
 
-define <8 x i16> @ushll2_8h(<16 x i8>* %A) nounwind {
+define <8 x i16> @ushll2_8h(ptr %A) nounwind {
 ;CHECK-LABEL: ushll2_8h:
 ;CHECK: ushll.8h v0, {{v[0-9]+}}, #1
-        %load1 = load <16 x i8>, <16 x i8>* %A
+        %load1 = load <16 x i8>, ptr %A
         %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
         %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @ushll2_4s(<8 x i16>* %A) nounwind {
+define <4 x i32> @ushll2_4s(ptr %A) nounwind {
 ;CHECK-LABEL: ushll2_4s:
 ;CHECK: ushll.4s v0, {{v[0-9]+}}, #1
-        %load1 = load <8 x i16>, <8 x i16>* %A
+        %load1 = load <8 x i16>, ptr %A
         %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
         %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
         %tmp3 = shl <4 x i32> %tmp2, <i32 1, i32 1, i32 1, i32 1>
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @ushll2_2d(<4 x i32>* %A) nounwind {
+define <2 x i64> @ushll2_2d(ptr %A) nounwind {
 ;CHECK-LABEL: ushll2_2d:
 ;CHECK: ushll.2d v0, {{v[0-9]+}}, #1
-        %load1 = load <4 x i32>, <4 x i32>* %A
+        %load1 = load <4 x i32>, ptr %A
         %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
         %tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
         %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
@@ -1475,63 +1475,63 @@ declare <2 x i64> @llvm.aarch64.neon.ushl.v2i64(<2 x i64>, <2 x i64>)
 declare <1 x i64> @llvm.aarch64.neon.ushl.v1i64(<1 x i64>, <1 x i64>)
 declare i64 @llvm.aarch64.neon.ushl.i64(i64, i64)
 
-define <8 x i16> @neon.ushll8h_constant_shift(<8 x i8>* %A) nounwind {
+define <8 x i16> @neon.ushll8h_constant_shift(ptr %A) nounwind {
 ;CHECK-LABEL: neon.ushll8h_constant_shift
 ;CHECK: ushll.8h v0, {{v[0-9]+}}, #1
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
+  %tmp1 = load <8 x i8>, ptr %A
   %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.ushl.v8i16(<8 x i16> %tmp2, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
   ret <8 x i16> %tmp3
 }
 
-define <8 x i16> @neon.ushl8h_no_constant_shift(<8 x i8>* %A) nounwind {
+define <8 x i16> @neon.ushl8h_no_constant_shift(ptr %A) nounwind {
 ;CHECK-LABEL: neon.ushl8h_no_constant_shift
 ;CHECK: ushl.8h v0, v0, v0
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
+  %tmp1 = load <8 x i8>, ptr %A
   %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.ushl.v8i16(<8 x i16> %tmp2, <8 x i16> %tmp2)
   ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @neon.ushl8h_constant_shift_extend_not_2x(<4 x i8>* %A) nounwind {
+define <4 x i32> @neon.ushl8h_constant_shift_extend_not_2x(ptr %A) nounwind {
 ; CHECK-LABEL: neon.ushl8h_constant_shift_extend_not_2x:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ldr s0, [x0]
 ; CHECK-NEXT: ushll.8h v0, v0, #0
 ; CHECK-NEXT: ushll.4s v0, v0, #1
 ; CHECK-NEXT: ret
-  %tmp1 = load <4 x i8>, <4 x i8>* %A
+  %tmp1 = load <4 x i8>, ptr %A
   %tmp2 = zext <4 x i8> %tmp1 to <4 x i32>
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.ushl.v4i32(<4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
   ret <4 x i32> %tmp3
 }
 
-define <8 x i16> @neon.ushl8_noext_constant_shift(<8 x i16>* %A) nounwind {
+define <8 x i16> @neon.ushl8_noext_constant_shift(ptr %A) nounwind {
 ; CHECK-LABEL: neon.ushl8_noext_constant_shift
 ; CHECK:      ldr       q0, [x0]
 ; CHECK-NEXT: shl.8h   v0, v0, #1
 ; CHECK-NEXT: ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
+  %tmp1 = load <8 x i16>, ptr %A
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.ushl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
   ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @neon.ushll4s_constant_shift(<4 x i16>* %A) nounwind {
+define <4 x i32> @neon.ushll4s_constant_shift(ptr %A) nounwind {
 ;CHECK-LABEL: neon.ushll4s_constant_shift
 ;CHECK: ushll.4s v0, {{v[0-9]+}}, #1
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp1 = load <4 x i16>, ptr %A
   %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.ushl.v4i32(<4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
   ret <4 x i32> %tmp3
 }
 
 ; FIXME: unnecessary ushll.4s v0, v0, #0?
-define <4 x i32> @neon.ushll4s_neg_constant_shift(<4 x i16>* %A) nounwind {
+define <4 x i32> @neon.ushll4s_neg_constant_shift(ptr %A) nounwind {
 ; CHECK-LABEL: neon.ushll4s_neg_constant_shift
 ; CHECK: movi.2d v1, #0xffffffffffffffff
 ; CHECK: ushll.4s v0, v0, #0
 ; CHECK: ushl.4s v0, v0, v1
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp1 = load <4 x i16>, ptr %A
   %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.ushl.v4i32(<4 x i32> %tmp2, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
   ret <4 x i32> %tmp3
@@ -1546,46 +1546,46 @@ define <4 x i32> @neon.ushll4s_constant_fold() nounwind {
   ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @neon.ushll2d_constant_shift(<2 x i32>* %A) nounwind {
+define <2 x i64> @neon.ushll2d_constant_shift(ptr %A) nounwind {
 ;CHECK-LABEL: neon.ushll2d_constant_shift
 ;CHECK: ushll.2d v0, {{v[0-9]+}}, #1
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
+  %tmp1 = load <2 x i32>, ptr %A
   %tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
   %tmp3 = call <2 x i64> @llvm.aarch64.neon.ushl.v2i64(<2 x i64> %tmp2, <2 x i64> <i64 1, i64 1>)
   ret <2 x i64> %tmp3
 }
 
-define <1 x i64> @neon.ushl_vscalar_constant_shift(<1 x i32>* %A) nounwind {
+define <1 x i64> @neon.ushl_vscalar_constant_shift(ptr %A) nounwind {
 ;CHECK-LABEL: neon.ushl_vscalar_constant_shift
 ;CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #1
-  %tmp1 = load <1 x i32>, <1 x i32>* %A
+  %tmp1 = load <1 x i32>, ptr %A
   %tmp2 = zext <1 x i32> %tmp1 to <1 x i64>
   %tmp3 = call <1 x i64> @llvm.aarch64.neon.ushl.v1i64(<1 x i64> %tmp2, <1 x i64> <i64 1>)
   ret <1 x i64> %tmp3
 }
 
-define i64 @neon.ushl_scalar_constant_shift(i32* %A) nounwind {
+define i64 @neon.ushl_scalar_constant_shift(ptr %A) nounwind {
 ;CHECK-LABEL: neon.ushl_scalar_constant_shift
 ;CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #1
-  %tmp1 = load i32, i32* %A
+  %tmp1 = load i32, ptr %A
   %tmp2 = zext i32 %tmp1 to i64
   %tmp3 = call i64 @llvm.aarch64.neon.ushl.i64(i64 %tmp2, i64 1)
   ret i64 %tmp3
 }
 
-define <8 x i16> @sshll8h(<8 x i8>* %A) nounwind {
+define <8 x i16> @sshll8h(ptr %A) nounwind {
 ;CHECK-LABEL: sshll8h:
 ;CHECK: sshll.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp1 = load <8 x i8>, ptr %A
         %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
         %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
         ret <8 x i16> %tmp3
 }
 
-define <2 x i64> @sshll2d(<2 x i32>* %A) nounwind {
+define <2 x i64> @sshll2d(ptr %A) nounwind {
 ;CHECK-LABEL: sshll2d:
 ;CHECK: sshll.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp1 = load <2 x i32>, ptr %A
         %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
         %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
         ret <2 x i64> %tmp3
@@ -1598,67 +1598,67 @@ declare <2 x i64> @llvm.aarch64.neon.sshl.v2i64(<2 x i64>, <2 x i64>)
 declare <1 x i64> @llvm.aarch64.neon.sshl.v1i64(<1 x i64>, <1 x i64>)
 declare i64 @llvm.aarch64.neon.sshl.i64(i64, i64)
 
-define <16 x i8> @neon.sshl16b_constant_shift(<16 x i8>* %A) nounwind {
+define <16 x i8> @neon.sshl16b_constant_shift(ptr %A) nounwind {
 ;CHECK-LABEL: neon.sshl16b_constant_shift
 ;CHECK: shl.16b {{v[0-9]+}}, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp1 = load <16 x i8>, ptr %A
         %tmp2 = call <16 x i8> @llvm.aarch64.neon.sshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
         ret <16 x i8> %tmp2
 }
 
-define <16 x i8> @neon.sshl16b_non_splat_constant_shift(<16 x i8>* %A) nounwind {
+define <16 x i8> @neon.sshl16b_non_splat_constant_shift(ptr %A) nounwind {
 ;CHECK-LABEL: neon.sshl16b_non_splat_constant_shift
 ;CHECK: sshl.16b {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp1 = load <16 x i8>, ptr %A
         %tmp2 = call <16 x i8> @llvm.aarch64.neon.sshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 6, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
         ret <16 x i8> %tmp2
 }
 
-define <16 x i8> @neon.sshl16b_neg_constant_shift(<16 x i8>* %A) nounwind {
+define <16 x i8> @neon.sshl16b_neg_constant_shift(ptr %A) nounwind {
 ;CHECK-LABEL: neon.sshl16b_neg_constant_shift
 ;CHECK: sshl.16b {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp1 = load <16 x i8>, ptr %A
         %tmp2 = call <16 x i8> @llvm.aarch64.neon.sshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2, i8 -2>)
         ret <16 x i8> %tmp2
 }
 
-define <8 x i16> @neon.sshll8h_constant_shift(<8 x i8>* %A) nounwind {
+define <8 x i16> @neon.sshll8h_constant_shift(ptr %A) nounwind {
 ;CHECK-LABEL: neon.sshll8h_constant_shift
 ;CHECK: sshll.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp1 = load <8 x i8>, ptr %A
         %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.sshl.v8i16(<8 x i16> %tmp2, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @neon.sshl4s_wrong_ext_constant_shift(<4 x i8>* %A) nounwind {
+define <4 x i32> @neon.sshl4s_wrong_ext_constant_shift(ptr %A) nounwind {
 ; CHECK-LABEL: neon.sshl4s_wrong_ext_constant_shift:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ldr s0, [x0]
 ; CHECK-NEXT: sshll.8h v0, v0, #0
 ; CHECK-NEXT: sshll.4s v0, v0, #1
 ; CHECK-NEXT: ret
-        %tmp1 = load <4 x i8>, <4 x i8>* %A
+        %tmp1 = load <4 x i8>, ptr %A
         %tmp2 = sext <4 x i8> %tmp1 to <4 x i32>
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
         ret <4 x i32> %tmp3
 }
 
-define <4 x i32> @neon.sshll4s_constant_shift(<4 x i16>* %A) nounwind {
+define <4 x i32> @neon.sshll4s_constant_shift(ptr %A) nounwind {
 ;CHECK-LABEL: neon.sshll4s_constant_shift
 ;CHECK: sshll.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp1 = load <4 x i16>, ptr %A
         %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
         ret <4 x i32> %tmp3
 }
 
-define <4 x i32> @neon.sshll4s_neg_constant_shift(<4 x i16>* %A) nounwind {
+define <4 x i32> @neon.sshll4s_neg_constant_shift(ptr %A) nounwind {
 ;CHECK-LABEL: neon.sshll4s_neg_constant_shift
 ;CHECK: movi.2d v1, #0xffffffffffffffff
 ;CHECK: sshll.4s v0, v0, #0
 ;CHECK: sshl.4s v0, v0, v1
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp1 = load <4 x i16>, ptr %A
         %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> %tmp2, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
         ret <4 x i32> %tmp3
@@ -1672,36 +1672,36 @@ define <4 x i32> @neon.sshl4s_constant_fold() nounwind {
         ret <4 x i32> %tmp3
 }
 
-define <4 x i32> @neon.sshl4s_no_fold(<4 x i32>* %A) nounwind {
+define <4 x i32> @neon.sshl4s_no_fold(ptr %A) nounwind {
 ;CHECK-LABEL: neon.sshl4s_no_fold
 ;CHECK: shl.4s {{v[0-9]+}}, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.sshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @neon.sshll2d_constant_shift(<2 x i32>* %A) nounwind {
+define <2 x i64> @neon.sshll2d_constant_shift(ptr %A) nounwind {
 ;CHECK-LABEL: neon.sshll2d_constant_shift
 ;CHECK: sshll.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp1 = load <2 x i32>, ptr %A
         %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.sshl.v2i64(<2 x i64> %tmp2, <2 x i64> <i64 1, i64 1>)
         ret <2 x i64> %tmp3
 }
 
-define <1 x i64> @neon.sshll_vscalar_constant_shift(<1 x i32>* %A) nounwind {
+define <1 x i64> @neon.sshll_vscalar_constant_shift(ptr %A) nounwind {
 ;CHECK-LABEL: neon.sshll_vscalar_constant_shift
 ;CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #1
-  %tmp1 = load <1 x i32>, <1 x i32>* %A
+  %tmp1 = load <1 x i32>, ptr %A
   %tmp2 = zext <1 x i32> %tmp1 to <1 x i64>
   %tmp3 = call <1 x i64> @llvm.aarch64.neon.sshl.v1i64(<1 x i64> %tmp2, <1 x i64> <i64 1>)
   ret <1 x i64> %tmp3
 }
 
-define i64 @neon.sshll_scalar_constant_shift(i32* %A) nounwind {
+define i64 @neon.sshll_scalar_constant_shift(ptr %A) nounwind {
 ;CHECK-LABEL: neon.sshll_scalar_constant_shift
 ;CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #1
-  %tmp1 = load i32, i32* %A
+  %tmp1 = load i32, ptr %A
   %tmp2 = zext i32 %tmp1 to i64
   %tmp3 = call i64 @llvm.aarch64.neon.sshl.i64(i64 %tmp2, i64 1)
   ret i64 %tmp3
@@ -1715,658 +1715,658 @@ define <2 x i64> @neon.sshl2d_constant_fold() nounwind {
         ret <2 x i64> %tmp3
 }
 
-define <2 x i64> @neon.sshl2d_no_fold(<2 x i64>* %A) nounwind {
+define <2 x i64> @neon.sshl2d_no_fold(ptr %A) nounwind {
 ;CHECK-LABEL: neon.sshl2d_no_fold
 ;CHECK: shl.2d {{v[0-9]+}}, {{v[0-9]+}}, #2
-        %tmp2 = load <2 x i64>, <2 x i64>* %A
+        %tmp2 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.sshl.v2i64(<2 x i64> %tmp2, <2 x i64> <i64 2, i64 2>)
         ret <2 x i64> %tmp3
 }
 
-define <8 x i16> @sshll2_8h(<16 x i8>* %A) nounwind {
+define <8 x i16> @sshll2_8h(ptr %A) nounwind {
 ;CHECK-LABEL: sshll2_8h:
 ;CHECK: sshll.8h v0, {{v[0-9]+}}, #1
-        %load1 = load <16 x i8>, <16 x i8>* %A
+        %load1 = load <16 x i8>, ptr %A
         %tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
         %tmp3 = shl <8 x i16> %tmp2, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @sshll2_4s(<8 x i16>* %A) nounwind {
+define <4 x i32> @sshll2_4s(ptr %A) nounwind {
 ;CHECK-LABEL: sshll2_4s:
 ;CHECK: sshll.4s v0, {{v[0-9]+}}, #1
-        %load1 = load <8 x i16>, <8 x i16>* %A
+        %load1 = load <8 x i16>, ptr %A
         %tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
         %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
         %tmp3 = shl <4 x i32> %tmp2, <i32 1, i32 1, i32 1, i32 1>
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @sshll2_2d(<4 x i32>* %A) nounwind {
+define <2 x i64> @sshll2_2d(ptr %A) nounwind {
 ;CHECK-LABEL: sshll2_2d:
 ;CHECK: sshll.2d v0, {{v[0-9]+}}, #1
-        %load1 = load <4 x i32>, <4 x i32>* %A
+        %load1 = load <4 x i32>, ptr %A
         %tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
         %tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
         %tmp3 = shl <2 x i64> %tmp2, <i64 1, i64 1>
         ret <2 x i64> %tmp3
 }
 
-define <8 x i8> @sqshli8b(<8 x i8>* %A) nounwind {
+define <8 x i8> @sqshli8b(ptr %A) nounwind {
 ;CHECK-LABEL: sqshli8b:
 ;CHECK: sqshl.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp1 = load <8 x i8>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @sqshli4h(<4 x i16>* %A) nounwind {
+define <4 x i16> @sqshli4h(ptr %A) nounwind {
 ;CHECK-LABEL: sqshli4h:
 ;CHECK: sqshl.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp1 = load <4 x i16>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @sqshli2s(<2 x i32>* %A) nounwind {
+define <2 x i32> @sqshli2s(ptr %A) nounwind {
 ;CHECK-LABEL: sqshli2s:
 ;CHECK: sqshl.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp1 = load <2 x i32>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 1, i32 1>)
         ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @sqshli16b(<16 x i8>* %A) nounwind {
+define <16 x i8> @sqshli16b(ptr %A) nounwind {
 ;CHECK-LABEL: sqshli16b:
 ;CHECK: sqshl.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp1 = load <16 x i8>, ptr %A
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.sqshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
         ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @sqshli8h(<8 x i16>* %A) nounwind {
+define <8 x i16> @sqshli8h(ptr %A) nounwind {
 ;CHECK-LABEL: sqshli8h:
 ;CHECK: sqshl.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.sqshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @sqshli4s(<4 x i32>* %A) nounwind {
+define <4 x i32> @sqshli4s(ptr %A) nounwind {
 ;CHECK-LABEL: sqshli4s:
 ;CHECK: sqshl.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.sqshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @sqshli2d(<2 x i64>* %A) nounwind {
+define <2 x i64> @sqshli2d(ptr %A) nounwind {
 ;CHECK-LABEL: sqshli2d:
 ;CHECK: sqshl.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.sqshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 1, i64 1>)
         ret <2 x i64> %tmp3
 }
 
-define <8 x i8> @uqshli8b(<8 x i8>* %A) nounwind {
+define <8 x i8> @uqshli8b(ptr %A) nounwind {
 ;CHECK-LABEL: uqshli8b:
 ;CHECK: uqshl.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp1 = load <8 x i8>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
         ret <8 x i8> %tmp3
 }
 
-define <8 x i8> @uqshli8b_1(<8 x i8>* %A) nounwind {
+define <8 x i8> @uqshli8b_1(ptr %A) nounwind {
 ;CHECK-LABEL: uqshli8b_1:
 ;CHECK: movi.8b [[REG:v[0-9]+]], #8
 ;CHECK: uqshl.8b v0, v0, [[REG]]
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp1 = load <8 x i8>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @uqshli4h(<4 x i16>* %A) nounwind {
+define <4 x i16> @uqshli4h(ptr %A) nounwind {
 ;CHECK-LABEL: uqshli4h:
 ;CHECK: uqshl.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp1 = load <4 x i16>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @uqshli2s(<2 x i32>* %A) nounwind {
+define <2 x i32> @uqshli2s(ptr %A) nounwind {
 ;CHECK-LABEL: uqshli2s:
 ;CHECK: uqshl.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp1 = load <2 x i32>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 1, i32 1>)
         ret <2 x i32> %tmp3
 }
 
-define <16 x i8> @uqshli16b(<16 x i8>* %A) nounwind {
+define <16 x i8> @uqshli16b(ptr %A) nounwind {
 ;CHECK-LABEL: uqshli16b:
 ;CHECK: uqshl.16b
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp1 = load <16 x i8>, ptr %A
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.uqshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
         ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @uqshli8h(<8 x i16>* %A) nounwind {
+define <8 x i16> @uqshli8h(ptr %A) nounwind {
 ;CHECK-LABEL: uqshli8h:
 ;CHECK: uqshl.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.uqshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @uqshli4s(<4 x i32>* %A) nounwind {
+define <4 x i32> @uqshli4s(ptr %A) nounwind {
 ;CHECK-LABEL: uqshli4s:
 ;CHECK: uqshl.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.uqshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @uqshli2d(<2 x i64>* %A) nounwind {
+define <2 x i64> @uqshli2d(ptr %A) nounwind {
 ;CHECK-LABEL: uqshli2d:
 ;CHECK: uqshl.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.uqshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 1, i64 1>)
         ret <2 x i64> %tmp3
 }
 
-define <8 x i8> @ursra8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @ursra8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ursra8b:
 ;CHECK: ursra.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp1 = load <8 x i8>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
-        %tmp4 = load <8 x i8>, <8 x i8>* %B
+        %tmp4 = load <8 x i8>, ptr %B
         %tmp5 = add <8 x i8> %tmp3, %tmp4
         ret <8 x i8> %tmp5
 }
 
-define <4 x i16> @ursra4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @ursra4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ursra4h:
 ;CHECK: ursra.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp1 = load <4 x i16>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
-        %tmp4 = load <4 x i16>, <4 x i16>* %B
+        %tmp4 = load <4 x i16>, ptr %B
         %tmp5 = add <4 x i16> %tmp3, %tmp4
         ret <4 x i16> %tmp5
 }
 
-define <2 x i32> @ursra2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @ursra2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ursra2s:
 ;CHECK: ursra.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp1 = load <2 x i32>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
-        %tmp4 = load <2 x i32>, <2 x i32>* %B
+        %tmp4 = load <2 x i32>, ptr %B
         %tmp5 = add <2 x i32> %tmp3, %tmp4
         ret <2 x i32> %tmp5
 }
 
-define <16 x i8> @ursra16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @ursra16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ursra16b:
 ;CHECK: ursra.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp1 = load <16 x i8>, ptr %A
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
-        %tmp4 = load <16 x i8>, <16 x i8>* %B
+        %tmp4 = load <16 x i8>, ptr %B
         %tmp5 = add <16 x i8> %tmp3, %tmp4
          ret <16 x i8> %tmp5
 }
 
-define <8 x i16> @ursra8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @ursra8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ursra8h:
 ;CHECK: ursra.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
-        %tmp4 = load <8 x i16>, <8 x i16>* %B
+        %tmp4 = load <8 x i16>, ptr %B
         %tmp5 = add <8 x i16> %tmp3, %tmp4
          ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @ursra4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @ursra4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ursra4s:
 ;CHECK: ursra.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
-        %tmp4 = load <4 x i32>, <4 x i32>* %B
+        %tmp4 = load <4 x i32>, ptr %B
         %tmp5 = add <4 x i32> %tmp3, %tmp4
          ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @ursra2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @ursra2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ursra2d:
 ;CHECK: ursra.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
-        %tmp4 = load <2 x i64>, <2 x i64>* %B
+        %tmp4 = load <2 x i64>, ptr %B
         %tmp5 = add <2 x i64> %tmp3, %tmp4
          ret <2 x i64> %tmp5
 }
 
-define <1 x i64> @ursra1d(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+define <1 x i64> @ursra1d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ursra1d:
 ;CHECK: ursra {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
+        %tmp1 = load <1 x i64>, ptr %A
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 -1>)
-        %tmp4 = load <1 x i64>, <1 x i64>* %B
+        %tmp4 = load <1 x i64>, ptr %B
         %tmp5 = add <1 x i64> %tmp3, %tmp4
         ret <1 x i64> %tmp5
 }
 
-define i64 @ursra_scalar(i64* %A, i64* %B) nounwind {
+define i64 @ursra_scalar(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ursra_scalar:
 ;CHECK: ursra {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load i64, i64* %A
+        %tmp1 = load i64, ptr %A
         %tmp3 = call i64 @llvm.aarch64.neon.urshl.i64(i64 %tmp1, i64 -1)
-        %tmp4 = load i64, i64* %B
+        %tmp4 = load i64, ptr %B
         %tmp5 = add i64 %tmp3, %tmp4
         ret i64 %tmp5
 }
 
-define <8 x i8> @srsra8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @srsra8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srsra8b:
 ;CHECK: srsra.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp1 = load <8 x i8>, ptr %A
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %tmp1, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
-        %tmp4 = load <8 x i8>, <8 x i8>* %B
+        %tmp4 = load <8 x i8>, ptr %B
         %tmp5 = add <8 x i8> %tmp3, %tmp4
         ret <8 x i8> %tmp5
 }
 
-define <4 x i16> @srsra4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @srsra4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srsra4h:
 ;CHECK: srsra.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp1 = load <4 x i16>, ptr %A
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %tmp1, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
-        %tmp4 = load <4 x i16>, <4 x i16>* %B
+        %tmp4 = load <4 x i16>, ptr %B
         %tmp5 = add <4 x i16> %tmp3, %tmp4
         ret <4 x i16> %tmp5
 }
 
-define <2 x i32> @srsra2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @srsra2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srsra2s:
 ;CHECK: srsra.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp1 = load <2 x i32>, ptr %A
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %tmp1, <2 x i32> <i32 -1, i32 -1>)
-        %tmp4 = load <2 x i32>, <2 x i32>* %B
+        %tmp4 = load <2 x i32>, ptr %B
         %tmp5 = add <2 x i32> %tmp3, %tmp4
         ret <2 x i32> %tmp5
 }
 
-define <16 x i8> @srsra16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @srsra16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srsra16b:
 ;CHECK: srsra.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp1 = load <16 x i8>, ptr %A
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %tmp1, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
-        %tmp4 = load <16 x i8>, <16 x i8>* %B
+        %tmp4 = load <16 x i8>, ptr %B
         %tmp5 = add <16 x i8> %tmp3, %tmp4
          ret <16 x i8> %tmp5
 }
 
-define <8 x i16> @srsra8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @srsra8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srsra8h:
 ;CHECK: srsra.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %tmp1, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
-        %tmp4 = load <8 x i16>, <8 x i16>* %B
+        %tmp4 = load <8 x i16>, ptr %B
         %tmp5 = add <8 x i16> %tmp3, %tmp4
          ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @srsra4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @srsra4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srsra4s:
 ;CHECK: srsra.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %tmp1, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
-        %tmp4 = load <4 x i32>, <4 x i32>* %B
+        %tmp4 = load <4 x i32>, ptr %B
         %tmp5 = add <4 x i32> %tmp3, %tmp4
          ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @srsra2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @srsra2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srsra2d:
 ;CHECK: srsra.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %tmp1, <2 x i64> <i64 -1, i64 -1>)
-        %tmp4 = load <2 x i64>, <2 x i64>* %B
+        %tmp4 = load <2 x i64>, ptr %B
         %tmp5 = add <2 x i64> %tmp3, %tmp4
          ret <2 x i64> %tmp5
 }
 
-define <1 x i64> @srsra1d(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+define <1 x i64> @srsra1d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srsra1d:
 ;CHECK: srsra {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
+        %tmp1 = load <1 x i64>, ptr %A
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %tmp1, <1 x i64> <i64 -1>)
-        %tmp4 = load <1 x i64>, <1 x i64>* %B
+        %tmp4 = load <1 x i64>, ptr %B
         %tmp5 = add <1 x i64> %tmp3, %tmp4
         ret <1 x i64> %tmp5
 }
 
-define i64 @srsra_scalar(i64* %A, i64* %B) nounwind {
+define i64 @srsra_scalar(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: srsra_scalar:
 ;CHECK: srsra {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load i64, i64* %A
+        %tmp1 = load i64, ptr %A
         %tmp3 = call i64 @llvm.aarch64.neon.srshl.i64(i64 %tmp1, i64 -1)
-        %tmp4 = load i64, i64* %B
+        %tmp4 = load i64, ptr %B
         %tmp5 = add i64 %tmp3, %tmp4
         ret i64 %tmp5
 }
 
-define <8 x i8> @usra8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @usra8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usra8b:
 ;CHECK: usra.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp1 = load <8 x i8>, ptr %A
         %tmp3 = lshr <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-        %tmp4 = load <8 x i8>, <8 x i8>* %B
+        %tmp4 = load <8 x i8>, ptr %B
         %tmp5 = add <8 x i8> %tmp3, %tmp4
         ret <8 x i8> %tmp5
 }
 
-define <4 x i16> @usra4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @usra4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usra4h:
 ;CHECK: usra.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp1 = load <4 x i16>, ptr %A
         %tmp3 = lshr <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
-        %tmp4 = load <4 x i16>, <4 x i16>* %B
+        %tmp4 = load <4 x i16>, ptr %B
         %tmp5 = add <4 x i16> %tmp3, %tmp4
         ret <4 x i16> %tmp5
 }
 
-define <2 x i32> @usra2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @usra2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usra2s:
 ;CHECK: usra.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp1 = load <2 x i32>, ptr %A
         %tmp3 = lshr <2 x i32> %tmp1, <i32 1, i32 1>
-        %tmp4 = load <2 x i32>, <2 x i32>* %B
+        %tmp4 = load <2 x i32>, ptr %B
         %tmp5 = add <2 x i32> %tmp3, %tmp4
         ret <2 x i32> %tmp5
 }
 
-define <16 x i8> @usra16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @usra16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usra16b:
 ;CHECK: usra.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp1 = load <16 x i8>, ptr %A
         %tmp3 = lshr <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-        %tmp4 = load <16 x i8>, <16 x i8>* %B
+        %tmp4 = load <16 x i8>, ptr %B
         %tmp5 = add <16 x i8> %tmp3, %tmp4
          ret <16 x i8> %tmp5
 }
 
-define <8 x i16> @usra8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @usra8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usra8h:
 ;CHECK: usra.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-        %tmp4 = load <8 x i16>, <8 x i16>* %B
+        %tmp4 = load <8 x i16>, ptr %B
         %tmp5 = add <8 x i16> %tmp3, %tmp4
          ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @usra4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @usra4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usra4s:
 ;CHECK: usra.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
-        %tmp4 = load <4 x i32>, <4 x i32>* %B
+        %tmp4 = load <4 x i32>, ptr %B
         %tmp5 = add <4 x i32> %tmp3, %tmp4
          ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @usra2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @usra2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usra2d:
 ;CHECK: usra.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
-        %tmp4 = load <2 x i64>, <2 x i64>* %B
+        %tmp4 = load <2 x i64>, ptr %B
         %tmp5 = add <2 x i64> %tmp3, %tmp4
          ret <2 x i64> %tmp5
 }
 
-define <1 x i64> @usra1d(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+define <1 x i64> @usra1d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usra1d:
 ;CHECK: usra {{d[0-9]+}}, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
+        %tmp1 = load <1 x i64>, ptr %A
         %tmp3 = lshr <1 x i64> %tmp1, <i64 1>
-        %tmp4 = load <1 x i64>, <1 x i64>* %B
+        %tmp4 = load <1 x i64>, ptr %B
         %tmp5 = add <1 x i64> %tmp3, %tmp4
          ret <1 x i64> %tmp5
 }
 
-define <8 x i8> @ssra8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @ssra8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssra8b:
 ;CHECK: ssra.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
+        %tmp1 = load <8 x i8>, ptr %A
         %tmp3 = ashr <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-        %tmp4 = load <8 x i8>, <8 x i8>* %B
+        %tmp4 = load <8 x i8>, ptr %B
         %tmp5 = add <8 x i8> %tmp3, %tmp4
         ret <8 x i8> %tmp5
 }
 
-define <4 x i16> @ssra4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @ssra4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssra4h:
 ;CHECK: ssra.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
+        %tmp1 = load <4 x i16>, ptr %A
         %tmp3 = ashr <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
-        %tmp4 = load <4 x i16>, <4 x i16>* %B
+        %tmp4 = load <4 x i16>, ptr %B
         %tmp5 = add <4 x i16> %tmp3, %tmp4
         ret <4 x i16> %tmp5
 }
 
-define <2 x i32> @ssra2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @ssra2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssra2s:
 ;CHECK: ssra.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
+        %tmp1 = load <2 x i32>, ptr %A
         %tmp3 = ashr <2 x i32> %tmp1, <i32 1, i32 1>
-        %tmp4 = load <2 x i32>, <2 x i32>* %B
+        %tmp4 = load <2 x i32>, ptr %B
         %tmp5 = add <2 x i32> %tmp3, %tmp4
         ret <2 x i32> %tmp5
 }
 
-define <16 x i8> @ssra16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @ssra16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssra16b:
 ;CHECK: ssra.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp1 = load <16 x i8>, ptr %A
         %tmp3 = ashr <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-        %tmp4 = load <16 x i8>, <16 x i8>* %B
+        %tmp4 = load <16 x i8>, ptr %B
         %tmp5 = add <16 x i8> %tmp3, %tmp4
          ret <16 x i8> %tmp5
 }
 
-define <8 x i16> @ssra8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @ssra8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssra8h:
 ;CHECK: ssra.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %tmp3 = ashr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-        %tmp4 = load <8 x i16>, <8 x i16>* %B
+        %tmp4 = load <8 x i16>, ptr %B
         %tmp5 = add <8 x i16> %tmp3, %tmp4
          ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @ssra4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @ssra4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssra4s:
 ;CHECK: ssra.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %tmp3 = ashr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
-        %tmp4 = load <4 x i32>, <4 x i32>* %B
+        %tmp4 = load <4 x i32>, ptr %B
         %tmp5 = add <4 x i32> %tmp3, %tmp4
          ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @ssra2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @ssra2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssra2d:
 ;CHECK: ssra.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
         %tmp3 = ashr <2 x i64> %tmp1, <i64 1, i64 1>
-        %tmp4 = load <2 x i64>, <2 x i64>* %B
+        %tmp4 = load <2 x i64>, ptr %B
         %tmp5 = add <2 x i64> %tmp3, %tmp4
          ret <2 x i64> %tmp5
 }
 
-define <8 x i8> @shr_orr8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @shr_orr8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shr_orr8b:
 ;CHECK: shr.8b v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.8b
 ;CHECK-NEXT: ret
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
-        %tmp4 = load <8 x i8>, <8 x i8>* %B
+        %tmp1 = load <8 x i8>, ptr %A
+        %tmp4 = load <8 x i8>, ptr %B
         %tmp3 = lshr <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
         %tmp5 = or <8 x i8> %tmp3, %tmp4
         ret <8 x i8> %tmp5
 }
 
-define <4 x i16> @shr_orr4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @shr_orr4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shr_orr4h:
 ;CHECK: shr.4h v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.8b
 ;CHECK-NEXT: ret
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
-        %tmp4 = load <4 x i16>, <4 x i16>* %B
+        %tmp1 = load <4 x i16>, ptr %A
+        %tmp4 = load <4 x i16>, ptr %B
         %tmp3 = lshr <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
         %tmp5 = or <4 x i16> %tmp3, %tmp4
         ret <4 x i16> %tmp5
 }
 
-define <2 x i32> @shr_orr2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @shr_orr2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shr_orr2s:
 ;CHECK: shr.2s v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.8b
 ;CHECK-NEXT: ret
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
-        %tmp4 = load <2 x i32>, <2 x i32>* %B
+        %tmp1 = load <2 x i32>, ptr %A
+        %tmp4 = load <2 x i32>, ptr %B
         %tmp3 = lshr <2 x i32> %tmp1, <i32 1, i32 1>
         %tmp5 = or <2 x i32> %tmp3, %tmp4
         ret <2 x i32> %tmp5
 }
 
-define <16 x i8> @shr_orr16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @shr_orr16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shr_orr16b:
 ;CHECK: shr.16b v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.16b
 ;CHECK-NEXT: ret
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
-        %tmp4 = load <16 x i8>, <16 x i8>* %B
+        %tmp1 = load <16 x i8>, ptr %A
+        %tmp4 = load <16 x i8>, ptr %B
         %tmp3 = lshr <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
         %tmp5 = or <16 x i8> %tmp3, %tmp4
          ret <16 x i8> %tmp5
 }
 
-define <8 x i16> @shr_orr8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @shr_orr8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shr_orr8h:
 ;CHECK: shr.8h v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.16b
 ;CHECK-NEXT: ret
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp4 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp4 = load <8 x i16>, ptr %B
         %tmp3 = lshr <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
         %tmp5 = or <8 x i16> %tmp3, %tmp4
          ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @shr_orr4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @shr_orr4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shr_orr4s:
 ;CHECK: shr.4s v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.16b
 ;CHECK-NEXT: ret
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp4 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp4 = load <4 x i32>, ptr %B
         %tmp3 = lshr <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
         %tmp5 = or <4 x i32> %tmp3, %tmp4
          ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @shr_orr2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @shr_orr2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shr_orr2d:
 ;CHECK: shr.2d v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.16b
 ;CHECK-NEXT: ret
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp4 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp4 = load <2 x i64>, ptr %B
         %tmp3 = lshr <2 x i64> %tmp1, <i64 1, i64 1>
         %tmp5 = or <2 x i64> %tmp3, %tmp4
          ret <2 x i64> %tmp5
 }
 
-define <8 x i8> @shl_orr8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @shl_orr8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shl_orr8b:
 ;CHECK: shl.8b v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.8b
 ;CHECK-NEXT: ret
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
-        %tmp4 = load <8 x i8>, <8 x i8>* %B
+        %tmp1 = load <8 x i8>, ptr %A
+        %tmp4 = load <8 x i8>, ptr %B
         %tmp3 = shl <8 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
         %tmp5 = or <8 x i8> %tmp3, %tmp4
         ret <8 x i8> %tmp5
 }
 
-define <4 x i16> @shl_orr4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @shl_orr4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shl_orr4h:
 ;CHECK: shl.4h v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.8b
 ;CHECK-NEXT: ret
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
-        %tmp4 = load <4 x i16>, <4 x i16>* %B
+        %tmp1 = load <4 x i16>, ptr %A
+        %tmp4 = load <4 x i16>, ptr %B
         %tmp3 = shl <4 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1>
         %tmp5 = or <4 x i16> %tmp3, %tmp4
         ret <4 x i16> %tmp5
 }
 
-define <2 x i32> @shl_orr2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @shl_orr2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shl_orr2s:
 ;CHECK: shl.2s v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.8b
 ;CHECK-NEXT: ret
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
-        %tmp4 = load <2 x i32>, <2 x i32>* %B
+        %tmp1 = load <2 x i32>, ptr %A
+        %tmp4 = load <2 x i32>, ptr %B
         %tmp3 = shl <2 x i32> %tmp1, <i32 1, i32 1>
         %tmp5 = or <2 x i32> %tmp3, %tmp4
         ret <2 x i32> %tmp5
 }
 
-define <16 x i8> @shl_orr16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @shl_orr16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shl_orr16b:
 ;CHECK: shl.16b v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.16b
 ;CHECK-NEXT: ret
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
-        %tmp4 = load <16 x i8>, <16 x i8>* %B
+        %tmp1 = load <16 x i8>, ptr %A
+        %tmp4 = load <16 x i8>, ptr %B
         %tmp3 = shl <16 x i8> %tmp1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
         %tmp5 = or <16 x i8> %tmp3, %tmp4
          ret <16 x i8> %tmp5
 }
 
-define <8 x i16> @shl_orr8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @shl_orr8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shl_orr8h:
 ;CHECK: shl.8h v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.16b
 ;CHECK-NEXT: ret
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp4 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp4 = load <8 x i16>, ptr %B
         %tmp3 = shl <8 x i16> %tmp1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
         %tmp5 = or <8 x i16> %tmp3, %tmp4
          ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @shl_orr4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @shl_orr4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shl_orr4s:
 ;CHECK: shl.4s v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.16b
 ;CHECK-NEXT: ret
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp4 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp4 = load <4 x i32>, ptr %B
         %tmp3 = shl <4 x i32> %tmp1, <i32 1, i32 1, i32 1, i32 1>
         %tmp5 = or <4 x i32> %tmp3, %tmp4
          ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @shl_orr2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @shl_orr2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: shl_orr2d:
 ;CHECK: shl.2d v0, {{v[0-9]+}}, #1
 ;CHECK-NEXT: orr.16b
 ;CHECK-NEXT: ret
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp4 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp4 = load <2 x i64>, ptr %B
         %tmp3 = shl <2 x i64> %tmp1, <i64 1, i64 1>
         %tmp5 = or <2 x i64> %tmp3, %tmp4
          ret <2 x i64> %tmp5
@@ -2389,74 +2389,74 @@ define <4 x i32> @shll_high(<8 x i16> %in) {
   ret <4 x i32> %res
 }
 
-define <8 x i8> @sli8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @sli8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sli8b:
 ;CHECK: sli.8b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
-        %tmp2 = load <8 x i8>, <8 x i8>* %B
+        %tmp1 = load <8 x i8>, ptr %A
+        %tmp2 = load <8 x i8>, ptr %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.vsli.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, i32 1)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @sli4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @sli4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sli4h:
 ;CHECK: sli.4h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
-        %tmp2 = load <4 x i16>, <4 x i16>* %B
+        %tmp1 = load <4 x i16>, ptr %A
+        %tmp2 = load <4 x i16>, ptr %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.vsli.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, i32 1)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @sli2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @sli2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sli2s:
 ;CHECK: sli.2s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
-        %tmp2 = load <2 x i32>, <2 x i32>* %B
+        %tmp1 = load <2 x i32>, ptr %A
+        %tmp2 = load <2 x i32>, ptr %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.vsli.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, i32 1)
         ret <2 x i32> %tmp3
 }
 
-define <1 x i64> @sli1d(<1 x i64>* %A, <1 x i64>* %B) nounwind {
+define <1 x i64> @sli1d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sli1d:
 ;CHECK: sli d0, {{d[0-9]+}}, #1
-        %tmp1 = load <1 x i64>, <1 x i64>* %A
-        %tmp2 = load <1 x i64>, <1 x i64>* %B
+        %tmp1 = load <1 x i64>, ptr %A
+        %tmp2 = load <1 x i64>, ptr %B
         %tmp3 = call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2, i32 1)
         ret <1 x i64> %tmp3
 }
 
-define <16 x i8> @sli16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @sli16b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sli16b:
 ;CHECK: sli.16b v0, {{v[0-9]+}}, #1
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp1 = load <16 x i8>, ptr %A
+        %tmp2 = load <16 x i8>, ptr %B
         %tmp3 = call <16 x i8> @llvm.aarch64.neon.vsli.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, i32 1)
         ret <16 x i8> %tmp3
 }
 
-define <8 x i16> @sli8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @sli8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sli8h:
 ;CHECK: sli.8h v0, {{v[0-9]+}}, #1
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i16>, ptr %B
         %tmp3 = call <8 x i16> @llvm.aarch64.neon.vsli.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, i32 1)
         ret <8 x i16> %tmp3
 }
 
-define <4 x i32> @sli4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @sli4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sli4s:
 ;CHECK: sli.4s v0, {{v[0-9]+}}, #1
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i32>, ptr %B
         %tmp3 = call <4 x i32> @llvm.aarch64.neon.vsli.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, i32 1)
         ret <4 x i32> %tmp3
 }
 
-define <2 x i64> @sli2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @sli2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: sli2d:
 ;CHECK: sli.2d v0, {{v[0-9]+}}, #1
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i64>, ptr %B
         %tmp3 = call <2 x i64> @llvm.aarch64.neon.vsli.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2, i32 1)
         ret <2 x i64> %tmp3
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-vshr.ll b/llvm/test/CodeGen/AArch64/arm64-vshr.ll
index bd5aa2505be23..39084c8cff8ab 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshr.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshr.ll
@@ -8,10 +8,10 @@ define <8 x i16> @testShiftRightArith_v8i16(<8 x i16> %a, <8 x i16> %b) #0 {
 entry:
   %a.addr = alloca <8 x i16>, align 16
   %b.addr = alloca <8 x i16>, align 16
-  store <8 x i16> %a, <8 x i16>* %a.addr, align 16
-  store <8 x i16> %b, <8 x i16>* %b.addr, align 16
-  %0 = load <8 x i16>, <8 x i16>* %a.addr, align 16
-  %1 = load <8 x i16>, <8 x i16>* %b.addr, align 16
+  store <8 x i16> %a, ptr %a.addr, align 16
+  store <8 x i16> %b, ptr %b.addr, align 16
+  %0 = load <8 x i16>, ptr %a.addr, align 16
+  %1 = load <8 x i16>, ptr %b.addr, align 16
   %shr = ashr <8 x i16> %0, %1
   ret <8 x i16> %shr
 }
@@ -23,10 +23,10 @@ define <4 x i32> @testShiftRightArith_v4i32(<4 x i32> %a, <4 x i32> %b) #0 {
 entry:
   %a.addr = alloca <4 x i32>, align 32
   %b.addr = alloca <4 x i32>, align 32
-  store <4 x i32> %a, <4 x i32>* %a.addr, align 32
-  store <4 x i32> %b, <4 x i32>* %b.addr, align 32
-  %0 = load <4 x i32>, <4 x i32>* %a.addr, align 32
-  %1 = load <4 x i32>, <4 x i32>* %b.addr, align 32
+  store <4 x i32> %a, ptr %a.addr, align 32
+  store <4 x i32> %b, ptr %b.addr, align 32
+  %0 = load <4 x i32>, ptr %a.addr, align 32
+  %1 = load <4 x i32>, ptr %b.addr, align 32
   %shr = ashr <4 x i32> %0, %1
   ret <4 x i32> %shr
 }
@@ -38,10 +38,10 @@ define <8 x i16> @testShiftRightLogical(<8 x i16> %a, <8 x i16> %b) #0 {
 entry:
   %a.addr = alloca <8 x i16>, align 16
   %b.addr = alloca <8 x i16>, align 16
-  store <8 x i16> %a, <8 x i16>* %a.addr, align 16
-  store <8 x i16> %b, <8 x i16>* %b.addr, align 16
-  %0 = load <8 x i16>, <8 x i16>* %a.addr, align 16
-  %1 = load <8 x i16>, <8 x i16>* %b.addr, align 16
+  store <8 x i16> %a, ptr %a.addr, align 16
+  store <8 x i16> %b, ptr %b.addr, align 16
+  %0 = load <8 x i16>, ptr %a.addr, align 16
+  %1 = load <8 x i16>, ptr %b.addr, align 16
   %shr = lshr <8 x i16> %0, %1
   ret <8 x i16> %shr
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-vshuffle.ll b/llvm/test/CodeGen/AArch64/arm64-vshuffle.ll
index 8275c66cb5b5f..b225d9a1acaf5 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshuffle.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshuffle.ll
@@ -28,7 +28,7 @@ bb:
   ret <8 x i1> %Shuff
 }
 
-define <16 x i1> @test3(i1* %ptr, i32 %v) {
+define <16 x i1> @test3(ptr %ptr, i32 %v) {
 ; CHECK-LABEL: test3:
 ; CHECK:       ; %bb.0: ; %bb
 ; CHECK-NEXT:    movi.2d v0, #0x0000ff000000ff
@@ -57,7 +57,7 @@ bb:
 ; CHECK:         .byte   0                       ; 0x0
 ; CHECK:         .byte   0                       ; 0x0
 ; CHECK:         .byte   0                       ; 0x0
-define <16 x i1> @test4(i1* %ptr, i32 %v) {
+define <16 x i1> @test4(ptr %ptr, i32 %v) {
 ; CHECK-LABEL: _test4:
 ; CHECK:         adrp    x[[REG3:[0-9]+]], lCPI3_0@PAGE
 ; CHECK:         ldr     q[[REG2:[0-9]+]], [x[[REG3]], lCPI3_0@PAGEOFF]

diff --git a/llvm/test/CodeGen/AArch64/arm64-vsqrt.ll b/llvm/test/CodeGen/AArch64/arm64-vsqrt.ll
index 5052f60f2ceec..fcf709109d5b1 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vsqrt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vsqrt.ll
@@ -1,28 +1,28 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <2 x float> @frecps_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+define <2 x float> @frecps_2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: frecps_2s:
 ;CHECK: frecps.2s
-	%tmp1 = load <2 x float>, <2 x float>* %A
-	%tmp2 = load <2 x float>, <2 x float>* %B
+	%tmp1 = load <2 x float>, ptr %A
+	%tmp2 = load <2 x float>, ptr %B
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.frecps.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
 
-define <4 x float> @frecps_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @frecps_4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: frecps_4s:
 ;CHECK: frecps.4s
-	%tmp1 = load <4 x float>, <4 x float>* %A
-	%tmp2 = load <4 x float>, <4 x float>* %B
+	%tmp1 = load <4 x float>, ptr %A
+	%tmp2 = load <4 x float>, ptr %B
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.frecps.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
 
-define <2 x double> @frecps_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+define <2 x double> @frecps_2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: frecps_2d:
 ;CHECK: frecps.2d
-	%tmp1 = load <2 x double>, <2 x double>* %A
-	%tmp2 = load <2 x double>, <2 x double>* %B
+	%tmp1 = load <2 x double>, ptr %A
+	%tmp2 = load <2 x double>, ptr %B
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.frecps.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x double> %tmp3
 }
@@ -32,29 +32,29 @@ declare <4 x float> @llvm.aarch64.neon.frecps.v4f32(<4 x float>, <4 x float>) no
 declare <2 x double> @llvm.aarch64.neon.frecps.v2f64(<2 x double>, <2 x double>) nounwind readnone
 
 
-define <2 x float> @frsqrts_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
+define <2 x float> @frsqrts_2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: frsqrts_2s:
 ;CHECK: frsqrts.2s
-	%tmp1 = load <2 x float>, <2 x float>* %A
-	%tmp2 = load <2 x float>, <2 x float>* %B
+	%tmp1 = load <2 x float>, ptr %A
+	%tmp2 = load <2 x float>, ptr %B
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.frsqrts.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
 	ret <2 x float> %tmp3
 }
 
-define <4 x float> @frsqrts_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @frsqrts_4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: frsqrts_4s:
 ;CHECK: frsqrts.4s
-	%tmp1 = load <4 x float>, <4 x float>* %A
-	%tmp2 = load <4 x float>, <4 x float>* %B
+	%tmp1 = load <4 x float>, ptr %A
+	%tmp2 = load <4 x float>, ptr %B
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.frsqrts.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
 	ret <4 x float> %tmp3
 }
 
-define <2 x double> @frsqrts_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
+define <2 x double> @frsqrts_2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: frsqrts_2d:
 ;CHECK: frsqrts.2d
-	%tmp1 = load <2 x double>, <2 x double>* %A
-	%tmp2 = load <2 x double>, <2 x double>* %B
+	%tmp1 = load <2 x double>, ptr %A
+	%tmp2 = load <2 x double>, ptr %B
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.frsqrts.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
 	ret <2 x double> %tmp3
 }
@@ -63,42 +63,42 @@ declare <2 x float> @llvm.aarch64.neon.frsqrts.v2f32(<2 x float>, <2 x float>) n
 declare <4 x float> @llvm.aarch64.neon.frsqrts.v4f32(<4 x float>, <4 x float>) nounwind readnone
 declare <2 x double> @llvm.aarch64.neon.frsqrts.v2f64(<2 x double>, <2 x double>) nounwind readnone
 
-define <2 x float> @frecpe_2s(<2 x float>* %A) nounwind {
+define <2 x float> @frecpe_2s(ptr %A) nounwind {
 ;CHECK-LABEL: frecpe_2s:
 ;CHECK: frecpe.2s
-	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp1 = load <2 x float>, ptr %A
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.frecpe.v2f32(<2 x float> %tmp1)
 	ret <2 x float> %tmp3
 }
 
-define <4 x float> @frecpe_4s(<4 x float>* %A) nounwind {
+define <4 x float> @frecpe_4s(ptr %A) nounwind {
 ;CHECK-LABEL: frecpe_4s:
 ;CHECK: frecpe.4s
-	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp1 = load <4 x float>, ptr %A
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.frecpe.v4f32(<4 x float> %tmp1)
 	ret <4 x float> %tmp3
 }
 
-define <2 x double> @frecpe_2d(<2 x double>* %A) nounwind {
+define <2 x double> @frecpe_2d(ptr %A) nounwind {
 ;CHECK-LABEL: frecpe_2d:
 ;CHECK: frecpe.2d
-	%tmp1 = load <2 x double>, <2 x double>* %A
+	%tmp1 = load <2 x double>, ptr %A
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.frecpe.v2f64(<2 x double> %tmp1)
 	ret <2 x double> %tmp3
 }
 
-define float @frecpe_s(float* %A) nounwind {
+define float @frecpe_s(ptr %A) nounwind {
 ;CHECK-LABEL: frecpe_s:
 ;CHECK: frecpe s0, {{s[0-9]+}}
-  %tmp1 = load float, float* %A
+  %tmp1 = load float, ptr %A
   %tmp3 = call float @llvm.aarch64.neon.frecpe.f32(float %tmp1)
   ret float %tmp3
 }
 
-define double @frecpe_d(double* %A) nounwind {
+define double @frecpe_d(ptr %A) nounwind {
 ;CHECK-LABEL: frecpe_d:
 ;CHECK: frecpe d0, {{d[0-9]+}}
-  %tmp1 = load double, double* %A
+  %tmp1 = load double, ptr %A
   %tmp3 = call double @llvm.aarch64.neon.frecpe.f64(double %tmp1)
   ret double %tmp3
 }
@@ -109,18 +109,18 @@ declare <2 x double> @llvm.aarch64.neon.frecpe.v2f64(<2 x double>) nounwind read
 declare float @llvm.aarch64.neon.frecpe.f32(float) nounwind readnone
 declare double @llvm.aarch64.neon.frecpe.f64(double) nounwind readnone
 
-define float @frecpx_s(float* %A) nounwind {
+define float @frecpx_s(ptr %A) nounwind {
 ;CHECK-LABEL: frecpx_s:
 ;CHECK: frecpx s0, {{s[0-9]+}}
-  %tmp1 = load float, float* %A
+  %tmp1 = load float, ptr %A
   %tmp3 = call float @llvm.aarch64.neon.frecpx.f32(float %tmp1)
   ret float %tmp3
 }
 
-define double @frecpx_d(double* %A) nounwind {
+define double @frecpx_d(ptr %A) nounwind {
 ;CHECK-LABEL: frecpx_d:
 ;CHECK: frecpx d0, {{d[0-9]+}}
-  %tmp1 = load double, double* %A
+  %tmp1 = load double, ptr %A
   %tmp3 = call double @llvm.aarch64.neon.frecpx.f64(double %tmp1)
   ret double %tmp3
 }
@@ -128,42 +128,42 @@ define double @frecpx_d(double* %A) nounwind {
 declare float @llvm.aarch64.neon.frecpx.f32(float) nounwind readnone
 declare double @llvm.aarch64.neon.frecpx.f64(double) nounwind readnone
 
-define <2 x float> @frsqrte_2s(<2 x float>* %A) nounwind {
+define <2 x float> @frsqrte_2s(ptr %A) nounwind {
 ;CHECK-LABEL: frsqrte_2s:
 ;CHECK: frsqrte.2s
-	%tmp1 = load <2 x float>, <2 x float>* %A
+	%tmp1 = load <2 x float>, ptr %A
 	%tmp3 = call <2 x float> @llvm.aarch64.neon.frsqrte.v2f32(<2 x float> %tmp1)
 	ret <2 x float> %tmp3
 }
 
-define <4 x float> @frsqrte_4s(<4 x float>* %A) nounwind {
+define <4 x float> @frsqrte_4s(ptr %A) nounwind {
 ;CHECK-LABEL: frsqrte_4s:
 ;CHECK: frsqrte.4s
-	%tmp1 = load <4 x float>, <4 x float>* %A
+	%tmp1 = load <4 x float>, ptr %A
 	%tmp3 = call <4 x float> @llvm.aarch64.neon.frsqrte.v4f32(<4 x float> %tmp1)
 	ret <4 x float> %tmp3
 }
 
-define <2 x double> @frsqrte_2d(<2 x double>* %A) nounwind {
+define <2 x double> @frsqrte_2d(ptr %A) nounwind {
 ;CHECK-LABEL: frsqrte_2d:
 ;CHECK: frsqrte.2d
-	%tmp1 = load <2 x double>, <2 x double>* %A
+	%tmp1 = load <2 x double>, ptr %A
 	%tmp3 = call <2 x double> @llvm.aarch64.neon.frsqrte.v2f64(<2 x double> %tmp1)
 	ret <2 x double> %tmp3
 }
 
-define float @frsqrte_s(float* %A) nounwind {
+define float @frsqrte_s(ptr %A) nounwind {
 ;CHECK-LABEL: frsqrte_s:
 ;CHECK: frsqrte s0, {{s[0-9]+}}
-  %tmp1 = load float, float* %A
+  %tmp1 = load float, ptr %A
   %tmp3 = call float @llvm.aarch64.neon.frsqrte.f32(float %tmp1)
   ret float %tmp3
 }
 
-define double @frsqrte_d(double* %A) nounwind {
+define double @frsqrte_d(ptr %A) nounwind {
 ;CHECK-LABEL: frsqrte_d:
 ;CHECK: frsqrte d0, {{d[0-9]+}}
-  %tmp1 = load double, double* %A
+  %tmp1 = load double, ptr %A
   %tmp3 = call double @llvm.aarch64.neon.frsqrte.f64(double %tmp1)
   ret double %tmp3
 }
@@ -174,18 +174,18 @@ declare <2 x double> @llvm.aarch64.neon.frsqrte.v2f64(<2 x double>) nounwind rea
 declare float @llvm.aarch64.neon.frsqrte.f32(float) nounwind readnone
 declare double @llvm.aarch64.neon.frsqrte.f64(double) nounwind readnone
 
-define <2 x i32> @urecpe_2s(<2 x i32>* %A) nounwind {
+define <2 x i32> @urecpe_2s(ptr %A) nounwind {
 ;CHECK-LABEL: urecpe_2s:
 ;CHECK: urecpe.2s
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp1 = load <2 x i32>, ptr %A
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.urecpe.v2i32(<2 x i32> %tmp1)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @urecpe_4s(<4 x i32>* %A) nounwind {
+define <4 x i32> @urecpe_4s(ptr %A) nounwind {
 ;CHECK-LABEL: urecpe_4s:
 ;CHECK: urecpe.4s
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp1 = load <4 x i32>, ptr %A
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.urecpe.v4i32(<4 x i32> %tmp1)
 	ret <4 x i32> %tmp3
 }
@@ -193,18 +193,18 @@ define <4 x i32> @urecpe_4s(<4 x i32>* %A) nounwind {
 declare <2 x i32> @llvm.aarch64.neon.urecpe.v2i32(<2 x i32>) nounwind readnone
 declare <4 x i32> @llvm.aarch64.neon.urecpe.v4i32(<4 x i32>) nounwind readnone
 
-define <2 x i32> @ursqrte_2s(<2 x i32>* %A) nounwind {
+define <2 x i32> @ursqrte_2s(ptr %A) nounwind {
 ;CHECK-LABEL: ursqrte_2s:
 ;CHECK: ursqrte.2s
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
+	%tmp1 = load <2 x i32>, ptr %A
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.ursqrte.v2i32(<2 x i32> %tmp1)
 	ret <2 x i32> %tmp3
 }
 
-define <4 x i32> @ursqrte_4s(<4 x i32>* %A) nounwind {
+define <4 x i32> @ursqrte_4s(ptr %A) nounwind {
 ;CHECK-LABEL: ursqrte_4s:
 ;CHECK: ursqrte.4s
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
+	%tmp1 = load <4 x i32>, ptr %A
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.ursqrte.v4i32(<4 x i32> %tmp1)
 	ret <4 x i32> %tmp3
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-vsra.ll b/llvm/test/CodeGen/AArch64/arm64-vsra.ll
index 15364f4001cb1..fa6a40c0181d0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vsra.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vsra.ll
@@ -1,141 +1,141 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @vsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @vsras8(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vsras8:
 ;CHECK: ssra.8b
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = ashr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
         %tmp4 = add <8 x i8> %tmp1, %tmp3
 	ret <8 x i8> %tmp4
 }
 
-define <4 x i16> @vsras16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @vsras16(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vsras16:
 ;CHECK: ssra.4h
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = ashr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
         %tmp4 = add <4 x i16> %tmp1, %tmp3
 	ret <4 x i16> %tmp4
 }
 
-define <2 x i32> @vsras32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @vsras32(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vsras32:
 ;CHECK: ssra.2s
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = ashr <2 x i32> %tmp2, < i32 31, i32 31 >
         %tmp4 = add <2 x i32> %tmp1, %tmp3
 	ret <2 x i32> %tmp4
 }
 
-define <16 x i8> @vsraQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @vsraQs8(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vsraQs8:
 ;CHECK: ssra.16b
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = ashr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
         %tmp4 = add <16 x i8> %tmp1, %tmp3
 	ret <16 x i8> %tmp4
 }
 
-define <8 x i16> @vsraQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @vsraQs16(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vsraQs16:
 ;CHECK: ssra.8h
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = ashr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
         %tmp4 = add <8 x i16> %tmp1, %tmp3
 	ret <8 x i16> %tmp4
 }
 
-define <4 x i32> @vsraQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @vsraQs32(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vsraQs32:
 ;CHECK: ssra.4s
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = ashr <4 x i32> %tmp2, < i32 31, i32 31, i32 31, i32 31 >
         %tmp4 = add <4 x i32> %tmp1, %tmp3
 	ret <4 x i32> %tmp4
 }
 
-define <2 x i64> @vsraQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @vsraQs64(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vsraQs64:
 ;CHECK: ssra.2d
-	%tmp1 = load <2 x i64>, <2 x i64>* %A
-	%tmp2 = load <2 x i64>, <2 x i64>* %B
+	%tmp1 = load <2 x i64>, ptr %A
+	%tmp2 = load <2 x i64>, ptr %B
 	%tmp3 = ashr <2 x i64> %tmp2, < i64 63, i64 63 >
         %tmp4 = add <2 x i64> %tmp1, %tmp3
 	ret <2 x i64> %tmp4
 }
 
-define <8 x i8> @vsrau8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @vsrau8(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vsrau8:
 ;CHECK: usra.8b
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = lshr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
         %tmp4 = add <8 x i8> %tmp1, %tmp3
 	ret <8 x i8> %tmp4
 }
 
-define <4 x i16> @vsrau16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @vsrau16(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vsrau16:
 ;CHECK: usra.4h
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = lshr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
         %tmp4 = add <4 x i16> %tmp1, %tmp3
 	ret <4 x i16> %tmp4
 }
 
-define <2 x i32> @vsrau32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i32> @vsrau32(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vsrau32:
 ;CHECK: usra.2s
-	%tmp1 = load <2 x i32>, <2 x i32>* %A
-	%tmp2 = load <2 x i32>, <2 x i32>* %B
+	%tmp1 = load <2 x i32>, ptr %A
+	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = lshr <2 x i32> %tmp2, < i32 31, i32 31 >
         %tmp4 = add <2 x i32> %tmp1, %tmp3
 	ret <2 x i32> %tmp4
 }
 
 
-define <16 x i8> @vsraQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @vsraQu8(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vsraQu8:
 ;CHECK: usra.16b
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = lshr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
         %tmp4 = add <16 x i8> %tmp1, %tmp3
 	ret <16 x i8> %tmp4
 }
 
-define <8 x i16> @vsraQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @vsraQu16(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vsraQu16:
 ;CHECK: usra.8h
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = lshr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
         %tmp4 = add <8 x i16> %tmp1, %tmp3
 	ret <8 x i16> %tmp4
 }
 
-define <4 x i32> @vsraQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @vsraQu32(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vsraQu32:
 ;CHECK: usra.4s
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = lshr <4 x i32> %tmp2, < i32 31, i32 31, i32 31, i32 31 >
         %tmp4 = add <4 x i32> %tmp1, %tmp3
 	ret <4 x i32> %tmp4
 }
 
-define <2 x i64> @vsraQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i64> @vsraQu64(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vsraQu64:
 ;CHECK: usra.2d
-	%tmp1 = load <2 x i64>, <2 x i64>* %A
-	%tmp2 = load <2 x i64>, <2 x i64>* %B
+	%tmp1 = load <2 x i64>, ptr %A
+	%tmp2 = load <2 x i64>, ptr %B
 	%tmp3 = lshr <2 x i64> %tmp2, < i64 63, i64 63 >
         %tmp4 = add <2 x i64> %tmp1, %tmp3
 	ret <2 x i64> %tmp4

diff --git a/llvm/test/CodeGen/AArch64/arm64-vsub.ll b/llvm/test/CodeGen/AArch64/arm64-vsub.ll
index 6746e49989cbd..521712eed8e4b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vsub.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vsub.ll
@@ -1,28 +1,28 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @subhn8b(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i8> @subhn8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: subhn8b:
 ;CHECK: subhn.8b
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i16>, ptr %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.subhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @subhn4h(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i16> @subhn4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: subhn4h:
 ;CHECK: subhn.4h
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i32>, ptr %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.subhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @subhn2s(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i32> @subhn2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: subhn2s:
 ;CHECK: subhn.2s
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i64>, ptr %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.subhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i32> %tmp3
 }
@@ -61,29 +61,29 @@ declare <2 x i32> @llvm.aarch64.neon.subhn.v2i32(<2 x i64>, <2 x i64>) nounwind
 declare <4 x i16> @llvm.aarch64.neon.subhn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
 declare <8 x i8> @llvm.aarch64.neon.subhn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
 
-define <8 x i8> @rsubhn8b(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i8> @rsubhn8b(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: rsubhn8b:
 ;CHECK: rsubhn.8b
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i16>, ptr %B
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
         ret <8 x i8> %tmp3
 }
 
-define <4 x i16> @rsubhn4h(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i16> @rsubhn4h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: rsubhn4h:
 ;CHECK: rsubhn.4h
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i32>, ptr %B
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.rsubhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
         ret <4 x i16> %tmp3
 }
 
-define <2 x i32> @rsubhn2s(<2 x i64>* %A, <2 x i64>* %B) nounwind {
+define <2 x i32> @rsubhn2s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: rsubhn2s:
 ;CHECK: rsubhn.2s
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i64>, <2 x i64>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i64>, ptr %B
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.rsubhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
         ret <2 x i32> %tmp3
 }
@@ -122,47 +122,47 @@ declare <2 x i32> @llvm.aarch64.neon.rsubhn.v2i32(<2 x i64>, <2 x i64>) nounwind
 declare <4 x i16> @llvm.aarch64.neon.rsubhn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
 declare <8 x i8> @llvm.aarch64.neon.rsubhn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
 
-define <8 x i16> @ssubl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i16> @ssubl8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssubl8h:
 ;CHECK: ssubl.8h
-        %tmp1 = load <8 x i8>, <8 x i8>* %A
-        %tmp2 = load <8 x i8>, <8 x i8>* %B
+        %tmp1 = load <8 x i8>, ptr %A
+        %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
   %tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
   %tmp5 = sub <8 x i16> %tmp3, %tmp4
         ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @ssubl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @ssubl4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssubl4s:
 ;CHECK: ssubl.4s
-        %tmp1 = load <4 x i16>, <4 x i16>* %A
-        %tmp2 = load <4 x i16>, <4 x i16>* %B
+        %tmp1 = load <4 x i16>, ptr %A
+        %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = sext <4 x i16> %tmp1 to <4 x i32>
   %tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
   %tmp5 = sub <4 x i32> %tmp3, %tmp4
         ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @ssubl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @ssubl2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssubl2d:
 ;CHECK: ssubl.2d
-        %tmp1 = load <2 x i32>, <2 x i32>* %A
-        %tmp2 = load <2 x i32>, <2 x i32>* %B
+        %tmp1 = load <2 x i32>, ptr %A
+        %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = sext <2 x i32> %tmp1 to <2 x i64>
   %tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
   %tmp5 = sub <2 x i64> %tmp3, %tmp4
         ret <2 x i64> %tmp5
 }
 
-define <8 x i16> @ssubl2_8h(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <8 x i16> @ssubl2_8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssubl2_8h:
 ;CHECK: ssubl.8h
-        %tmp1 = load <16 x i8>, <16 x i8>* %A
+        %tmp1 = load <16 x i8>, ptr %A
         %high1 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         %ext1 = sext <8 x i8> %high1 to <8 x i16>
 
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp2 = load <16 x i8>, ptr %B
         %high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         %ext2 = sext <8 x i8> %high2 to <8 x i16>
 
@@ -170,14 +170,14 @@ define <8 x i16> @ssubl2_8h(<16 x i8>* %A, <16 x i8>* %B) nounwind {
         ret <8 x i16> %res
 }
 
-define <4 x i32> @ssubl2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <4 x i32> @ssubl2_4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssubl2_4s:
 ;CHECK: ssubl.4s
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
         %high1 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
         %ext1 = sext <4 x i16> %high1 to <4 x i32>
 
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp2 = load <8 x i16>, ptr %B
         %high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
         %ext2 = sext <4 x i16> %high2 to <4 x i32>
 
@@ -185,14 +185,14 @@ define <4 x i32> @ssubl2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
         ret <4 x i32> %res
 }
 
-define <2 x i64> @ssubl2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <2 x i64> @ssubl2_2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssubl2_2d:
 ;CHECK: ssubl.2d
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
         %high1 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
         %ext1 = sext <2 x i32> %high1 to <2 x i64>
 
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp2 = load <4 x i32>, ptr %B
         %high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
         %ext2 = sext <2 x i32> %high2 to <2 x i64>
 
@@ -200,47 +200,47 @@ define <2 x i64> @ssubl2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
         ret <2 x i64> %res
 }
 
-define <8 x i16> @usubl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i16> @usubl8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usubl8h:
 ;CHECK: usubl.8h
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
-  %tmp2 = load <8 x i8>, <8 x i8>* %B
+  %tmp1 = load <8 x i8>, ptr %A
+  %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
   %tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
   %tmp5 = sub <8 x i16> %tmp3, %tmp4
   ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @usubl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @usubl4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usubl4s:
 ;CHECK: usubl.4s
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
-  %tmp2 = load <4 x i16>, <4 x i16>* %B
+  %tmp1 = load <4 x i16>, ptr %A
+  %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
   %tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
   %tmp5 = sub <4 x i32> %tmp3, %tmp4
   ret <4 x i32> %tmp5
 }
 
-define <2 x i64> @usubl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @usubl2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usubl2d:
 ;CHECK: usubl.2d
-  %tmp1 = load <2 x i32>, <2 x i32>* %A
-  %tmp2 = load <2 x i32>, <2 x i32>* %B
+  %tmp1 = load <2 x i32>, ptr %A
+  %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = zext <2 x i32> %tmp1 to <2 x i64>
   %tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
   %tmp5 = sub <2 x i64> %tmp3, %tmp4
   ret <2 x i64> %tmp5
 }
 
-define <8 x i16> @usubl2_8h(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <8 x i16> @usubl2_8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usubl2_8h:
 ;CHECK: usubl.8h
-  %tmp1 = load <16 x i8>, <16 x i8>* %A
+  %tmp1 = load <16 x i8>, ptr %A
   %high1 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %ext1 = zext <8 x i8> %high1 to <8 x i16>
 
-  %tmp2 = load <16 x i8>, <16 x i8>* %B
+  %tmp2 = load <16 x i8>, ptr %B
   %high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %ext2 = zext <8 x i8> %high2 to <8 x i16>
 
@@ -248,14 +248,14 @@ define <8 x i16> @usubl2_8h(<16 x i8>* %A, <16 x i8>* %B) nounwind {
   ret <8 x i16> %res
 }
 
-define <4 x i32> @usubl2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <4 x i32> @usubl2_4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usubl2_4s:
 ;CHECK: usubl.4s
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
+  %tmp1 = load <8 x i16>, ptr %A
   %high1 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %ext1 = zext <4 x i16> %high1 to <4 x i32>
 
-  %tmp2 = load <8 x i16>, <8 x i16>* %B
+  %tmp2 = load <8 x i16>, ptr %B
   %high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %ext2 = zext <4 x i16> %high2 to <4 x i32>
 
@@ -263,14 +263,14 @@ define <4 x i32> @usubl2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
   ret <4 x i32> %res
 }
 
-define <2 x i64> @usubl2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <2 x i64> @usubl2_2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usubl2_2d:
 ;CHECK: usubl.2d
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
+  %tmp1 = load <4 x i32>, ptr %A
   %high1 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %ext1 = zext <2 x i32> %high1 to <2 x i64>
 
-  %tmp2 = load <4 x i32>, <4 x i32>* %B
+  %tmp2 = load <4 x i32>, ptr %B
   %high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
   %ext2 = zext <2 x i32> %high2 to <2 x i64>
 
@@ -278,42 +278,42 @@ define <2 x i64> @usubl2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
   ret <2 x i64> %res
 }
 
-define <8 x i16> @ssubw8h(<8 x i16>* %A, <8 x i8>* %B) nounwind {
+define <8 x i16> @ssubw8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssubw8h:
 ;CHECK: ssubw.8h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i8>, <8 x i8>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = sext <8 x i8> %tmp2 to <8 x i16>
   %tmp4 = sub <8 x i16> %tmp1, %tmp3
         ret <8 x i16> %tmp4
 }
 
-define <4 x i32> @ssubw4s(<4 x i32>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @ssubw4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssubw4s:
 ;CHECK: ssubw.4s
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i16>, <4 x i16>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = sext <4 x i16> %tmp2 to <4 x i32>
   %tmp4 = sub <4 x i32> %tmp1, %tmp3
         ret <4 x i32> %tmp4
 }
 
-define <2 x i64> @ssubw2d(<2 x i64>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @ssubw2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssubw2d:
 ;CHECK: ssubw.2d
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i32>, <2 x i32>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = sext <2 x i32> %tmp2 to <2 x i64>
   %tmp4 = sub <2 x i64> %tmp1, %tmp3
         ret <2 x i64> %tmp4
 }
 
-define <8 x i16> @ssubw2_8h(<8 x i16>* %A, <16 x i8>* %B) nounwind {
+define <8 x i16> @ssubw2_8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssubw2_8h:
 ;CHECK: ssubw.8h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
 
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp2 = load <16 x i8>, ptr %B
         %high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         %ext2 = sext <8 x i8> %high2 to <8 x i16>
 
@@ -321,12 +321,12 @@ define <8 x i16> @ssubw2_8h(<8 x i16>* %A, <16 x i8>* %B) nounwind {
         ret <8 x i16> %res
 }
 
-define <4 x i32> @ssubw2_4s(<4 x i32>* %A, <8 x i16>* %B) nounwind {
+define <4 x i32> @ssubw2_4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssubw2_4s:
 ;CHECK: ssubw.4s
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
 
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp2 = load <8 x i16>, ptr %B
         %high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
         %ext2 = sext <4 x i16> %high2 to <4 x i32>
 
@@ -334,12 +334,12 @@ define <4 x i32> @ssubw2_4s(<4 x i32>* %A, <8 x i16>* %B) nounwind {
         ret <4 x i32> %res
 }
 
-define <2 x i64> @ssubw2_2d(<2 x i64>* %A, <4 x i32>* %B) nounwind {
+define <2 x i64> @ssubw2_2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: ssubw2_2d:
 ;CHECK: ssubw.2d
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
 
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp2 = load <4 x i32>, ptr %B
         %high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
         %ext2 = sext <2 x i32> %high2 to <2 x i64>
 
@@ -347,42 +347,42 @@ define <2 x i64> @ssubw2_2d(<2 x i64>* %A, <4 x i32>* %B) nounwind {
         ret <2 x i64> %res
 }
 
-define <8 x i16> @usubw8h(<8 x i16>* %A, <8 x i8>* %B) nounwind {
+define <8 x i16> @usubw8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usubw8h:
 ;CHECK: usubw.8h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
-        %tmp2 = load <8 x i8>, <8 x i8>* %B
+        %tmp1 = load <8 x i16>, ptr %A
+        %tmp2 = load <8 x i8>, ptr %B
   %tmp3 = zext <8 x i8> %tmp2 to <8 x i16>
   %tmp4 = sub <8 x i16> %tmp1, %tmp3
         ret <8 x i16> %tmp4
 }
 
-define <4 x i32> @usubw4s(<4 x i32>* %A, <4 x i16>* %B) nounwind {
+define <4 x i32> @usubw4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usubw4s:
 ;CHECK: usubw.4s
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
-        %tmp2 = load <4 x i16>, <4 x i16>* %B
+        %tmp1 = load <4 x i32>, ptr %A
+        %tmp2 = load <4 x i16>, ptr %B
   %tmp3 = zext <4 x i16> %tmp2 to <4 x i32>
   %tmp4 = sub <4 x i32> %tmp1, %tmp3
         ret <4 x i32> %tmp4
 }
 
-define <2 x i64> @usubw2d(<2 x i64>* %A, <2 x i32>* %B) nounwind {
+define <2 x i64> @usubw2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usubw2d:
 ;CHECK: usubw.2d
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
-        %tmp2 = load <2 x i32>, <2 x i32>* %B
+        %tmp1 = load <2 x i64>, ptr %A
+        %tmp2 = load <2 x i32>, ptr %B
   %tmp3 = zext <2 x i32> %tmp2 to <2 x i64>
   %tmp4 = sub <2 x i64> %tmp1, %tmp3
         ret <2 x i64> %tmp4
 }
 
-define <8 x i16> @usubw2_8h(<8 x i16>* %A, <16 x i8>* %B) nounwind {
+define <8 x i16> @usubw2_8h(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usubw2_8h:
 ;CHECK: usubw.8h
-        %tmp1 = load <8 x i16>, <8 x i16>* %A
+        %tmp1 = load <8 x i16>, ptr %A
 
-        %tmp2 = load <16 x i8>, <16 x i8>* %B
+        %tmp2 = load <16 x i8>, ptr %B
         %high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         %ext2 = zext <8 x i8> %high2 to <8 x i16>
 
@@ -390,12 +390,12 @@ define <8 x i16> @usubw2_8h(<8 x i16>* %A, <16 x i8>* %B) nounwind {
         ret <8 x i16> %res
 }
 
-define <4 x i32> @usubw2_4s(<4 x i32>* %A, <8 x i16>* %B) nounwind {
+define <4 x i32> @usubw2_4s(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usubw2_4s:
 ;CHECK: usubw.4s
-        %tmp1 = load <4 x i32>, <4 x i32>* %A
+        %tmp1 = load <4 x i32>, ptr %A
 
-        %tmp2 = load <8 x i16>, <8 x i16>* %B
+        %tmp2 = load <8 x i16>, ptr %B
         %high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
         %ext2 = zext <4 x i16> %high2 to <4 x i32>
 
@@ -403,12 +403,12 @@ define <4 x i32> @usubw2_4s(<4 x i32>* %A, <8 x i16>* %B) nounwind {
         ret <4 x i32> %res
 }
 
-define <2 x i64> @usubw2_2d(<2 x i64>* %A, <4 x i32>* %B) nounwind {
+define <2 x i64> @usubw2_2d(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: usubw2_2d:
 ;CHECK: usubw.2d
-        %tmp1 = load <2 x i64>, <2 x i64>* %A
+        %tmp1 = load <2 x i64>, ptr %A
 
-        %tmp2 = load <4 x i32>, <4 x i32>* %B
+        %tmp2 = load <4 x i32>, ptr %B
         %high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
         %ext2 = zext <2 x i32> %high2 to <2 x i64>
 

diff --git a/llvm/test/CodeGen/AArch64/arm64-weak-reference.ll b/llvm/test/CodeGen/AArch64/arm64-weak-reference.ll
index e8074def4e6e4..d1510b277a044 100644
--- a/llvm/test/CodeGen/AArch64/arm64-weak-reference.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-weak-reference.ll
@@ -5,6 +5,6 @@
 define i32 @fn() nounwind ssp {
 ; CHECK-LABEL: fn:
 ; CHECK: .weak_reference
-  %val = load i32, i32* @x, align 4
+  %val = load i32, ptr @x, align 4
   ret i32 %val
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64-windows-calls.ll b/llvm/test/CodeGen/AArch64/arm64-windows-calls.ll
index e12e2c91a0e45..c8caee2b49a8c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-windows-calls.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-windows-calls.ll
@@ -11,13 +11,11 @@ entry:
 ; CHECK-DAG: mov x0, xzr
 
   %retval = alloca %struct.S1, align 4
-  %a = getelementptr inbounds %struct.S1, %struct.S1* %retval, i32 0, i32 0
-  store i32 0, i32* %a, align 4
-  %b = getelementptr inbounds %struct.S1, %struct.S1* %retval, i32 0, i32 1
-  store i32 0, i32* %b, align 4
-  %0 = bitcast %struct.S1* %retval to i64*
-  %1 = load i64, i64* %0, align 4
-  ret i64 %1
+  store i32 0, ptr %retval, align 4
+  %b = getelementptr inbounds %struct.S1, ptr %retval, i32 0, i32 1
+  store i32 0, ptr %b, align 4
+  %0 = load i64, ptr %retval, align 4
+  ret i64 %0
 }
 
 ; Returns <= 16 bytes should be in X0/X1.
@@ -36,50 +34,46 @@ entry:
 ; CHECK-NEXT:    add     sp, sp, #16
 
   %retval = alloca %struct.S2, align 4
-  %a = getelementptr inbounds %struct.S2, %struct.S2* %retval, i32 0, i32 0
-  store i32 0, i32* %a, align 4
-  %b = getelementptr inbounds %struct.S2, %struct.S2* %retval, i32 0, i32 1
-  store i32 0, i32* %b, align 4
-  %c = getelementptr inbounds %struct.S2, %struct.S2* %retval, i32 0, i32 2
-  store i32 0, i32* %c, align 4
-  %d = getelementptr inbounds %struct.S2, %struct.S2* %retval, i32 0, i32 3
-  store i32 0, i32* %d, align 4
-  %0 = bitcast %struct.S2* %retval to [2 x i64]*
-  %1 = load [2 x i64], [2 x i64]* %0, align 4
-  ret [2 x i64] %1
+  store i32 0, ptr %retval, align 4
+  %b = getelementptr inbounds %struct.S2, ptr %retval, i32 0, i32 1
+  store i32 0, ptr %b, align 4
+  %c = getelementptr inbounds %struct.S2, ptr %retval, i32 0, i32 2
+  store i32 0, ptr %c, align 4
+  %d = getelementptr inbounds %struct.S2, ptr %retval, i32 0, i32 3
+  store i32 0, ptr %d, align 4
+  %0 = load [2 x i64], ptr %retval, align 4
+  ret [2 x i64] %0
 }
 
 ; Arguments > 16 bytes should be passed in X8.
 %struct.S3 = type { i32, i32, i32, i32, i32 }
-define dso_local void @"?f3"(%struct.S3* noalias sret(%struct.S3) %agg.result) {
+define dso_local void @"?f3"(ptr noalias sret(%struct.S3) %agg.result) {
 entry:
 ; CHECK-LABEL: f3
 ; CHECK: stp xzr, xzr, [x8]
 ; CHECK: str wzr, [x8, #16]
 
-  %a = getelementptr inbounds %struct.S3, %struct.S3* %agg.result, i32 0, i32 0
-  store i32 0, i32* %a, align 4
-  %b = getelementptr inbounds %struct.S3, %struct.S3* %agg.result, i32 0, i32 1
-  store i32 0, i32* %b, align 4
-  %c = getelementptr inbounds %struct.S3, %struct.S3* %agg.result, i32 0, i32 2
-  store i32 0, i32* %c, align 4
-  %d = getelementptr inbounds %struct.S3, %struct.S3* %agg.result, i32 0, i32 3
-  store i32 0, i32* %d, align 4
-  %e = getelementptr inbounds %struct.S3, %struct.S3* %agg.result, i32 0, i32 4
-  store i32 0, i32* %e, align 4
+  store i32 0, ptr %agg.result, align 4
+  %b = getelementptr inbounds %struct.S3, ptr %agg.result, i32 0, i32 1
+  store i32 0, ptr %b, align 4
+  %c = getelementptr inbounds %struct.S3, ptr %agg.result, i32 0, i32 2
+  store i32 0, ptr %c, align 4
+  %d = getelementptr inbounds %struct.S3, ptr %agg.result, i32 0, i32 3
+  store i32 0, ptr %d, align 4
+  %e = getelementptr inbounds %struct.S3, ptr %agg.result, i32 0, i32 4
+  store i32 0, ptr %e, align 4
   ret void
 }
 
 ; InReg arguments to non-instance methods must be passed in X0 and returns in
 ; X0.
 %class.B = type { i32 }
-define dso_local void @"?f4"(%class.B* inreg noalias nocapture sret(%class.B) %agg.result) {
+define dso_local void @"?f4"(ptr inreg noalias nocapture sret(%class.B) %agg.result) {
 entry:
 ; CHECK-LABEL: f4
 ; CHECK: mov w8, #1
 ; CHECK: str w8, [x0]
-  %X.i = getelementptr inbounds %class.B, %class.B* %agg.result, i64 0, i32 0
-  store i32 1, i32* %X.i, align 4
+  store i32 1, ptr %agg.result, align 4
   ret void
 }
 
@@ -87,15 +81,15 @@ entry:
 %class.C = type { i8 }
 %class.A = type { i8 }
 
-define dso_local void @"?inst at C"(%class.C* %this, %class.A* inreg noalias sret(%class.A) %agg.result) {
+define dso_local void @"?inst at C"(ptr %this, ptr inreg noalias sret(%class.A) %agg.result) {
 entry:
 ; CHECK-LABEL: inst@C
 ; CHECK-DAG: mov x0, x1
 ; CHECK-DAG: str x8, [sp, #8]
 
-  %this.addr = alloca %class.C*, align 8
-  store %class.C* %this, %class.C** %this.addr, align 8
-  %this1 = load %class.C*, %class.C** %this.addr, align 8
+  %this.addr = alloca ptr, align 8
+  store ptr %this, ptr %this.addr, align 8
+  %this1 = load ptr, ptr %this.addr, align 8
   ret void
 }
 
@@ -112,29 +106,26 @@ entry:
 %struct.NotPod = type { %struct.NotCXX14Aggregate }
 
 ; CHECK-LABEL: copy_pod:
-define dso_local %struct.Pod @copy_pod(%struct.Pod* %x) {
-  %x1 = load %struct.Pod, %struct.Pod* %x, align 8
+define dso_local %struct.Pod @copy_pod(ptr %x) {
+  %x1 = load %struct.Pod, ptr %x, align 8
   ret %struct.Pod %x1
   ; CHECK: ldp d0, d1, [x0]
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
 
 ; CHECK-LABEL: copy_notcxx14aggregate:
 define dso_local void
-@copy_notcxx14aggregate(%struct.NotCXX14Aggregate* inreg noalias sret(%struct.NotCXX14Aggregate) align 8 %agg.result,
-                        %struct.NotCXX14Aggregate* %x) {
-  %1 = bitcast %struct.NotCXX14Aggregate* %agg.result to i8*
-  %2 = bitcast %struct.NotCXX14Aggregate* %x to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %1, i8* align 8 %2, i64 16, i1 false)
+@copy_notcxx14aggregate(ptr inreg noalias sret(%struct.NotCXX14Aggregate) align 8 %agg.result,
+                        ptr %x) {
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %agg.result, ptr align 8 %x, i64 16, i1 false)
   ret void
   ; CHECK: str q0, [x0]
 }
 
 ; CHECK-LABEL: copy_notpod:
-define dso_local [2 x i64] @copy_notpod(%struct.NotPod* %x) {
-  %x1 = bitcast %struct.NotPod* %x to [2 x i64]*
-  %x2 = load [2 x i64], [2 x i64]* %x1
+define dso_local [2 x i64] @copy_notpod(ptr %x) {
+  %x2 = load [2 x i64], ptr %x
   ret [2 x i64] %x2
   ; CHECK: ldp x8, x1, [x0]
   ; CHECK: mov x0, x8
@@ -144,8 +135,8 @@ define dso_local [2 x i64] @copy_notpod(%struct.NotPod* %x) {
 
 ; CHECK-LABEL: call_copy_pod:
 define void @call_copy_pod() {
-  %x = call %struct.Pod @copy_pod(%struct.Pod* @Pod)
-  store %struct.Pod %x, %struct.Pod* @Pod
+  %x = call %struct.Pod @copy_pod(ptr @Pod)
+  store %struct.Pod %x, ptr @Pod
   ret void
   ; CHECK: bl copy_pod
   ; CHECK-NEXT: str d0, [{{.*}}]
@@ -157,9 +148,9 @@ define void @call_copy_pod() {
 ; CHECK-LABEL: call_copy_notcxx14aggregate:
 define void @call_copy_notcxx14aggregate() {
   %x = alloca %struct.NotCXX14Aggregate
-  call void @copy_notcxx14aggregate(%struct.NotCXX14Aggregate* %x, %struct.NotCXX14Aggregate* @NotCXX14Aggregate)
-  %x1 = load %struct.NotCXX14Aggregate, %struct.NotCXX14Aggregate* %x
-  store %struct.NotCXX14Aggregate %x1, %struct.NotCXX14Aggregate* @NotCXX14Aggregate
+  call void @copy_notcxx14aggregate(ptr %x, ptr @NotCXX14Aggregate)
+  %x1 = load %struct.NotCXX14Aggregate, ptr %x
+  store %struct.NotCXX14Aggregate %x1, ptr @NotCXX14Aggregate
   ret void
   ; CHECK: bl copy_notcxx14aggregate
   ; CHECK-NEXT: ldp {{.*}}, {{.*}}, [sp]
@@ -169,9 +160,8 @@ define void @call_copy_notcxx14aggregate() {
 
 ; CHECK-LABEL: call_copy_notpod:
 define void @call_copy_notpod() {
-  %x = call [2 x i64] @copy_notpod(%struct.NotPod* @NotPod)
-  %notpod = bitcast %struct.NotPod* @NotPod to [2 x i64]*
-  store [2 x i64] %x, [2 x i64]* %notpod
+  %x = call [2 x i64] @copy_notpod(ptr @NotPod)
+  store [2 x i64] %x, ptr @NotPod
   ret void
   ; CHECK: bl copy_notpod
   ; CHECK-NEXT: stp x0, x1, [{{.*}}]
@@ -179,12 +169,12 @@ define void @call_copy_notpod() {
 
 ; We shouldn't return the argument
 ; when it has only inreg attribute
-define i64 @foobar(i64* inreg %0) {
+define i64 @foobar(ptr inreg %0) {
 ; CHECK-LABEL: foobar:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %1 = load i64, i64* %0
+  %1 = load i64, ptr %0
   ret i64 %1
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64-windows-tailcall.ll b/llvm/test/CodeGen/AArch64/arm64-windows-tailcall.ll
index 9694994386c7c..55799d0dcb2d2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-windows-tailcall.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-windows-tailcall.ll
@@ -4,15 +4,15 @@
 
 %class.C = type { [1 x i32] }
 
-define dso_local void @"?bar"(%class.C* inreg noalias sret(%class.C) %agg.result) {
+define dso_local void @"?bar"(ptr inreg noalias sret(%class.C) %agg.result) {
 entry:
 ; CHECK-LABEL: bar
 ; CHECK: mov x19, x0
 ; CHECK: bl "?foo"
 ; CHECK: mov x0, x19
 
-  tail call void @"?foo"(%class.C* dereferenceable(4) %agg.result)
+  tail call void @"?foo"(ptr dereferenceable(4) %agg.result)
   ret void
 }
 
-declare dso_local void @"?foo"(%class.C* dereferenceable(4))
+declare dso_local void @"?foo"(ptr dereferenceable(4))

diff  --git a/llvm/test/CodeGen/AArch64/arm64-xaluo.ll b/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
index b283fcaf13e21..d2ba50ce6a80b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
@@ -6,7 +6,7 @@
 ;
 ; Get the actual value of the overflow bit.
 ;
-define zeroext i1 @saddo1.i32(i32 %v1, i32 %v2, i32* %res) {
+define zeroext i1 @saddo1.i32(i32 %v1, i32 %v2, ptr %res) {
 ; SDAG-LABEL: saddo1.i32:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    adds w8, w0, w1
@@ -32,12 +32,12 @@ entry:
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
-  store i32 %val, i32* %res
+  store i32 %val, ptr %res
   ret i1 %obit
 }
 
 ; Test the immediate version.
-define zeroext i1 @saddo2.i32(i32 %v1, i32* %res) {
+define zeroext i1 @saddo2.i32(i32 %v1, ptr %res) {
 ; SDAG-LABEL: saddo2.i32:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    adds w8, w0, #4
@@ -63,12 +63,12 @@ entry:
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 4)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
-  store i32 %val, i32* %res
+  store i32 %val, ptr %res
   ret i1 %obit
 }
 
 ; Test negative immediates.
-define zeroext i1 @saddo3.i32(i32 %v1, i32* %res) {
+define zeroext i1 @saddo3.i32(i32 %v1, ptr %res) {
 ; SDAG-LABEL: saddo3.i32:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    subs w8, w0, #4
@@ -94,12 +94,12 @@ entry:
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 -4)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
-  store i32 %val, i32* %res
+  store i32 %val, ptr %res
   ret i1 %obit
 }
 
 ; Test immediates that are too large to be encoded.
-define zeroext i1 @saddo4.i32(i32 %v1, i32* %res) {
+define zeroext i1 @saddo4.i32(i32 %v1, ptr %res) {
 ; SDAG-LABEL: saddo4.i32:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    mov w8, #16777215
@@ -128,12 +128,12 @@ entry:
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 16777215)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
-  store i32 %val, i32* %res
+  store i32 %val, ptr %res
   ret i1 %obit
 }
 
 ; Test shift folding.
-define zeroext i1 @saddo5.i32(i32 %v1, i32 %v2, i32* %res) {
+define zeroext i1 @saddo5.i32(i32 %v1, i32 %v2, ptr %res) {
 ; SDAG-LABEL: saddo5.i32:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    adds w8, w0, w1, lsl #16
@@ -160,11 +160,11 @@ entry:
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %lsl)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
-  store i32 %val, i32* %res
+  store i32 %val, ptr %res
   ret i1 %obit
 }
 
-define zeroext i1 @saddo1.i64(i64 %v1, i64 %v2, i64* %res) {
+define zeroext i1 @saddo1.i64(i64 %v1, i64 %v2, ptr %res) {
 ; SDAG-LABEL: saddo1.i64:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    adds x8, x0, x1
@@ -190,11 +190,11 @@ entry:
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
-  store i64 %val, i64* %res
+  store i64 %val, ptr %res
   ret i1 %obit
 }
 
-define zeroext i1 @saddo2.i64(i64 %v1, i64* %res) {
+define zeroext i1 @saddo2.i64(i64 %v1, ptr %res) {
 ; SDAG-LABEL: saddo2.i64:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    adds x8, x0, #4
@@ -220,11 +220,11 @@ entry:
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 4)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
-  store i64 %val, i64* %res
+  store i64 %val, ptr %res
   ret i1 %obit
 }
 
-define zeroext i1 @saddo3.i64(i64 %v1, i64* %res) {
+define zeroext i1 @saddo3.i64(i64 %v1, ptr %res) {
 ; SDAG-LABEL: saddo3.i64:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    subs x8, x0, #4
@@ -250,11 +250,11 @@ entry:
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -4)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
-  store i64 %val, i64* %res
+  store i64 %val, ptr %res
   ret i1 %obit
 }
 
-define zeroext i1 @uaddo.i32(i32 %v1, i32 %v2, i32* %res) {
+define zeroext i1 @uaddo.i32(i32 %v1, i32 %v2, ptr %res) {
 ; SDAG-LABEL: uaddo.i32:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    adds w8, w0, w1
@@ -280,11 +280,11 @@ entry:
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
-  store i32 %val, i32* %res
+  store i32 %val, ptr %res
   ret i1 %obit
 }
 
-define zeroext i1 @uaddo.i64(i64 %v1, i64 %v2, i64* %res) {
+define zeroext i1 @uaddo.i64(i64 %v1, i64 %v2, ptr %res) {
 ; SDAG-LABEL: uaddo.i64:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    adds x8, x0, x1
@@ -310,11 +310,11 @@ entry:
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
-  store i64 %val, i64* %res
+  store i64 %val, ptr %res
   ret i1 %obit
 }
 
-define zeroext i1 @ssubo1.i32(i32 %v1, i32 %v2, i32* %res) {
+define zeroext i1 @ssubo1.i32(i32 %v1, i32 %v2, ptr %res) {
 ; SDAG-LABEL: ssubo1.i32:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    subs w8, w0, w1
@@ -340,11 +340,11 @@ entry:
   %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
-  store i32 %val, i32* %res
+  store i32 %val, ptr %res
   ret i1 %obit
 }
 
-define zeroext i1 @ssubo2.i32(i32 %v1, i32* %res) {
+define zeroext i1 @ssubo2.i32(i32 %v1, ptr %res) {
 ; SDAG-LABEL: ssubo2.i32:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    adds w8, w0, #4
@@ -370,11 +370,11 @@ entry:
   %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 -4)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
-  store i32 %val, i32* %res
+  store i32 %val, ptr %res
   ret i1 %obit
 }
 
-define zeroext i1 @ssubo.i64(i64 %v1, i64 %v2, i64* %res) {
+define zeroext i1 @ssubo.i64(i64 %v1, i64 %v2, ptr %res) {
 ; SDAG-LABEL: ssubo.i64:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    subs x8, x0, x1
@@ -400,11 +400,11 @@ entry:
   %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
-  store i64 %val, i64* %res
+  store i64 %val, ptr %res
   ret i1 %obit
 }
 
-define zeroext i1 @usubo.i32(i32 %v1, i32 %v2, i32* %res) {
+define zeroext i1 @usubo.i32(i32 %v1, i32 %v2, ptr %res) {
 ; SDAG-LABEL: usubo.i32:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    subs w8, w0, w1
@@ -430,11 +430,11 @@ entry:
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
-  store i32 %val, i32* %res
+  store i32 %val, ptr %res
   ret i1 %obit
 }
 
-define zeroext i1 @usubo.i64(i64 %v1, i64 %v2, i64* %res) {
+define zeroext i1 @usubo.i64(i64 %v1, i64 %v2, ptr %res) {
 ; SDAG-LABEL: usubo.i64:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    subs x8, x0, x1
@@ -460,11 +460,11 @@ entry:
   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
-  store i64 %val, i64* %res
+  store i64 %val, ptr %res
   ret i1 %obit
 }
 
-define zeroext i1 @smulo.i32(i32 %v1, i32 %v2, i32* %res) {
+define zeroext i1 @smulo.i32(i32 %v1, i32 %v2, ptr %res) {
 ; SDAG-LABEL: smulo.i32:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    smull x8, w0, w1
@@ -495,11 +495,11 @@ entry:
   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
-  store i32 %val, i32* %res
+  store i32 %val, ptr %res
   ret i1 %obit
 }
 
-define zeroext i1 @smulo.i64(i64 %v1, i64 %v2, i64* %res) {
+define zeroext i1 @smulo.i64(i64 %v1, i64 %v2, ptr %res) {
 ; SDAG-LABEL: smulo.i64:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    mul x8, x0, x1
@@ -531,11 +531,11 @@ entry:
   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
-  store i64 %val, i64* %res
+  store i64 %val, ptr %res
   ret i1 %obit
 }
 
-define zeroext i1 @smulo2.i64(i64 %v1, i64* %res) {
+define zeroext i1 @smulo2.i64(i64 %v1, ptr %res) {
 ; SDAG-LABEL: smulo2.i64:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    adds x8, x0, x0
@@ -561,11 +561,11 @@ entry:
   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
-  store i64 %val, i64* %res
+  store i64 %val, ptr %res
   ret i1 %obit
 }
 
-define zeroext i1 @umulo.i32(i32 %v1, i32 %v2, i32* %res) {
+define zeroext i1 @umulo.i32(i32 %v1, i32 %v2, ptr %res) {
 ; SDAG-LABEL: umulo.i32:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    umull x8, w0, w1
@@ -597,11 +597,11 @@ entry:
   %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
-  store i32 %val, i32* %res
+  store i32 %val, ptr %res
   ret i1 %obit
 }
 
-define zeroext i1 @umulo.i64(i64 %v1, i64 %v2, i64* %res) {
+define zeroext i1 @umulo.i64(i64 %v1, i64 %v2, ptr %res) {
 ; SDAG-LABEL: umulo.i64:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    umulh x8, x0, x1
@@ -636,11 +636,11 @@ entry:
   %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
-  store i64 %val, i64* %res
+  store i64 %val, ptr %res
   ret i1 %obit
 }
 
-define zeroext i1 @umulo2.i64(i64 %v1, i64* %res) {
+define zeroext i1 @umulo2.i64(i64 %v1, ptr %res) {
 ; SDAG-LABEL: umulo2.i64:
 ; SDAG:       // %bb.0: // %entry
 ; SDAG-NEXT:    adds x8, x0, x0
@@ -666,7 +666,7 @@ entry:
   %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
-  store i64 %val, i64* %res
+  store i64 %val, ptr %res
   ret i1 %obit
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-zeroreg.ll b/llvm/test/CodeGen/AArch64/arm64-zeroreg.ll
index f6e1bc3eaf443..ea2e6ede8566a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-zeroreg.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-zeroreg.ll
@@ -7,7 +7,7 @@ declare void @end()
 ; Test that we use the zero register before regalloc and do not unnecessarily
 ; clobber a register with the SUBS (cmp) instruction.
 ; CHECK-LABEL: func:
-define void @func(i64* %addr) {
+define void @func(ptr %addr) {
   ; We should not see any spills or reloads between begin and end
   ; CHECK: bl begin
   ; CHECK-NOT: str{{.*}}sp
@@ -15,75 +15,75 @@ define void @func(i64* %addr) {
   ; CHECK-NOT: ldr{{.*}}sp
   ; CHECK-NOT: Folded Reload
   call void @begin()
-  %v0 = load volatile i64, i64* %addr  
-  %v1 = load volatile i64, i64* %addr  
-  %v2 = load volatile i64, i64* %addr  
-  %v3 = load volatile i64, i64* %addr  
-  %v4 = load volatile i64, i64* %addr  
-  %v5 = load volatile i64, i64* %addr  
-  %v6 = load volatile i64, i64* %addr  
-  %v7 = load volatile i64, i64* %addr  
-  %v8 = load volatile i64, i64* %addr  
-  %v9 = load volatile i64, i64* %addr  
-  %v10 = load volatile i64, i64* %addr  
-  %v11 = load volatile i64, i64* %addr  
-  %v12 = load volatile i64, i64* %addr  
-  %v13 = load volatile i64, i64* %addr  
-  %v14 = load volatile i64, i64* %addr  
-  %v15 = load volatile i64, i64* %addr  
-  %v16 = load volatile i64, i64* %addr  
-  %v17 = load volatile i64, i64* %addr  
-  %v18 = load volatile i64, i64* %addr  
-  %v19 = load volatile i64, i64* %addr  
-  %v20 = load volatile i64, i64* %addr
-  %v21 = load volatile i64, i64* %addr
-  %v22 = load volatile i64, i64* %addr
-  %v23 = load volatile i64, i64* %addr
-  %v24 = load volatile i64, i64* %addr
-  %v25 = load volatile i64, i64* %addr
-  %v26 = load volatile i64, i64* %addr
-  %v27 = load volatile i64, i64* %addr
-  %v28 = load volatile i64, i64* %addr
-  %v29 = load volatile i64, i64* %addr
+  %v0 = load volatile i64, ptr %addr  
+  %v1 = load volatile i64, ptr %addr  
+  %v2 = load volatile i64, ptr %addr  
+  %v3 = load volatile i64, ptr %addr  
+  %v4 = load volatile i64, ptr %addr  
+  %v5 = load volatile i64, ptr %addr  
+  %v6 = load volatile i64, ptr %addr  
+  %v7 = load volatile i64, ptr %addr  
+  %v8 = load volatile i64, ptr %addr  
+  %v9 = load volatile i64, ptr %addr  
+  %v10 = load volatile i64, ptr %addr  
+  %v11 = load volatile i64, ptr %addr  
+  %v12 = load volatile i64, ptr %addr  
+  %v13 = load volatile i64, ptr %addr  
+  %v14 = load volatile i64, ptr %addr  
+  %v15 = load volatile i64, ptr %addr  
+  %v16 = load volatile i64, ptr %addr  
+  %v17 = load volatile i64, ptr %addr  
+  %v18 = load volatile i64, ptr %addr  
+  %v19 = load volatile i64, ptr %addr  
+  %v20 = load volatile i64, ptr %addr
+  %v21 = load volatile i64, ptr %addr
+  %v22 = load volatile i64, ptr %addr
+  %v23 = load volatile i64, ptr %addr
+  %v24 = load volatile i64, ptr %addr
+  %v25 = load volatile i64, ptr %addr
+  %v26 = load volatile i64, ptr %addr
+  %v27 = load volatile i64, ptr %addr
+  %v28 = load volatile i64, ptr %addr
+  %v29 = load volatile i64, ptr %addr
 
   %c = icmp eq i64 %v0, %v1
   br i1 %c, label %if.then, label %if.end
 
 if.then:
-  store volatile i64 %v2, i64* %addr
+  store volatile i64 %v2, ptr %addr
   br label %if.end
 
 if.end:
-  store volatile i64 %v0, i64* %addr
-  store volatile i64 %v1, i64* %addr
-  store volatile i64 %v2, i64* %addr
-  store volatile i64 %v3, i64* %addr
-  store volatile i64 %v4, i64* %addr
-  store volatile i64 %v5, i64* %addr
-  store volatile i64 %v6, i64* %addr
-  store volatile i64 %v7, i64* %addr
-  store volatile i64 %v8, i64* %addr
-  store volatile i64 %v9, i64* %addr
-  store volatile i64 %v10, i64* %addr
-  store volatile i64 %v11, i64* %addr
-  store volatile i64 %v12, i64* %addr
-  store volatile i64 %v13, i64* %addr
-  store volatile i64 %v14, i64* %addr
-  store volatile i64 %v15, i64* %addr
-  store volatile i64 %v16, i64* %addr
-  store volatile i64 %v17, i64* %addr
-  store volatile i64 %v18, i64* %addr
-  store volatile i64 %v19, i64* %addr
-  store volatile i64 %v20, i64* %addr
-  store volatile i64 %v21, i64* %addr
-  store volatile i64 %v22, i64* %addr
-  store volatile i64 %v23, i64* %addr
-  store volatile i64 %v24, i64* %addr
-  store volatile i64 %v25, i64* %addr
-  store volatile i64 %v26, i64* %addr
-  store volatile i64 %v27, i64* %addr
-  store volatile i64 %v28, i64* %addr
-  store volatile i64 %v29, i64* %addr
+  store volatile i64 %v0, ptr %addr
+  store volatile i64 %v1, ptr %addr
+  store volatile i64 %v2, ptr %addr
+  store volatile i64 %v3, ptr %addr
+  store volatile i64 %v4, ptr %addr
+  store volatile i64 %v5, ptr %addr
+  store volatile i64 %v6, ptr %addr
+  store volatile i64 %v7, ptr %addr
+  store volatile i64 %v8, ptr %addr
+  store volatile i64 %v9, ptr %addr
+  store volatile i64 %v10, ptr %addr
+  store volatile i64 %v11, ptr %addr
+  store volatile i64 %v12, ptr %addr
+  store volatile i64 %v13, ptr %addr
+  store volatile i64 %v14, ptr %addr
+  store volatile i64 %v15, ptr %addr
+  store volatile i64 %v16, ptr %addr
+  store volatile i64 %v17, ptr %addr
+  store volatile i64 %v18, ptr %addr
+  store volatile i64 %v19, ptr %addr
+  store volatile i64 %v20, ptr %addr
+  store volatile i64 %v21, ptr %addr
+  store volatile i64 %v22, ptr %addr
+  store volatile i64 %v23, ptr %addr
+  store volatile i64 %v24, ptr %addr
+  store volatile i64 %v25, ptr %addr
+  store volatile i64 %v26, ptr %addr
+  store volatile i64 %v27, ptr %addr
+  store volatile i64 %v28, ptr %addr
+  store volatile i64 %v29, ptr %addr
   ; CHECK: bl end
   call void @end()
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-zextload-unscaled.ll b/llvm/test/CodeGen/AArch64/arm64-zextload-unscaled.ll
index 7a94bbf24d413..96908b7940edc 100644
--- a/llvm/test/CodeGen/AArch64/arm64-zextload-unscaled.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-zextload-unscaled.ll
@@ -2,39 +2,39 @@
 
 @var32 = global i32 0
 
-define void @test_zextloadi1_unscaled(i1* %base) {
+define void @test_zextloadi1_unscaled(ptr %base) {
 ; CHECK-LABEL: test_zextloadi1_unscaled:
 ; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-7]
 
-  %addr = getelementptr i1, i1* %base, i32 -7
-  %val = load i1, i1* %addr, align 1
+  %addr = getelementptr i1, ptr %base, i32 -7
+  %val = load i1, ptr %addr, align 1
 
   %extended = zext i1 %val to i32
-  store i32 %extended, i32* @var32, align 4
+  store i32 %extended, ptr @var32, align 4
   ret void
 }
 
-define void @test_zextloadi8_unscaled(i8* %base) {
+define void @test_zextloadi8_unscaled(ptr %base) {
 ; CHECK-LABEL: test_zextloadi8_unscaled:
 ; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-7]
 
-  %addr = getelementptr i8, i8* %base, i32 -7
-  %val = load i8, i8* %addr, align 1
+  %addr = getelementptr i8, ptr %base, i32 -7
+  %val = load i8, ptr %addr, align 1
 
   %extended = zext i8 %val to i32
-  store i32 %extended, i32* @var32, align 4
+  store i32 %extended, ptr @var32, align 4
   ret void
 }
 
-define void @test_zextloadi16_unscaled(i16* %base) {
+define void @test_zextloadi16_unscaled(ptr %base) {
 ; CHECK-LABEL: test_zextloadi16_unscaled:
 ; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #-14]
 
-  %addr = getelementptr i16, i16* %base, i32 -7
-  %val = load i16, i16* %addr, align 2
+  %addr = getelementptr i16, ptr %base, i32 -7
+  %val = load i16, ptr %addr, align 2
 
   %extended = zext i16 %val to i32
-  store i32 %extended, i32* @var32, align 4
+  store i32 %extended, ptr @var32, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64-zip.ll b/llvm/test/CodeGen/AArch64/arm64-zip.ll
index b32123df9219d..14772e78cbe26 100644
--- a/llvm/test/CodeGen/AArch64/arm64-zip.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-zip.ll
@@ -1,77 +1,77 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
-define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @vzipi8(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vzipi8:
 ;CHECK: zip1.8b
 ;CHECK: zip2.8b
 ;CHECK-NEXT: add.8b
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
 	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
         %tmp5 = add <8 x i8> %tmp3, %tmp4
 	ret <8 x i8> %tmp5
 }
 
-define <4 x i16> @vzipi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @vzipi16(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vzipi16:
 ;CHECK: zip1.4h
 ;CHECK: zip2.4h
 ;CHECK-NEXT: add.4h
-	%tmp1 = load <4 x i16>, <4 x i16>* %A
-	%tmp2 = load <4 x i16>, <4 x i16>* %B
+	%tmp1 = load <4 x i16>, ptr %A
+	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
 	%tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
         %tmp5 = add <4 x i16> %tmp3, %tmp4
 	ret <4 x i16> %tmp5
 }
 
-define <16 x i8> @vzipQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @vzipQi8(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vzipQi8:
 ;CHECK: zip1.16b
 ;CHECK: zip2.16b
 ;CHECK-NEXT: add.16b
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
 	%tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
         %tmp5 = add <16 x i8> %tmp3, %tmp4
 	ret <16 x i8> %tmp5
 }
 
-define <8 x i16> @vzipQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+define <8 x i16> @vzipQi16(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vzipQi16:
 ;CHECK: zip1.8h
 ;CHECK: zip2.8h
 ;CHECK-NEXT: add.8h
-	%tmp1 = load <8 x i16>, <8 x i16>* %A
-	%tmp2 = load <8 x i16>, <8 x i16>* %B
+	%tmp1 = load <8 x i16>, ptr %A
+	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
 	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
         %tmp5 = add <8 x i16> %tmp3, %tmp4
 	ret <8 x i16> %tmp5
 }
 
-define <4 x i32> @vzipQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @vzipQi32(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vzipQi32:
 ;CHECK: zip1.4s
 ;CHECK: zip2.4s
 ;CHECK-NEXT: add.4s
-	%tmp1 = load <4 x i32>, <4 x i32>* %A
-	%tmp2 = load <4 x i32>, <4 x i32>* %B
+	%tmp1 = load <4 x i32>, ptr %A
+	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
 	%tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
         %tmp5 = add <4 x i32> %tmp3, %tmp4
 	ret <4 x i32> %tmp5
 }
 
-define <4 x float> @vzipQf(<4 x float>* %A, <4 x float>* %B) nounwind {
+define <4 x float> @vzipQf(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vzipQf:
 ;CHECK: zip1.4s
 ;CHECK: zip2.4s
 ;CHECK-NEXT: fadd.4s
-	%tmp1 = load <4 x float>, <4 x float>* %A
-	%tmp2 = load <4 x float>, <4 x float>* %B
+	%tmp1 = load <4 x float>, ptr %A
+	%tmp2 = load <4 x float>, ptr %B
 	%tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
 	%tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
         %tmp5 = fadd <4 x float> %tmp3, %tmp4
@@ -80,26 +80,26 @@ define <4 x float> @vzipQf(<4 x float>* %A, <4 x float>* %B) nounwind {
 
 ; Undef shuffle indices should not prevent matching to VZIP:
 
-define <8 x i8> @vzipi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @vzipi8_undef(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vzipi8_undef:
 ;CHECK: zip1.8b
 ;CHECK: zip2.8b
 ;CHECK-NEXT: add.8b
-	%tmp1 = load <8 x i8>, <8 x i8>* %A
-	%tmp2 = load <8 x i8>, <8 x i8>* %B
+	%tmp1 = load <8 x i8>, ptr %A
+	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 undef, i32 1, i32 9, i32 undef, i32 10, i32 3, i32 11>
 	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 undef, i32 undef, i32 15>
         %tmp5 = add <8 x i8> %tmp3, %tmp4
 	ret <8 x i8> %tmp5
 }
 
-define <16 x i8> @vzipQi8_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @vzipQi8_undef(ptr %A, ptr %B) nounwind {
 ;CHECK-LABEL: vzipQi8_undef:
 ;CHECK: zip1.16b
 ;CHECK: zip2.16b
 ;CHECK-NEXT: add.16b
-	%tmp1 = load <16 x i8>, <16 x i8>* %A
-	%tmp2 = load <16 x i8>, <16 x i8>* %B
+	%tmp1 = load <16 x i8>, ptr %A
+	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 undef, i32 undef, i32 undef, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
 	%tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 undef, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 undef, i32 14, i32 30, i32 undef, i32 31>
         %tmp5 = add <16 x i8> %tmp3, %tmp4

diff  --git a/llvm/test/CodeGen/AArch64/arm64_32-addrs.ll b/llvm/test/CodeGen/AArch64/arm64_32-addrs.ll
index 82489d7f940d3..7c38144ef3f3e 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-addrs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-addrs.ll
@@ -11,35 +11,35 @@ define i32 @test_valid_wrap(i32 %base) {
 ; CHECK-NEXT:    ret
 
   %newaddr = add nuw i32 %base, -96
-  %ptr = inttoptr i32 %newaddr to i32*
-  %val = load i32, i32* %ptr
+  %ptr = inttoptr i32 %newaddr to ptr
+  %val = load i32, ptr %ptr
   ret i32 %val
 }
 
-define i8 @test_valid_wrap_optimizable(i8* %base) {
+define i8 @test_valid_wrap_optimizable(ptr %base) {
 ; CHECK-LABEL: test_valid_wrap_optimizable:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldurb w0, [x0, #-96]
 ; CHECK-NEXT:    ret
 
-  %newaddr = getelementptr inbounds i8, i8* %base, i32 -96
-  %val = load i8, i8* %newaddr
+  %newaddr = getelementptr inbounds i8, ptr %base, i32 -96
+  %val = load i8, ptr %newaddr
   ret i8 %val
 }
 
-define i8 @test_valid_wrap_optimizable1(i8* %base, i32 %offset) {
+define i8 @test_valid_wrap_optimizable1(ptr %base, i32 %offset) {
 ; CHECK-LABEL: test_valid_wrap_optimizable1:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldrb w0, [x0, w1, sxtw]
 ; CHECK-NEXT:    ret
 
-  %newaddr = getelementptr inbounds i8, i8* %base, i32 %offset
-  %val = load i8, i8* %newaddr
+  %newaddr = getelementptr inbounds i8, ptr %base, i32 %offset
+  %val = load i8, ptr %newaddr
   ret i8 %val
 }
 
 ;
-define i8 @test_valid_wrap_optimizable2(i8* %base, i32 %offset) {
+define i8 @test_valid_wrap_optimizable2(ptr %base, i32 %offset) {
 ; CHECK-LABEL: test_valid_wrap_optimizable2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov w8, #-100
@@ -48,7 +48,7 @@ define i8 @test_valid_wrap_optimizable2(i8* %base, i32 %offset) {
 ; CHECK-NEXT:    ldrb w0, [x9, x8]
 ; CHECK-NEXT:    ret
 
-  %newaddr = getelementptr inbounds i8, i8* inttoptr(i32 -100 to i8*), i32 %offset
-  %val = load i8, i8* %newaddr
+  %newaddr = getelementptr inbounds i8, ptr inttoptr(i32 -100 to ptr), i32 %offset
+  %val = load i8, ptr %newaddr
   ret i8 %val
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll b/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
index 6e655a0cc167a..0000262e833da 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
@@ -1,245 +1,233 @@
 ; RUN: llc -mtriple=arm64_32-apple-ios7.0 -o - %s | FileCheck %s
 ; RUN: llc -mtriple=arm64_32-apple-ios7.0 -mattr=+outline-atomics -o - %s | FileCheck %s -check-prefix=OUTLINE-ATOMICS
 
-define i8 @test_load_8(i8* %addr) {
+define i8 @test_load_8(ptr %addr) {
 ; CHECK-LABAL: test_load_8:
 ; CHECK: ldarb w0, [x0]
-  %val = load atomic i8, i8* %addr seq_cst, align 1
+  %val = load atomic i8, ptr %addr seq_cst, align 1
   ret i8 %val
 }
 
-define i16 @test_load_16(i16* %addr) {
+define i16 @test_load_16(ptr %addr) {
 ; CHECK-LABAL: test_load_16:
 ; CHECK: ldarh w0, [x0]
-  %val = load atomic i16, i16* %addr acquire, align 2
+  %val = load atomic i16, ptr %addr acquire, align 2
   ret i16 %val
 }
 
-define i32 @test_load_32(i32* %addr) {
+define i32 @test_load_32(ptr %addr) {
 ; CHECK-LABAL: test_load_32:
 ; CHECK: ldar w0, [x0]
-  %val = load atomic i32, i32* %addr seq_cst, align 4
+  %val = load atomic i32, ptr %addr seq_cst, align 4
   ret i32 %val
 }
 
-define i64 @test_load_64(i64* %addr) {
+define i64 @test_load_64(ptr %addr) {
 ; CHECK-LABAL: test_load_64:
 ; CHECK: ldar x0, [x0]
-  %val = load atomic i64, i64* %addr seq_cst, align 8
+  %val = load atomic i64, ptr %addr seq_cst, align 8
   ret i64 %val
 }
 
-define i8* @test_load_ptr(i8** %addr) {
+define ptr @test_load_ptr(ptr %addr) {
 ; CHECK-LABAL: test_load_ptr:
 ; CHECK: ldar w0, [x0]
-  %val = load atomic i8*, i8** %addr seq_cst, align 8
-  ret i8* %val
+  %val = load atomic ptr, ptr %addr seq_cst, align 8
+  ret ptr %val
 }
 
-define void @test_store_8(i8* %addr) {
+define void @test_store_8(ptr %addr) {
 ; CHECK-LABAL: test_store_8:
 ; CHECK: stlrb wzr, [x0]
-  store atomic i8 0, i8* %addr seq_cst, align 1
+  store atomic i8 0, ptr %addr seq_cst, align 1
   ret void
 }
 
-define void @test_store_16(i16* %addr) {
+define void @test_store_16(ptr %addr) {
 ; CHECK-LABAL: test_store_16:
 ; CHECK: stlrh wzr, [x0]
-  store atomic i16 0, i16* %addr seq_cst, align 2
+  store atomic i16 0, ptr %addr seq_cst, align 2
   ret void
 }
 
-define void @test_store_32(i32* %addr) {
+define void @test_store_32(ptr %addr) {
 ; CHECK-LABAL: test_store_32:
 ; CHECK: stlr wzr, [x0]
-  store atomic i32 0, i32* %addr seq_cst, align 4
+  store atomic i32 0, ptr %addr seq_cst, align 4
   ret void
 }
 
-define void @test_store_64(i64* %addr) {
+define void @test_store_64(ptr %addr) {
 ; CHECK-LABAL: test_store_64:
 ; CHECK: stlr xzr, [x0]
-  store atomic i64 0, i64* %addr seq_cst, align 8
+  store atomic i64 0, ptr %addr seq_cst, align 8
   ret void
 }
 
-define void @test_store_ptr(i8** %addr) {
+define void @test_store_ptr(ptr %addr) {
 ; CHECK-LABAL: test_store_ptr:
 ; CHECK: stlr wzr, [x0]
-  store atomic i8* null, i8** %addr seq_cst, align 8
+  store atomic ptr null, ptr %addr seq_cst, align 8
   ret void
 }
 
-declare i64 @llvm.aarch64.ldxr.p0i8(i8* %addr)
-declare i64 @llvm.aarch64.ldxr.p0i16(i16* %addr)
-declare i64 @llvm.aarch64.ldxr.p0i32(i32* %addr)
-declare i64 @llvm.aarch64.ldxr.p0i64(i64* %addr)
+declare i64 @llvm.aarch64.ldxr.p0(ptr %addr)
 
-define i8 @test_ldxr_8(i8* %addr) {
+define i8 @test_ldxr_8(ptr %addr) {
 ; CHECK-LABEL: test_ldxr_8:
 ; CHECK: ldxrb w0, [x0]
 
-  %val = call i64 @llvm.aarch64.ldxr.p0i8(i8* elementtype(i8) %addr)
+  %val = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i8) %addr)
   %val8 = trunc i64 %val to i8
   ret i8 %val8
 }
 
-define i16 @test_ldxr_16(i16* %addr) {
+define i16 @test_ldxr_16(ptr %addr) {
 ; CHECK-LABEL: test_ldxr_16:
 ; CHECK: ldxrh w0, [x0]
 
-  %val = call i64 @llvm.aarch64.ldxr.p0i16(i16* elementtype(i16) %addr)
+  %val = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i16) %addr)
   %val16 = trunc i64 %val to i16
   ret i16 %val16
 }
 
-define i32 @test_ldxr_32(i32* %addr) {
+define i32 @test_ldxr_32(ptr %addr) {
 ; CHECK-LABEL: test_ldxr_32:
 ; CHECK: ldxr w0, [x0]
 
-  %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) %addr)
+  %val = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i32) %addr)
   %val32 = trunc i64 %val to i32
   ret i32 %val32
 }
 
-define i64 @test_ldxr_64(i64* %addr) {
+define i64 @test_ldxr_64(ptr %addr) {
 ; CHECK-LABEL: test_ldxr_64:
 ; CHECK: ldxr x0, [x0]
 
-  %val = call i64 @llvm.aarch64.ldxr.p0i64(i64* elementtype(i64) %addr)
+  %val = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i64) %addr)
   ret i64 %val
 }
 
-declare i64 @llvm.aarch64.ldaxr.p0i8(i8* %addr)
-declare i64 @llvm.aarch64.ldaxr.p0i16(i16* %addr)
-declare i64 @llvm.aarch64.ldaxr.p0i32(i32* %addr)
-declare i64 @llvm.aarch64.ldaxr.p0i64(i64* %addr)
+declare i64 @llvm.aarch64.ldaxr.p0(ptr %addr)
 
-define i8 @test_ldaxr_8(i8* %addr) {
+define i8 @test_ldaxr_8(ptr %addr) {
 ; CHECK-LABEL: test_ldaxr_8:
 ; CHECK: ldaxrb w0, [x0]
 
-  %val = call i64 @llvm.aarch64.ldaxr.p0i8(i8* elementtype(i8) %addr)
+  %val = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i8) %addr)
   %val8 = trunc i64 %val to i8
   ret i8 %val8
 }
 
-define i16 @test_ldaxr_16(i16* %addr) {
+define i16 @test_ldaxr_16(ptr %addr) {
 ; CHECK-LABEL: test_ldaxr_16:
 ; CHECK: ldaxrh w0, [x0]
 
-  %val = call i64 @llvm.aarch64.ldaxr.p0i16(i16* elementtype(i16) %addr)
+  %val = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i16) %addr)
   %val16 = trunc i64 %val to i16
   ret i16 %val16
 }
 
-define i32 @test_ldaxr_32(i32* %addr) {
+define i32 @test_ldaxr_32(ptr %addr) {
 ; CHECK-LABEL: test_ldaxr_32:
 ; CHECK: ldaxr w0, [x0]
 
-  %val = call i64 @llvm.aarch64.ldaxr.p0i32(i32* elementtype(i32) %addr)
+  %val = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i32) %addr)
   %val32 = trunc i64 %val to i32
   ret i32 %val32
 }
 
-define i64 @test_ldaxr_64(i64* %addr) {
+define i64 @test_ldaxr_64(ptr %addr) {
 ; CHECK-LABEL: test_ldaxr_64:
 ; CHECK: ldaxr x0, [x0]
 
-  %val = call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) %addr)
+  %val = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i64) %addr)
   ret i64 %val
 }
 
-declare i32 @llvm.aarch64.stxr.p0i8(i64, i8*)
-declare i32 @llvm.aarch64.stxr.p0i16(i64, i16*)
-declare i32 @llvm.aarch64.stxr.p0i32(i64, i32*)
-declare i32 @llvm.aarch64.stxr.p0i64(i64, i64*)
+declare i32 @llvm.aarch64.stxr.p0(i64, ptr)
 
-define i32 @test_stxr_8(i8* %addr, i8 %val) {
+define i32 @test_stxr_8(ptr %addr, i8 %val) {
 ; CHECK-LABEL: test_stxr_8:
 ; CHECK: stxrb [[TMP:w[0-9]+]], w1, [x0]
 ; CHECK: mov w0, [[TMP]]
 
   %extval = zext i8 %val to i64
-  %success = call i32 @llvm.aarch64.stxr.p0i8(i64 %extval, i8* elementtype(i8) %addr)
+  %success = call i32 @llvm.aarch64.stxr.p0(i64 %extval, ptr elementtype(i8) %addr)
   ret i32 %success
 }
 
-define i32 @test_stxr_16(i16* %addr, i16 %val) {
+define i32 @test_stxr_16(ptr %addr, i16 %val) {
 ; CHECK-LABEL: test_stxr_16:
 ; CHECK: stxrh [[TMP:w[0-9]+]], w1, [x0]
 ; CHECK: mov w0, [[TMP]]
 
   %extval = zext i16 %val to i64
-  %success = call i32 @llvm.aarch64.stxr.p0i16(i64 %extval, i16* elementtype(i16) %addr)
+  %success = call i32 @llvm.aarch64.stxr.p0(i64 %extval, ptr elementtype(i16) %addr)
   ret i32 %success
 }
 
-define i32 @test_stxr_32(i32* %addr, i32 %val) {
+define i32 @test_stxr_32(ptr %addr, i32 %val) {
 ; CHECK-LABEL: test_stxr_32:
 ; CHECK: stxr [[TMP:w[0-9]+]], w1, [x0]
 ; CHECK: mov w0, [[TMP]]
 
   %extval = zext i32 %val to i64
-  %success = call i32 @llvm.aarch64.stxr.p0i32(i64 %extval, i32* elementtype(i32) %addr)
+  %success = call i32 @llvm.aarch64.stxr.p0(i64 %extval, ptr elementtype(i32) %addr)
   ret i32 %success
 }
 
-define i32 @test_stxr_64(i64* %addr, i64 %val) {
+define i32 @test_stxr_64(ptr %addr, i64 %val) {
 ; CHECK-LABEL: test_stxr_64:
 ; CHECK: stxr [[TMP:w[0-9]+]], x1, [x0]
 ; CHECK: mov w0, [[TMP]]
 
-  %success = call i32 @llvm.aarch64.stxr.p0i64(i64 %val, i64* elementtype(i64) %addr)
+  %success = call i32 @llvm.aarch64.stxr.p0(i64 %val, ptr elementtype(i64) %addr)
   ret i32 %success
 }
 
-declare i32 @llvm.aarch64.stlxr.p0i8(i64, i8*)
-declare i32 @llvm.aarch64.stlxr.p0i16(i64, i16*)
-declare i32 @llvm.aarch64.stlxr.p0i32(i64, i32*)
-declare i32 @llvm.aarch64.stlxr.p0i64(i64, i64*)
+declare i32 @llvm.aarch64.stlxr.p0(i64, ptr)
 
-define i32 @test_stlxr_8(i8* %addr, i8 %val) {
+define i32 @test_stlxr_8(ptr %addr, i8 %val) {
 ; CHECK-LABEL: test_stlxr_8:
 ; CHECK: stlxrb [[TMP:w[0-9]+]], w1, [x0]
 ; CHECK: mov w0, [[TMP]]
 
   %extval = zext i8 %val to i64
-  %success = call i32 @llvm.aarch64.stlxr.p0i8(i64 %extval, i8* elementtype(i8) %addr)
+  %success = call i32 @llvm.aarch64.stlxr.p0(i64 %extval, ptr elementtype(i8) %addr)
   ret i32 %success
 }
 
-define i32 @test_stlxr_16(i16* %addr, i16 %val) {
+define i32 @test_stlxr_16(ptr %addr, i16 %val) {
 ; CHECK-LABEL: test_stlxr_16:
 ; CHECK: stlxrh [[TMP:w[0-9]+]], w1, [x0]
 ; CHECK: mov w0, [[TMP]]
 
   %extval = zext i16 %val to i64
-  %success = call i32 @llvm.aarch64.stlxr.p0i16(i64 %extval, i16* elementtype(i16) %addr)
+  %success = call i32 @llvm.aarch64.stlxr.p0(i64 %extval, ptr elementtype(i16) %addr)
   ret i32 %success
 }
 
-define i32 @test_stlxr_32(i32* %addr, i32 %val) {
+define i32 @test_stlxr_32(ptr %addr, i32 %val) {
 ; CHECK-LABEL: test_stlxr_32:
 ; CHECK: stlxr [[TMP:w[0-9]+]], w1, [x0]
 ; CHECK: mov w0, [[TMP]]
 
   %extval = zext i32 %val to i64
-  %success = call i32 @llvm.aarch64.stlxr.p0i32(i64 %extval, i32* elementtype(i32) %addr)
+  %success = call i32 @llvm.aarch64.stlxr.p0(i64 %extval, ptr elementtype(i32) %addr)
   ret i32 %success
 }
 
-define i32 @test_stlxr_64(i64* %addr, i64 %val) {
+define i32 @test_stlxr_64(ptr %addr, i64 %val) {
 ; CHECK-LABEL: test_stlxr_64:
 ; CHECK: stlxr [[TMP:w[0-9]+]], x1, [x0]
 ; CHECK: mov w0, [[TMP]]
 
-  %success = call i32 @llvm.aarch64.stlxr.p0i64(i64 %val, i64* elementtype(i64) %addr)
+  %success = call i32 @llvm.aarch64.stlxr.p0(i64 %val, ptr elementtype(i64) %addr)
   ret i32 %success
 }
 
-define {i8*, i1} @test_cmpxchg_ptr(i8** %addr, i8* %cmp, i8* %new) {
+define {ptr, i1} @test_cmpxchg_ptr(ptr %addr, ptr %cmp, ptr %new) {
 ; OUTLINE-ATOMICS: bl ___aarch64_cas4_acq_rel
 ; CHECK-LABEL: test_cmpxchg_ptr:
 ; CHECK: [[LOOP:LBB[0-9]+_[0-9]+]]:
@@ -258,6 +246,6 @@ define {i8*, i1} @test_cmpxchg_ptr(i8** %addr, i8* %cmp, i8* %new) {
 ; CHECK:     mov w0, [[OLD]]
 ; CHECK:     clrex
 ; CHECK:     ret
-  %res = cmpxchg i8** %addr, i8* %cmp, i8* %new acq_rel acquire
-  ret {i8*, i1} %res
+  %res = cmpxchg ptr %addr, ptr %cmp, ptr %new acq_rel acquire
+  ret {ptr, i1} %res
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64_32-fastisel.ll b/llvm/test/CodeGen/AArch64/arm64_32-fastisel.ll
index 3c71ee1ee58cc..9a01aacab6c9a 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-fastisel.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-fastisel.ll
@@ -1,11 +1,11 @@
 ; RUN: llc -mtriple=arm64_32-apple-ios -O0 -fast-isel %s -o - | FileCheck %s
-@var = global i8* null
+@var = global ptr null
 
 define void @test_store_release_ptr() {
 ; CHECK-LABEL: test_store_release_ptr
 ; CHECK: mov [[ZERO:w[0-9]+]], wzr
 ; CHECK: stlr [[ZERO]]
-  store atomic i8* null, i8** @var release, align 4
+  store atomic ptr null, ptr @var release, align 4
   br label %next
 
 next:
@@ -14,7 +14,7 @@ next:
 
 declare [2 x i32] @callee()
 
-define void @test_struct_return(i32* %addr) {
+define void @test_struct_return(ptr %addr) {
 ; CHECK-LABEL: test_struct_return:
 ; CHECK: bl _callee
 ; CHECK: x[[COPYX0:[0-9]+]], x0
@@ -22,29 +22,29 @@ define void @test_struct_return(i32* %addr) {
 ; CHECK-DAG: str w[[COPYX0]]
   %res = call [2 x i32] @callee()
   %res.0 = extractvalue [2 x i32] %res, 0
-  store i32 %res.0, i32* %addr
+  store i32 %res.0, ptr %addr
   %res.1 = extractvalue [2 x i32] %res, 1
-  store i32 %res.1, i32* %addr
+  store i32 %res.1, ptr %addr
   ret void
 }
 
-define i8* @test_ret_ptr(i64 %in) {
+define ptr @test_ret_ptr(i64 %in) {
 ; CHECK-LABEL: test_ret_ptr:
 ; CHECK: add [[TMP:x[0-9]]], x0, #1
 ; CHECK: and x0, [[TMP]], #0xffffffff
 
   %sum = add i64 %in, 1
-  %res = inttoptr i64 %sum to i8*
-  ret i8* %res
+  %res = inttoptr i64 %sum to ptr
+  ret ptr %res
 }
 
 ; Handled by SDAG because the struct confuses FastISel, which is fine.
-define {i8*} @test_ret_ptr_struct(i64 %in) {
+define {ptr} @test_ret_ptr_struct(i64 %in) {
 ; CHECK-LABEL: test_ret_ptr_struct:
 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #1
 
   %sum = add i64 %in, 1
-  %res.ptr = inttoptr i64 %sum to i8*
-  %res = insertvalue {i8*} undef, i8* %res.ptr, 0
-  ret {i8*} %res
+  %res.ptr = inttoptr i64 %sum to ptr
+  %res = insertvalue {ptr} undef, ptr %res.ptr, 0
+  ret {ptr} %res
 }

diff  --git a/llvm/test/CodeGen/AArch64/arm64_32-frame-pointers.ll b/llvm/test/CodeGen/AArch64/arm64_32-frame-pointers.ll
index 34f5d9b31605a..a9dcd2e94e7ff 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-frame-pointers.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-frame-pointers.ll
@@ -17,9 +17,9 @@ define i8 @test_register_wrangling() {
   %var3 = alloca i8
   %dummy = alloca i8, i32 4100
 
-  %var1p1 = getelementptr i8, i8* %var1, i32 1
-  %val1 = load i8, i8* %var1
-  %val2 = load i8, i8* %var3
+  %var1p1 = getelementptr i8, ptr %var1, i32 1
+  %val1 = load i8, ptr %var1
+  %val2 = load i8, ptr %var3
 
   %sum = add i8 %val1, %val2
   ret i8 %sum

diff  --git a/llvm/test/CodeGen/AArch64/arm64_32-gep-sink.ll b/llvm/test/CodeGen/AArch64/arm64_32-gep-sink.ll
index 21c49d38877d8..74fc7b317708e 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-gep-sink.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-gep-sink.ll
@@ -1,37 +1,33 @@
 ; RUN: opt -codegenprepare -mtriple=arm64_32-apple-ios %s -S -o - | FileCheck %s
 
-define void @test_simple_sink(i1* %base, i64 %offset) {
+define void @test_simple_sink(ptr %base, i64 %offset) {
 ; CHECK-LABEL: @test_simple_sink
 ; CHECK: next:
-; CHECK:   [[BASE8:%.*]] = bitcast i1* %base to i8*
-; CHECK:   [[ADDR8:%.*]] = getelementptr i8, i8* [[BASE8]], i64 %offset
-; CHECK:   [[ADDR:%.*]] = bitcast i8* [[ADDR8]] to i1*
-; CHECK:   load volatile i1, i1* [[ADDR]]
-  %addr = getelementptr i1, i1* %base, i64 %offset
-  %tst = load i1, i1* %addr
+; CHECK:   [[ADDR8:%.*]] = getelementptr i8, ptr %base, i64 %offset
+; CHECK:   load volatile i1, ptr [[ADDR8]]
+  %addr = getelementptr i1, ptr %base, i64 %offset
+  %tst = load i1, ptr %addr
   br i1 %tst, label %next, label %end
 
 next:
-  load volatile i1, i1* %addr
+  load volatile i1, ptr %addr
   ret void
 
 end:
   ret void
 }
 
-define void @test_inbounds_sink(i1* %base, i64 %offset) {
+define void @test_inbounds_sink(ptr %base, i64 %offset) {
 ; CHECK-LABEL: @test_inbounds_sink
 ; CHECK: next:
-; CHECK:   [[BASE8:%.*]] = bitcast i1* %base to i8*
-; CHECK:   [[ADDR8:%.*]] = getelementptr inbounds i8, i8* [[BASE8]], i64 %offset
-; CHECK:   [[ADDR:%.*]] = bitcast i8* [[ADDR8]] to i1*
-; CHECK:   load volatile i1, i1* [[ADDR]]
-  %addr = getelementptr inbounds i1, i1* %base, i64 %offset
-  %tst = load i1, i1* %addr
+; CHECK:   [[ADDR8:%.*]] = getelementptr inbounds i8, ptr %base, i64 %offset
+; CHECK:   load volatile i1, ptr [[ADDR8]]
+  %addr = getelementptr inbounds i1, ptr %base, i64 %offset
+  %tst = load i1, ptr %addr
   br i1 %tst, label %next, label %end
 
 next:
-  load volatile i1, i1* %addr
+  load volatile i1, ptr %addr
   ret void
 
 end:
@@ -39,21 +35,19 @@ end:
 }
 
 ; No address derived via an add can be guaranteed inbounds
-define void @test_add_sink(i1* %base, i64 %offset) {
+define void @test_add_sink(ptr %base, i64 %offset) {
 ; CHECK-LABEL: @test_add_sink
 ; CHECK: next:
-; CHECK:   [[BASE8:%.*]] = bitcast i1* %base to i8*
-; CHECK:   [[ADDR8:%.*]] = getelementptr i8, i8* [[BASE8]], i64 %offset
-; CHECK:   [[ADDR:%.*]] = bitcast i8* [[ADDR8]] to i1*
-; CHECK:   load volatile i1, i1* [[ADDR]]
-  %base64 = ptrtoint i1* %base to i64
+; CHECK:   [[ADDR8:%.*]] = getelementptr i8, ptr %base, i64 %offset
+; CHECK:   load volatile i1, ptr [[ADDR8]]
+  %base64 = ptrtoint ptr %base to i64
   %addr64 = add nsw nuw i64 %base64, %offset
-  %addr = inttoptr i64 %addr64 to i1*
-  %tst = load i1, i1* %addr
+  %addr = inttoptr i64 %addr64 to ptr
+  %tst = load i1, ptr %addr
   br i1 %tst, label %next, label %end
 
 next:
-  load volatile i1, i1* %addr
+  load volatile i1, ptr %addr
   ret void
 
 end:

diff  --git a/llvm/test/CodeGen/AArch64/arm64_32-memcpy.ll b/llvm/test/CodeGen/AArch64/arm64_32-memcpy.ll
index f484a2fe65104..ed71f0958604f 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-memcpy.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-memcpy.ll
@@ -1,13 +1,13 @@
 ; RUN: llc -mtriple=arm64_32-apple-ios9.0 -o - %s | FileCheck %s
 
-define i64 @test_memcpy(i64* %addr, i8* %src, i1 %tst) minsize {
+define i64 @test_memcpy(ptr %addr, ptr %src, i1 %tst) minsize {
 ; CHECK-LABEL: test_memcpy:
 ; CHECK: ldr [[VAL64:x[0-9]+]], [x0]
 ; [...]
 ; CHECK: and x0, [[VAL64]], #0xffffffff
 ; CHECK: bl _memcpy
 
-  %val64 = load i64, i64* %addr
+  %val64 = load i64, ptr %addr
   br i1 %tst, label %true, label %false
 
 true:
@@ -15,19 +15,19 @@ true:
 
 false:
   %val32 = trunc i64 %val64 to i32
-  %val.ptr = inttoptr i32 %val32 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %val.ptr, i8* %src, i32 128, i32 0, i1 1)
+  %val.ptr = inttoptr i32 %val32 to ptr
+  call void @llvm.memcpy.p0.p0.i32(ptr %val.ptr, ptr %src, i32 128, i32 0, i1 1)
   ret i64 undef
 }
 
-define i64 @test_memmove(i64* %addr, i8* %src, i1 %tst) minsize {
+define i64 @test_memmove(ptr %addr, ptr %src, i1 %tst) minsize {
 ; CHECK-LABEL: test_memmove:
 ; CHECK: ldr [[VAL64:x[0-9]+]], [x0]
 ; [...]
 ; CHECK: and x0, [[VAL64]], #0xffffffff
 ; CHECK: bl _memmove
 
-  %val64 = load i64, i64* %addr
+  %val64 = load i64, ptr %addr
   br i1 %tst, label %true, label %false
 
 true:
@@ -35,19 +35,19 @@ true:
 
 false:
   %val32 = trunc i64 %val64 to i32
-  %val.ptr = inttoptr i32 %val32 to i8*
-  call void @llvm.memmove.p0i8.p0i8.i32(i8* %val.ptr, i8* %src, i32 128, i32 0, i1 1)
+  %val.ptr = inttoptr i32 %val32 to ptr
+  call void @llvm.memmove.p0.p0.i32(ptr %val.ptr, ptr %src, i32 128, i32 0, i1 1)
   ret i64 undef
 }
 
-define i64 @test_memset(i64* %addr, i8* %src, i1 %tst) minsize {
+define i64 @test_memset(ptr %addr, ptr %src, i1 %tst) minsize {
 ; CHECK-LABEL: test_memset:
 ; CHECK: ldr [[VAL64:x[0-9]+]], [x0]
 ; [...]
 ; CHECK: and x0, [[VAL64]], #0xffffffff
 ; CHECK: bl _memset
 
-  %val64 = load i64, i64* %addr
+  %val64 = load i64, ptr %addr
   br i1 %tst, label %true, label %false
 
 true:
@@ -55,12 +55,12 @@ true:
 
 false:
   %val32 = trunc i64 %val64 to i32
-  %val.ptr = inttoptr i32 %val32 to i8*
-  call void @llvm.memset.p0i8.i32(i8* %val.ptr, i8 42, i32 256, i32 0, i1 1)
+  %val.ptr = inttoptr i32 %val32 to ptr
+  call void @llvm.memset.p0.i32(ptr %val.ptr, i8 42, i32 256, i32 0, i1 1)
   ret i64 undef
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
-declare void @llvm.memmove.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
-declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i32, i1)
+declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i32, i1)
+declare void @llvm.memmove.p0.p0.i32(ptr, ptr, i32, i32, i1)
+declare void @llvm.memset.p0.i32(ptr, i8, i32, i32, i1)
 

diff  --git a/llvm/test/CodeGen/AArch64/arm64_32-neon.ll b/llvm/test/CodeGen/AArch64/arm64_32-neon.ll
index 9a1ecb2bc1625..b3dfdad191fc3 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-neon.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-neon.ll
@@ -7,14 +7,14 @@ define <2 x double> @test_insert_elt(<2 x double> %vec, double %val) {
   ret <2 x double> %res
 }
 
-define void @test_split_16B(<4 x float> %val, <4 x float>* %addr) {
+define void @test_split_16B(<4 x float> %val, ptr %addr) {
 ; CHECK-LABEL: test_split_16B:
 ; CHECK: str q0, [x0]
-  store <4 x float> %val, <4 x float>* %addr, align 8
+  store <4 x float> %val, ptr %addr, align 8
   ret void
 }
 
-define void @test_split_16B_splat(<4 x i32>, <4 x i32>* %addr) {
+define void @test_split_16B_splat(<4 x i32>, ptr %addr) {
 ; CHECK-LABEL: test_split_16B_splat:
 ; CHECK: str {{q[0-9]+}}
 
@@ -23,176 +23,176 @@ define void @test_split_16B_splat(<4 x i32>, <4 x i32>* %addr) {
   %vec.tmp2 = insertelement <4 x i32> %vec.tmp1, i32 42, i32 2
   %vec = insertelement <4 x i32> %vec.tmp2, i32 42, i32 3
 
-  store <4 x i32> %vec, <4 x i32>* %addr, align 8
+  store <4 x i32> %vec, ptr %addr, align 8
   ret void
 }
 
 
 %vec = type <2 x double>
 
-declare {%vec, %vec} @llvm.aarch64.neon.ld2r.v2f64.p0i8(i8*)
-define {%vec, %vec} @test_neon_load(i8* %addr) {
+declare {%vec, %vec} @llvm.aarch64.neon.ld2r.v2f64.p0(ptr)
+define {%vec, %vec} @test_neon_load(ptr %addr) {
 ; CHECK-LABEL: test_neon_load:
 ; CHECK: ld2r.2d { v0, v1 }, [x0]
-  %res = call {%vec, %vec} @llvm.aarch64.neon.ld2r.v2f64.p0i8(i8* %addr)
+  %res = call {%vec, %vec} @llvm.aarch64.neon.ld2r.v2f64.p0(ptr %addr)
   ret {%vec, %vec} %res
 }
 
-declare {%vec, %vec} @llvm.aarch64.neon.ld2lane.v2f64.p0i8(%vec, %vec, i64, i8*)
-define {%vec, %vec} @test_neon_load_lane(i8* %addr, %vec %in1, %vec %in2) {
+declare {%vec, %vec} @llvm.aarch64.neon.ld2lane.v2f64.p0(%vec, %vec, i64, ptr)
+define {%vec, %vec} @test_neon_load_lane(ptr %addr, %vec %in1, %vec %in2) {
 ; CHECK-LABEL: test_neon_load_lane:
 ; CHECK: ld2.d { v0, v1 }[0], [x0]
-  %res = call {%vec, %vec} @llvm.aarch64.neon.ld2lane.v2f64.p0i8(%vec %in1, %vec %in2, i64 0, i8* %addr)
+  %res = call {%vec, %vec} @llvm.aarch64.neon.ld2lane.v2f64.p0(%vec %in1, %vec %in2, i64 0, ptr %addr)
   ret {%vec, %vec} %res
 }
 
-declare void @llvm.aarch64.neon.st2.v2f64.p0i8(%vec, %vec, i8*)
-define void @test_neon_store(i8* %addr, %vec %in1, %vec %in2) {
+declare void @llvm.aarch64.neon.st2.v2f64.p0(%vec, %vec, ptr)
+define void @test_neon_store(ptr %addr, %vec %in1, %vec %in2) {
 ; CHECK-LABEL: test_neon_store:
 ; CHECK: st2.2d { v0, v1 }, [x0]
-  call void @llvm.aarch64.neon.st2.v2f64.p0i8(%vec %in1, %vec %in2, i8* %addr)
+  call void @llvm.aarch64.neon.st2.v2f64.p0(%vec %in1, %vec %in2, ptr %addr)
   ret void
 }
 
-declare void @llvm.aarch64.neon.st2lane.v2f64.p0i8(%vec, %vec, i64, i8*)
-define void @test_neon_store_lane(i8* %addr, %vec %in1, %vec %in2) {
+declare void @llvm.aarch64.neon.st2lane.v2f64.p0(%vec, %vec, i64, ptr)
+define void @test_neon_store_lane(ptr %addr, %vec %in1, %vec %in2) {
 ; CHECK-LABEL: test_neon_store_lane:
 ; CHECK: st2.d { v0, v1 }[1], [x0]
-  call void @llvm.aarch64.neon.st2lane.v2f64.p0i8(%vec %in1, %vec %in2, i64 1, i8* %addr)
+  call void @llvm.aarch64.neon.st2lane.v2f64.p0(%vec %in1, %vec %in2, i64 1, ptr %addr)
   ret void
 }
 
-declare {%vec, %vec} @llvm.aarch64.neon.ld2.v2f64.p0i8(i8*)
-define {{%vec, %vec}, i8*} @test_neon_load_post(i8* %addr, i32 %offset) {
+declare {%vec, %vec} @llvm.aarch64.neon.ld2.v2f64.p0(ptr)
+define {{%vec, %vec}, ptr} @test_neon_load_post(ptr %addr, i32 %offset) {
 ; CHECK-LABEL: test_neon_load_post:
 ; CHECK-DAG: sxtw [[OFFSET:x[0-9]+]], w1
 ; CHECK: ld2.2d { v0, v1 }, [x0], [[OFFSET]]
 
-  %vecs = call {%vec, %vec} @llvm.aarch64.neon.ld2.v2f64.p0i8(i8* %addr)
+  %vecs = call {%vec, %vec} @llvm.aarch64.neon.ld2.v2f64.p0(ptr %addr)
 
-  %addr.new = getelementptr inbounds i8, i8* %addr, i32 %offset
+  %addr.new = getelementptr inbounds i8, ptr %addr, i32 %offset
 
-  %res.tmp = insertvalue {{%vec, %vec}, i8*} undef, {%vec, %vec} %vecs, 0
-  %res = insertvalue {{%vec, %vec}, i8*} %res.tmp, i8* %addr.new, 1
-  ret {{%vec, %vec}, i8*} %res
+  %res.tmp = insertvalue {{%vec, %vec}, ptr} undef, {%vec, %vec} %vecs, 0
+  %res = insertvalue {{%vec, %vec}, ptr} %res.tmp, ptr %addr.new, 1
+  ret {{%vec, %vec}, ptr} %res
 }
 
-define {{%vec, %vec}, i8*} @test_neon_load_post_lane(i8* %addr, i32 %offset, %vec %in1, %vec %in2) {
+define {{%vec, %vec}, ptr} @test_neon_load_post_lane(ptr %addr, i32 %offset, %vec %in1, %vec %in2) {
 ; CHECK-LABEL: test_neon_load_post_lane:
 ; CHECK-DAG: sxtw [[OFFSET:x[0-9]+]], w1
 ; CHECK: ld2.d { v0, v1 }[1], [x0], [[OFFSET]]
 
-  %vecs = call {%vec, %vec} @llvm.aarch64.neon.ld2lane.v2f64.p0i8(%vec %in1, %vec %in2, i64 1, i8* %addr)
+  %vecs = call {%vec, %vec} @llvm.aarch64.neon.ld2lane.v2f64.p0(%vec %in1, %vec %in2, i64 1, ptr %addr)
 
-  %addr.new = getelementptr inbounds i8, i8* %addr, i32 %offset
+  %addr.new = getelementptr inbounds i8, ptr %addr, i32 %offset
 
-  %res.tmp = insertvalue {{%vec, %vec}, i8*} undef, {%vec, %vec} %vecs, 0
-  %res = insertvalue {{%vec, %vec}, i8*} %res.tmp, i8* %addr.new, 1
-  ret {{%vec, %vec}, i8*} %res
+  %res.tmp = insertvalue {{%vec, %vec}, ptr} undef, {%vec, %vec} %vecs, 0
+  %res = insertvalue {{%vec, %vec}, ptr} %res.tmp, ptr %addr.new, 1
+  ret {{%vec, %vec}, ptr} %res
 }
 
-define i8* @test_neon_store_post(i8* %addr, i32 %offset, %vec %in1, %vec %in2) {
+define ptr @test_neon_store_post(ptr %addr, i32 %offset, %vec %in1, %vec %in2) {
 ; CHECK-LABEL: test_neon_store_post:
 ; CHECK-DAG: sxtw [[OFFSET:x[0-9]+]], w1
 ; CHECK: st2.2d { v0, v1 }, [x0], [[OFFSET]]
 
-  call void @llvm.aarch64.neon.st2.v2f64.p0i8(%vec %in1, %vec %in2, i8* %addr)
+  call void @llvm.aarch64.neon.st2.v2f64.p0(%vec %in1, %vec %in2, ptr %addr)
 
-  %addr.new = getelementptr inbounds i8, i8* %addr, i32 %offset
+  %addr.new = getelementptr inbounds i8, ptr %addr, i32 %offset
 
-  ret i8* %addr.new
+  ret ptr %addr.new
 }
 
-define i8* @test_neon_store_post_lane(i8* %addr, i32 %offset, %vec %in1, %vec %in2) {
+define ptr @test_neon_store_post_lane(ptr %addr, i32 %offset, %vec %in1, %vec %in2) {
 ; CHECK-LABEL: test_neon_store_post_lane:
 ; CHECK: sxtw [[OFFSET:x[0-9]+]], w1
 ; CHECK: st2.d { v0, v1 }[0], [x0], [[OFFSET]]
 
-  call void @llvm.aarch64.neon.st2lane.v2f64.p0i8(%vec %in1, %vec %in2, i64 0, i8* %addr)
+  call void @llvm.aarch64.neon.st2lane.v2f64.p0(%vec %in1, %vec %in2, i64 0, ptr %addr)
 
-  %addr.new = getelementptr inbounds i8, i8* %addr, i32 %offset
+  %addr.new = getelementptr inbounds i8, ptr %addr, i32 %offset
 
-  ret i8* %addr.new
+  ret ptr %addr.new
 }
 
 ; ld1 is slightly different because it goes via ISelLowering of normal IR ops
 ; rather than an intrinsic.
-define {%vec, double*} @test_neon_ld1_post_lane(double* %addr, i32 %offset, %vec %in) {
+define {%vec, ptr} @test_neon_ld1_post_lane(ptr %addr, i32 %offset, %vec %in) {
 ; CHECK-LABEL: test_neon_ld1_post_lane:
 ; CHECK: sbfiz [[OFFSET:x[0-9]+]], x1, #3, #32
 ; CHECK: ld1.d { v0 }[0], [x0], [[OFFSET]]
 
-  %loaded = load double, double* %addr, align 8
+  %loaded = load double, ptr %addr, align 8
   %newvec = insertelement %vec %in, double %loaded, i32 0
 
-  %addr.new = getelementptr inbounds double, double* %addr, i32 %offset
+  %addr.new = getelementptr inbounds double, ptr %addr, i32 %offset
 
-  %res.tmp = insertvalue {%vec, double*} undef, %vec %newvec, 0
-  %res = insertvalue {%vec, double*} %res.tmp, double* %addr.new, 1
+  %res.tmp = insertvalue {%vec, ptr} undef, %vec %newvec, 0
+  %res = insertvalue {%vec, ptr} %res.tmp, ptr %addr.new, 1
 
-  ret {%vec, double*} %res
+  ret {%vec, ptr} %res
 }
 
-define {{%vec, %vec}, i8*} @test_neon_load_post_exact(i8* %addr) {
+define {{%vec, %vec}, ptr} @test_neon_load_post_exact(ptr %addr) {
 ; CHECK-LABEL: test_neon_load_post_exact:
 ; CHECK: ld2.2d { v0, v1 }, [x0], #32
 
-  %vecs = call {%vec, %vec} @llvm.aarch64.neon.ld2.v2f64.p0i8(i8* %addr)
+  %vecs = call {%vec, %vec} @llvm.aarch64.neon.ld2.v2f64.p0(ptr %addr)
 
-  %addr.new = getelementptr inbounds i8, i8* %addr, i32 32
+  %addr.new = getelementptr inbounds i8, ptr %addr, i32 32
 
-  %res.tmp = insertvalue {{%vec, %vec}, i8*} undef, {%vec, %vec} %vecs, 0
-  %res = insertvalue {{%vec, %vec}, i8*} %res.tmp, i8* %addr.new, 1
-  ret {{%vec, %vec}, i8*} %res
+  %res.tmp = insertvalue {{%vec, %vec}, ptr} undef, {%vec, %vec} %vecs, 0
+  %res = insertvalue {{%vec, %vec}, ptr} %res.tmp, ptr %addr.new, 1
+  ret {{%vec, %vec}, ptr} %res
 }
 
-define {%vec, double*} @test_neon_ld1_post_lane_exact(double* %addr, %vec %in) {
+define {%vec, ptr} @test_neon_ld1_post_lane_exact(ptr %addr, %vec %in) {
 ; CHECK-LABEL: test_neon_ld1_post_lane_exact:
 ; CHECK: ld1.d { v0 }[0], [x0], #8
 
-  %loaded = load double, double* %addr, align 8
+  %loaded = load double, ptr %addr, align 8
   %newvec = insertelement %vec %in, double %loaded, i32 0
 
-  %addr.new = getelementptr inbounds double, double* %addr, i32 1
+  %addr.new = getelementptr inbounds double, ptr %addr, i32 1
 
-  %res.tmp = insertvalue {%vec, double*} undef, %vec %newvec, 0
-  %res = insertvalue {%vec, double*} %res.tmp, double* %addr.new, 1
+  %res.tmp = insertvalue {%vec, ptr} undef, %vec %newvec, 0
+  %res = insertvalue {%vec, ptr} %res.tmp, ptr %addr.new, 1
 
-  ret {%vec, double*} %res
+  ret {%vec, ptr} %res
 }
 
 ; As in the general load/store case, this GEP has defined semantics when the
 ; address wraps. We cannot use post-indexed addressing.
-define {%vec, double*} @test_neon_ld1_notpost_lane_exact(double* %addr, %vec %in) {
+define {%vec, ptr} @test_neon_ld1_notpost_lane_exact(ptr %addr, %vec %in) {
 ; CHECK-LABEL: test_neon_ld1_notpost_lane_exact:
 ; CHECK-NOT: ld1.d { {{v[0-9]+}} }[0], [{{x[0-9]+|sp}}], #8
 ; CHECK: add w0, w0, #8
 ; CHECK: ret
 
-  %loaded = load double, double* %addr, align 8
+  %loaded = load double, ptr %addr, align 8
   %newvec = insertelement %vec %in, double %loaded, i32 0
 
-  %addr.new = getelementptr double, double* %addr, i32 1
+  %addr.new = getelementptr double, ptr %addr, i32 1
 
-  %res.tmp = insertvalue {%vec, double*} undef, %vec %newvec, 0
-  %res = insertvalue {%vec, double*} %res.tmp, double* %addr.new, 1
+  %res.tmp = insertvalue {%vec, ptr} undef, %vec %newvec, 0
+  %res = insertvalue {%vec, ptr} %res.tmp, ptr %addr.new, 1
 
-  ret {%vec, double*} %res
+  ret {%vec, ptr} %res
 }
 
-define {%vec, double*} @test_neon_ld1_notpost_lane(double* %addr, i32 %offset, %vec %in) {
+define {%vec, ptr} @test_neon_ld1_notpost_lane(ptr %addr, i32 %offset, %vec %in) {
 ; CHECK-LABEL: test_neon_ld1_notpost_lane:
 ; CHECK-NOT: ld1.d { {{v[0-9]+}} }[0], [{{x[0-9]+|sp}}], {{x[0-9]+|sp}}
 ; CHECK: add w0, w0, w1, lsl #3
 ; CHECK: ret
 
-  %loaded = load double, double* %addr, align 8
+  %loaded = load double, ptr %addr, align 8
   %newvec = insertelement %vec %in, double %loaded, i32 0
 
-  %addr.new = getelementptr double, double* %addr, i32 %offset
+  %addr.new = getelementptr double, ptr %addr, i32 %offset
 
-  %res.tmp = insertvalue {%vec, double*} undef, %vec %newvec, 0
-  %res = insertvalue {%vec, double*} %res.tmp, double* %addr.new, 1
+  %res.tmp = insertvalue {%vec, ptr} undef, %vec %newvec, 0
+  %res = insertvalue {%vec, ptr} %res.tmp, ptr %addr.new, 1
 
-  ret {%vec, double*} %res
+  ret {%vec, ptr} %res
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64_32-null.ll b/llvm/test/CodeGen/AArch64/arm64_32-null.ll
index 6360b6298160f..b4d6581a3ac53 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-null.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-null.ll
@@ -1,16 +1,16 @@
 ; RUN: llc -fast-isel=true  -global-isel=false -O0 -mtriple=arm64_32-apple-ios %s -o - | FileCheck %s
 ; RUN: llc -fast-isel=false -global-isel=false -O0 -mtriple=arm64_32-apple-ios %s -o - | FileCheck %s
 
-define void @test_store(i8** %p) {
+define void @test_store(ptr %p) {
 ; CHECK-LABEL: test_store:
 ; CHECK: mov [[R1:w[0-9]+]], wzr
 ; CHECK: str [[R1]], [x0]
 
-  store i8* null, i8** %p
+  store ptr null, ptr %p
   ret void
 }
 
-define void @test_phi(i8** %p) {
+define void @test_phi(ptr %p) {
 ; CHECK-LABEL: test_phi:
 ; CHECK: mov [[R1:x[0-9]+]], xzr
 ; CHECK: str [[R1]], [sp, #8]
@@ -23,7 +23,7 @@ define void @test_phi(i8** %p) {
 bb0:
   br label %bb1
 bb1:
-  %tmp0 = phi i8* [ null, %bb0 ]
-  store i8* %tmp0, i8** %p
+  %tmp0 = phi ptr [ null, %bb0 ]
+  store ptr %tmp0, ptr %p
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64_32-pointer-extend.ll b/llvm/test/CodeGen/AArch64/arm64_32-pointer-extend.ll
index 74b88305b571c..2d6f0fbe30888 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-pointer-extend.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-pointer-extend.ll
@@ -6,44 +6,44 @@ define void @pass_pointer(i64 %in) {
 ; CHECK: bl _take_pointer
 
   %in32 = trunc i64 %in to i32
-  %ptr = inttoptr i32 %in32 to i8*
-  call i64 @take_pointer(i8* %ptr)
+  %ptr = inttoptr i32 %in32 to ptr
+  call i64 @take_pointer(ptr %ptr)
   ret void
 }
 
-define i64 @take_pointer(i8* %ptr) nounwind {
+define i64 @take_pointer(ptr %ptr) nounwind {
 ; CHECK-LABEL: take_pointer:
 ; CHECK-NEXT: %bb.0
 ; CHECK-NEXT: ret
 
-  %val = ptrtoint i8* %ptr to i32
+  %val = ptrtoint ptr %ptr to i32
   %res = zext i32 %val to i64
   ret i64 %res
 }
 
-define i32 @callee_ptr_stack_slot([8 x i64], i8*, i32 %val) {
+define i32 @callee_ptr_stack_slot([8 x i64], ptr, i32 %val) {
 ; CHECK-LABEL: callee_ptr_stack_slot:
 ; CHECK: ldr w0, [sp, #4]
 
   ret i32 %val
 }
 
-define void @caller_ptr_stack_slot(i8* %ptr) {
+define void @caller_ptr_stack_slot(ptr %ptr) {
 ; CHECK-LABEL: caller_ptr_stack_slot:
 ; CHECK-DAG: mov [[VAL:w[0-9]]], #42
 ; CHECK: stp w0, [[VAL]], [sp]
 
-  call i32 @callee_ptr_stack_slot([8 x i64] undef, i8* %ptr, i32 42)
+  call i32 @callee_ptr_stack_slot([8 x i64] undef, ptr %ptr, i32 42)
   ret void
 }
 
-define i8* @return_ptr(i64 %in, i64 %r) {
+define ptr @return_ptr(i64 %in, i64 %r) {
 ; CHECK-LABEL: return_ptr:
 ; CHECK: sdiv [[VAL64:x[0-9]+]], x0, x1
 ; CHECK: and x0, [[VAL64]], #0xffffffff
 
   %sum = sdiv i64 %in, %r
   %sum32 = trunc i64 %sum to i32
-  %res = inttoptr i32 %sum32 to i8*
-  ret i8* %res
+  %res = inttoptr i32 %sum32 to ptr
+  ret ptr %res
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64_32-stack-pointers.ll b/llvm/test/CodeGen/AArch64/arm64_32-stack-pointers.ll
index a233e3416c1cd..1ae7dd4fe5ed2 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-stack-pointers.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-stack-pointers.ll
@@ -1,13 +1,13 @@
 ; RUN: llc -mtriple=arm64_32-apple-ios9.0 -o - %s | FileCheck %s
 
-declare void @callee([8 x i64], i8*, i8*)
+declare void @callee([8 x i64], ptr, ptr)
 
 ; Make sure we don't accidentally store X0 or XZR, which might well
 ; clobber other arguments or data.
-define void @test_stack_ptr_32bits(i8* %in) {
+define void @test_stack_ptr_32bits(ptr %in) {
 ; CHECK-LABEL: test_stack_ptr_32bits:
 ; CHECK-DAG: stp wzr, w0, [sp]
 
-  call void @callee([8 x i64] undef, i8* null, i8* %in)
+  call void @callee([8 x i64] undef, ptr null, ptr %in)
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/arm64_32-tls.ll b/llvm/test/CodeGen/AArch64/arm64_32-tls.ll
index fada715304c8c..c43b574fb80d3 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-tls.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-tls.ll
@@ -7,7 +7,7 @@ define i32 @test_thread_local() {
 ; CHECK: ldr w[[DEST:[0-9]+]], [x0]
 ; CHECK: blr x[[DEST]]
 
-  %val = load i32, i32* @var
+  %val = load i32, ptr @var
   ret i32 %val
 }
 

diff --git a/llvm/test/CodeGen/AArch64/arm64_32-va.ll b/llvm/test/CodeGen/AArch64/arm64_32-va.ll
index 94ff4716139b5..09ddaa9594ab9 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-va.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-va.ll
@@ -1,11 +1,11 @@
 ; RUN: llc -mtriple=arm64_32-apple-ios %s -o - | FileCheck %s
 
-define void @test_va_copy(i8* %dst, i8* %src) {
+define void @test_va_copy(ptr %dst, ptr %src) {
 ; CHECK-LABEL: test_va_copy:
 ; CHECK: ldr [[PTR:w[0-9]+]], [x1]
 ; CHECK: str [[PTR]], [x0]
 
-  call void @llvm.va_copy(i8* %dst, i8* %src)
+  call void @llvm.va_copy(ptr %dst, ptr %src)
   ret void
 }
 
@@ -13,9 +13,8 @@ define void @test_va_start(i32, ...)  {
 ; CHECK-LABEL: test_va_start
 ; CHECK: add x[[LIST:[0-9]+]], sp, #16
 ; CHECK: str w[[LIST]],
-  %slot = alloca i8*, align 4
-  %list = bitcast i8** %slot to i8*
-  call void @llvm.va_start(i8* %list)
+  %slot = alloca ptr, align 4
+  call void @llvm.va_start(ptr %slot)
   ret void
 }
 
@@ -23,34 +22,33 @@ define void @test_va_start_odd([8 x i64], i32, ...) {
 ; CHECK-LABEL: test_va_start_odd:
 ; CHECK: add x[[LIST:[0-9]+]], sp, #20
 ; CHECK: str w[[LIST]],
-  %slot = alloca i8*, align 4
-  %list = bitcast i8** %slot to i8*
-  call void @llvm.va_start(i8* %list)
+  %slot = alloca ptr, align 4
+  call void @llvm.va_start(ptr %slot)
   ret void
 }
 
-define i8* @test_va_arg(i8** %list) {
+define ptr @test_va_arg(ptr %list) {
 ; CHECK-LABEL: test_va_arg:
 ; CHECK: ldr w[[LOC:[0-9]+]], [x0]
 ; CHECK: add [[NEXTLOC:w[0-9]+]], w[[LOC]], #4
 ; CHECK: str [[NEXTLOC]], [x0]
 ; CHECK: ldr w0, [x[[LOC]]]
-  %res = va_arg i8** %list, i8*
-  ret i8* %res
+  %res = va_arg ptr %list, ptr
+  ret ptr %res
 }
 
-define i8* @really_test_va_arg(i8** %list, i1 %tst) {
+define ptr @really_test_va_arg(ptr %list, i1 %tst) {
 ; CHECK-LABEL: really_test_va_arg:
 ; CHECK: ldr w[[LOC:[0-9]+]], [x0]
 ; CHECK: add [[NEXTLOC:w[0-9]+]], w[[LOC]], #4
 ; CHECK: str [[NEXTLOC]], [x0]
 ; CHECK: ldr w[[VAARG:[0-9]+]], [x[[LOC]]]
 ; CHECK: csel x0, x[[VAARG]], xzr
-  %tmp = va_arg i8** %list, i8*
-  %res = select i1 %tst, i8* %tmp, i8* null
-  ret i8* %res
+  %tmp = va_arg ptr %list, ptr
+  %res = select i1 %tst, ptr %tmp, ptr null
+  ret ptr %res
 }
 
-declare void @llvm.va_start(i8*) 
+declare void @llvm.va_start(ptr) 
 
-declare void @llvm.va_copy(i8*, i8*)
+declare void @llvm.va_copy(ptr, ptr)

diff --git a/llvm/test/CodeGen/AArch64/arm64_32.ll b/llvm/test/CodeGen/AArch64/arm64_32.ll
index 0eb5b637b08f9..f857c7f7c55a2 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32.ll
@@ -12,13 +12,13 @@
 
 @var_got = external global i8
 
-define i32* @test_global_addr() {
+define ptr @test_global_addr() {
 ; CHECK-LABEL: test_global_addr:
 ; CHECK: adrp [[PAGE:x[0-9]+]], _var32@PAGE
 ; CHECK-OPT: add x0, [[PAGE]], _var32@PAGEOFF
 ; CHECK-FAST: add [[TMP:x[0-9]+]], [[PAGE]], _var32@PAGEOFF
 ; CHECK-FAST: and x0, [[TMP]], #0xffffffff
-  ret i32* @var32
+  ret ptr @var32
 }
 
 ; ADRP is necessarily 64-bit. The important point to check is that, however that
@@ -31,14 +31,14 @@ define i64 @test_global_addr_extension() {
 ; CHECK-NOT: and
 ; CHECK: ret
 
-  ret i64 ptrtoint(i32* @var32 to i64)
+  ret i64 ptrtoint(ptr @var32 to i64)
 }
 
 define i32 @test_global_value() {
 ; CHECK-LABEL: test_global_value:
 ; CHECK: adrp x[[PAGE:[0-9]+]], _var32@PAGE
 ; CHECK: ldr w0, [x[[PAGE]], _var32@PAGEOFF]
-  %val = load i32, i32* @var32, align 4
+  %val = load i32, ptr @var32, align 4
   ret i32 %val
 }
 
@@ -48,10 +48,10 @@ define i32 @test_unsafe_indexed_add() {
 ; CHECK: add x[[VAR32:[0-9]+]], {{x[0-9]+}}, _var32@PAGEOFF
 ; CHECK: add w[[ADDR:[0-9]+]], w[[VAR32]], #32
 ; CHECK: ldr w0, [x[[ADDR]]]
-  %addr_int = ptrtoint i32* @var32 to i32
+  %addr_int = ptrtoint ptr @var32 to i32
   %addr_plus_32 = add i32 %addr_int, 32
-  %addr = inttoptr i32 %addr_plus_32 to i32*
-  %val = load i32, i32* %addr, align 4
+  %addr = inttoptr i32 %addr_plus_32 to ptr
+  %val = load i32, ptr %addr, align 4
   ret i32 %val
 }
 
@@ -62,10 +62,10 @@ define i32 @test_safe_indexed_add() {
 ; CHECK: add x[[VAR32:[0-9]+]], {{x[0-9]+}}, _var32@PAGEOFF
 ; CHECK: add w[[ADDR:[0-9]+]], w[[VAR32]], #32
 ; CHECK: ldr w0, [x[[ADDR]]]
-  %addr_int = ptrtoint i32* @var32 to i64
+  %addr_int = ptrtoint ptr @var32 to i64
   %addr_plus_32 = add nuw i64 %addr_int, 32
-  %addr = inttoptr i64 %addr_plus_32 to i32*
-  %val = load i32, i32* %addr, align 4
+  %addr = inttoptr i64 %addr_plus_32 to ptr
+  %val = load i32, ptr %addr, align 4
   ret i32 %val
 }
 
@@ -76,8 +76,8 @@ define i32 @test_safe_indexed_or(i32 %in) {
 ; CHECK: ldr w0, [x[[ADDR]]]
   %addr_int = and i32 %in, -16
   %addr_plus_4 = or i32 %addr_int, 4
-  %addr = inttoptr i32 %addr_plus_4 to i32*
-  %val = load i32, i32* %addr, align 4
+  %addr = inttoptr i32 %addr_plus_4 to ptr
+  %val = load i32, ptr %addr, align 4
   ret i32 %val
 }
 
@@ -91,10 +91,10 @@ define i32 @test_unsafe_nsw_indexed_add() {
 ; CHECK: add w[[ADDR:[0-9]+]], w[[VAR32]], #32
 ; CHECK-NOT: ubfx
 ; CHECK: ldr w0, [x[[ADDR]]]
-  %addr_int = ptrtoint i32* @var32 to i32
+  %addr_int = ptrtoint ptr @var32 to i32
   %addr_plus_32 = add nsw i32 %addr_int, 32
-  %addr = inttoptr i32 %addr_plus_32 to i32*
-  %val = load i32, i32* %addr, align 4
+  %addr = inttoptr i32 %addr_plus_32 to ptr
+  %val = load i32, ptr %addr, align 4
   ret i32 %val
 }
 
@@ -104,10 +104,10 @@ define i32 @test_unsafe_unscaled_add() {
 ; CHECK: add x[[VAR32:[0-9]+]], {{x[0-9]+}}, _var32@PAGEOFF
 ; CHECK: add w[[ADDR:[0-9]+]], w[[VAR32]], #3
 ; CHECK: ldr w0, [x[[ADDR]]]
-  %addr_int = ptrtoint i32* @var32 to i32
+  %addr_int = ptrtoint ptr @var32 to i32
   %addr_plus_3 = add i32 %addr_int, 3
-  %addr = inttoptr i32 %addr_plus_3 to i32*
-  %val = load i32, i32* %addr, align 1
+  %addr = inttoptr i32 %addr_plus_3 to ptr
+  %val = load i32, ptr %addr, align 1
   ret i32 %val
 }
 
@@ -118,10 +118,10 @@ define i32 @test_safe_unscaled_add() {
 ; CHECK: add x[[VAR32:[0-9]+]], {{x[0-9]+}}, _var32@PAGEOFF
 ; CHECK: add w[[ADDR:[0-9]+]], w[[VAR32]], #3
 ; CHECK: ldr w0, [x[[ADDR]]]
-  %addr_int = ptrtoint i32* @var32 to i32
+  %addr_int = ptrtoint ptr @var32 to i32
   %addr_plus_3 = add nuw i32 %addr_int, 3
-  %addr = inttoptr i32 %addr_plus_3 to i32*
-  %val = load i32, i32* %addr, align 1
+  %addr = inttoptr i32 %addr_plus_3 to ptr
+  %val = load i32, ptr %addr, align 1
   ret i32 %val
 }
 
@@ -134,10 +134,10 @@ define i32 @test_unsafe_nsw_unscaled_add() {
 ; CHECK: add w[[ADDR:[0-9]+]], w[[VAR32]], #3
 ; CHECK-NOT: ubfx
 ; CHECK: ldr w0, [x[[ADDR]]]
-  %addr_int = ptrtoint i32* @var32 to i32
+  %addr_int = ptrtoint ptr @var32 to i32
   %addr_plus_3 = add nsw i32 %addr_int, 3
-  %addr = inttoptr i32 %addr_plus_3 to i32*
-  %val = load i32, i32* %addr, align 1
+  %addr = inttoptr i32 %addr_plus_3 to ptr
+  %val = load i32, ptr %addr, align 1
   ret i32 %val
 }
 
@@ -148,23 +148,23 @@ define i32 @test_unsafe_negative_unscaled_add() {
 ; CHECK: add x[[VAR32:[0-9]+]], {{x[0-9]+}}, _var32@PAGEOFF
 ; CHECK: sub w[[ADDR:[0-9]+]], w[[VAR32]], #3
 ; CHECK: ldr w0, [x[[ADDR]]]
-  %addr_int = ptrtoint i32* @var32 to i32
+  %addr_int = ptrtoint ptr @var32 to i32
   %addr_minus_3 = add i32 %addr_int, -3
-  %addr = inttoptr i32 %addr_minus_3 to i32*
-  %val = load i32, i32* %addr, align 1
+  %addr = inttoptr i32 %addr_minus_3 to ptr
+  %val = load i32, ptr %addr, align 1
   ret i32 %val
 }
 
-define i8* @test_got_addr() {
+define ptr @test_got_addr() {
 ; CHECK-LABEL: test_got_addr:
 ; CHECK: adrp x[[PAGE:[0-9]+]], _var_got@GOTPAGE
 ; CHECK-OPT: ldr w0, [x[[PAGE]], _var_got@GOTPAGEOFF]
 ; CHECK-FAST: ldr w[[TMP:[0-9]+]], [x[[PAGE]], _var_got@GOTPAGEOFF]
 ; CHECK-FAST: and x0, x[[TMP]], #0xffffffff
-  ret i8* @var_got
+  ret ptr @var_got
 }
 
-define float @test_va_arg_f32(i8** %list) {
+define float @test_va_arg_f32(ptr %list) {
 ; CHECK-LABEL: test_va_arg_f32:
 
 ; CHECK: ldr w[[START:[0-9]+]], [x0]
@@ -174,12 +174,12 @@ define float @test_va_arg_f32(i8** %list) {
   ; Floating point arguments get promoted to double as per C99.
 ; CHECK: ldr [[DBL:d[0-9]+]], [x[[START]]]
 ; CHECK: fcvt s0, [[DBL]]
-  %res = va_arg i8** %list, float
+  %res = va_arg ptr %list, float
   ret float %res
 }
 
 ; Interesting point is that the slot is 4 bytes.
-define i8 @test_va_arg_i8(i8** %list) {
+define i8 @test_va_arg_i8(ptr %list) {
 ; CHECK-LABEL: test_va_arg_i8:
 
 ; CHECK: ldr w[[START:[0-9]+]], [x0]
@@ -189,13 +189,13 @@ define i8 @test_va_arg_i8(i8** %list) {
   ; i8 gets promoted to int (again, as per C99).
 ; CHECK: ldr w0, [x[[START]]]
 
-  %res = va_arg i8** %list, i8
+  %res = va_arg ptr %list, i8
   ret i8 %res
 }
 
 ; Interesting point is that the slot needs aligning (again, min size is 4
 ; bytes).
-define i64 @test_va_arg_i64(i64** %list) {
+define i64 @test_va_arg_i64(ptr %list) {
 ; CHECK-LABEL: test_va_arg_i64:
 
   ; Update the list for the next user (minimum slot size is 4, but the actual
@@ -208,12 +208,12 @@ define i64 @test_va_arg_i64(i64** %list) {
 
 ; CHECK: ldr x0, [x[[START]]]
 
-  %res = va_arg i64** %list, i64
+  %res = va_arg ptr %list, i64
   ret i64 %res
 }
 
 declare void @bar(...)
-define void @test_va_call(i8 %l, i8 %r, float %in, i8* %ptr) {
+define void @test_va_call(i8 %l, i8 %r, float %in, ptr %ptr) {
 ; CHECK-LABEL: test_va_call:
 ; CHECK: add [[SUM:w[0-9]+]], {{w[0-9]+}}, w1
 
@@ -225,32 +225,32 @@ define void @test_va_call(i8 %l, i8 %r, float %in, i8* %ptr) {
 
   ; Add them to ensure real promotion occurs.
   %sum = add i8 %l, %r
-  call void(...) @bar(i8 %sum, i64 0, float %in, double 0.0, i8* %ptr)
+  call void(...) @bar(i8 %sum, i64 0, float %in, double 0.0, ptr %ptr)
   ret void
 }
 
-declare i8* @llvm.frameaddress(i32)
+declare ptr @llvm.frameaddress(i32)
 
-define i8* @test_frameaddr() {
+define ptr @test_frameaddr() {
 ; CHECK-LABEL: test_frameaddr:
 ; CHECK-OPT: ldr x0, [x29]
 ; CHECK-FAST: ldr [[TMP:x[0-9]+]], [x29]
 ; CHECK-FAST: and x0, [[TMP]], #0xffffffff
-  %val = call i8* @llvm.frameaddress(i32 1)
-  ret i8* %val
+  %val = call ptr @llvm.frameaddress(i32 1)
+  ret ptr %val
 }
 
-declare i8* @llvm.returnaddress(i32)
+declare ptr @llvm.returnaddress(i32)
 
-define i8* @test_toplevel_returnaddr() {
+define ptr @test_toplevel_returnaddr() {
 ; CHECK-LABEL: test_toplevel_returnaddr:
 ; CHECK-OPT: mov x0, x30
 ; CHECK-FAST: and x0, x30, #0xffffffff
-  %val = call i8* @llvm.returnaddress(i32 0)
-  ret i8* %val
+  %val = call ptr @llvm.returnaddress(i32 0)
+  ret ptr %val
 }
 
-define i8* @test_deep_returnaddr() {
+define ptr @test_deep_returnaddr() {
 ; CHECK-LABEL: test_deep_returnaddr:
 ; CHECK: ldr x[[FRAME_REC:[0-9]+]], [x29]
 ; CHECK-OPT: ldr x30, [x[[FRAME_REC]], #8]
@@ -258,11 +258,11 @@ define i8* @test_deep_returnaddr() {
 ; CHECK-OPT: mov x0, x30
 ; CHECK-FAST: ldr [[TMP:x[0-9]+]], [x[[FRAME_REC]], #8]
 ; CHECK-FAST: and x0, [[TMP]], #0xffffffff
-  %val = call i8* @llvm.returnaddress(i32 1)
-  ret i8* %val
+  %val = call ptr @llvm.returnaddress(i32 1)
+  ret ptr %val
 }
 
-define void @test_indirect_call(void()* %func) {
+define void @test_indirect_call(ptr %func) {
 ; CHECK-LABEL: test_indirect_call:
 ; CHECK: blr x0
   call void() %func()
@@ -270,14 +270,13 @@ define void @test_indirect_call(void()* %func) {
 }
 
 ; Safe to use the unextended address here
-define void @test_indirect_safe_call(i32* %weird_funcs) {
+define void @test_indirect_safe_call(ptr %weird_funcs) {
 ; CHECK-LABEL: test_indirect_safe_call:
 ; CHECK: add w[[ADDR32:[0-9]+]], w0, #4
 ; CHECK-OPT-NOT: ubfx
 ; CHECK: blr x[[ADDR32]]
-  %addr = getelementptr i32, i32* %weird_funcs, i32 1
-  %func = bitcast i32* %addr to void()*
-  call void() %func()
+  %addr = getelementptr i32, ptr %weird_funcs, i32 1
+  call void() %addr()
   ret void
 }
 
@@ -289,7 +288,7 @@ define void @test_simple_tail_call() {
   ret void
 }
 
-define void @test_indirect_tail_call(void()* %func) {
+define void @test_indirect_tail_call(ptr %func) {
 ; CHECK-LABEL: test_indirect_tail_call:
 ; CHECK: br x0
   tail call void() %func()
@@ -297,14 +296,13 @@ define void @test_indirect_tail_call(void()* %func) {
 }
 
 ; Safe to use the unextended address here
-define void @test_indirect_safe_tail_call(i32* %weird_funcs) {
+define void @test_indirect_safe_tail_call(ptr %weird_funcs) {
 ; CHECK-LABEL: test_indirect_safe_tail_call:
 ; CHECK: add w[[ADDR32:[0-9]+]], w0, #4
 ; CHECK-OPT-NOT: ubfx
 ; CHECK-OPT: br x[[ADDR32]]
-  %addr = getelementptr i32, i32* %weird_funcs, i32 1
-  %func = bitcast i32* %addr to void()*
-  tail call void() %func()
+  %addr = getelementptr i32, ptr %weird_funcs, i32 1
+  tail call void() %addr()
   ret void
 }
 
@@ -412,21 +410,20 @@ define void @test_small_smallstruct() {
   ret void
 }
 
-define void @test_bare_frameaddr(i8** %addr) {
+define void @test_bare_frameaddr(ptr %addr) {
 ; CHECK-LABEL: test_bare_frameaddr:
 ; CHECK: add x[[LOCAL:[0-9]+]], sp, #{{[0-9]+}}
 ; CHECK: str w[[LOCAL]],
 
   %ptr = alloca i8
-  store i8* %ptr, i8** %addr, align 4
+  store ptr %ptr, ptr %addr, align 4
   ret void
 }
 
-define void @test_sret_use([8 x i64]* sret([8 x i64]) %out) {
+define void @test_sret_use(ptr sret([8 x i64]) %out) {
 ; CHECK-LABEL: test_sret_use:
 ; CHECK: str xzr, [x8]
-  %addr = getelementptr [8 x i64], [8 x i64]* %out, i32 0, i32 0
-  store i64 0, i64* %addr
+  store i64 0, ptr %out
   ret void
 }
 
@@ -435,10 +432,9 @@ define i64 @test_sret_call() {
 ; CHECK: mov x8, sp
 ; CHECK: bl _test_sret_use
   %arr = alloca [8 x i64]
-  call void @test_sret_use([8 x i64]* sret([8 x i64]) %arr)
+  call void @test_sret_use(ptr sret([8 x i64]) %arr)
 
-  %addr = getelementptr [8 x i64], [8 x i64]* %arr, i32 0, i32 0
-  %val = load i64, i64* %addr
+  %val = load i64, ptr %arr
   ret i64 %val
 }
 
@@ -449,25 +445,25 @@ define double @test_constpool() {
   ret double 1.0e-6
 }
 
-define i8* @test_blockaddress() {
+define ptr @test_blockaddress() {
 ; CHECK-LABEL: test_blockaddress:
 ; CHECK: [[BLOCK:Ltmp[0-9]+]]:
 ; CHECK: adrp x[[PAGE:[0-9]+]], lCPI{{[0-9]+_[0-9]+}}@PAGE
 ; CHECK: ldr x0, [x[[PAGE]], lCPI{{[0-9]+_[0-9]+}}@PAGEOFF]
   br label %dest
 dest:
-  ret i8* blockaddress(@test_blockaddress, %dest)
+  ret ptr blockaddress(@test_blockaddress, %dest)
 }
 
-define i8* @test_indirectbr(i8* %dest) {
+define ptr @test_indirectbr(ptr %dest) {
 ; CHECK-LABEL: test_indirectbr:
 ; CHECK: br x0
-  indirectbr i8* %dest, [label %true, label %false]
+  indirectbr ptr %dest, [label %true, label %false]
 
 true:
-  ret i8* blockaddress(@test_indirectbr, %true)
+  ret ptr blockaddress(@test_indirectbr, %true)
 false:
-  ret i8* blockaddress(@test_indirectbr, %false)
+  ret ptr blockaddress(@test_indirectbr, %false)
 }
 
 ; ISelDAGToDAG tries to fold an offset FI load (in this case var+4) into the
@@ -477,9 +473,9 @@ define float @test_frameindex_offset_load() {
 ; CHECK-LABEL: test_frameindex_offset_load:
 ; CHECK: ldr s0, [sp, #4]
   %arr = alloca float, i32 4, align 8
-  %addr = getelementptr inbounds float, float* %arr, i32 1
+  %addr = getelementptr inbounds float, ptr %arr, i32 1
 
-  %val = load float, float* %addr, align 4
+  %val = load float, ptr %addr, align 4
   ret float %val
 }
 
@@ -491,46 +487,46 @@ define void @test_unaligned_frameindex_offset_store() {
 ; CHECK: str [[VAL]], [x[[ADDR]]]
   %arr = alloca [4 x i32]
 
-  %addr.int = ptrtoint [4 x i32]* %arr to i32
+  %addr.int = ptrtoint ptr %arr to i32
   %addr.nextint = add nuw i32 %addr.int, 2
-  %addr.next = inttoptr i32 %addr.nextint to i32*
-  store i32 42, i32* %addr.next
+  %addr.next = inttoptr i32 %addr.nextint to ptr
+  store i32 42, ptr %addr.next
   ret void
 }
 
 
-define {i64, i64*} @test_pre_idx(i64* %addr) {
+define {i64, ptr} @test_pre_idx(ptr %addr) {
 ; CHECK-LABEL: test_pre_idx:
 
 ; CHECK: add w[[ADDR:[0-9]+]], w0, #8
 ; CHECK: ldr x0, [x[[ADDR]]]
-  %addr.int = ptrtoint i64* %addr to i32
+  %addr.int = ptrtoint ptr %addr to i32
   %addr.next.int = add nuw i32 %addr.int, 8
-  %addr.next = inttoptr i32 %addr.next.int to i64*
-  %val = load i64, i64* %addr.next
+  %addr.next = inttoptr i32 %addr.next.int to ptr
+  %val = load i64, ptr %addr.next
 
-  %tmp = insertvalue {i64, i64*} undef, i64 %val, 0
-  %res = insertvalue {i64, i64*} %tmp, i64* %addr.next, 1
+  %tmp = insertvalue {i64, ptr} undef, i64 %val, 0
+  %res = insertvalue {i64, ptr} %tmp, ptr %addr.next, 1
 
-  ret {i64, i64*} %res
+  ret {i64, ptr} %res
 }
 
 ; Forming a post-indexed load is invalid here since the GEP needs to work when
 ; %addr wraps round to 0.
-define {i64, i64*} @test_invalid_pre_idx(i64* %addr) {
+define {i64, ptr} @test_invalid_pre_idx(ptr %addr) {
 ; CHECK-LABEL: test_invalid_pre_idx:
 ; CHECK: add w1, w0, #8
 ; CHECK: ldr x0, [x1]
-  %addr.next = getelementptr i64, i64* %addr, i32 1
-  %val = load i64, i64* %addr.next
+  %addr.next = getelementptr i64, ptr %addr, i32 1
+  %val = load i64, ptr %addr.next
 
-  %tmp = insertvalue {i64, i64*} undef, i64 %val, 0
-  %res = insertvalue {i64, i64*} %tmp, i64* %addr.next, 1
+  %tmp = insertvalue {i64, ptr} undef, i64 %val, 0
+  %res = insertvalue {i64, ptr} %tmp, ptr %addr.next, 1
 
-  ret {i64, i64*} %res
+  ret {i64, ptr} %res
 }
 
-declare void @callee([8 x i32]*)
+declare void @callee(ptr)
 define void @test_stack_guard() ssp {
 ; CHECK-LABEL: test_stack_guard:
 ; CHECK: adrp x[[GUARD_GOTPAGE:[0-9]+]], ___stack_chk_guard@GOTPAGE
@@ -551,26 +547,26 @@ define void @test_stack_guard() ssp {
 ; CHECK-OPT: [[FAIL]]:
 ; CHECK-OPT-NEXT: bl ___stack_chk_fail
   %arr = alloca [8 x i32]
-  call void @callee([8 x i32]* %arr)
+  call void @callee(ptr %arr)
   ret void
 }
 
 declare i32 @__gxx_personality_v0(...)
-declare void @eat_landingpad_args(i32, i8*, i32)
+declare void @eat_landingpad_args(i32, ptr, i32)
 @_ZTI8Whatever = external global i8
-define void @test_landingpad_marshalling() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @test_landingpad_marshalling() personality ptr @__gxx_personality_v0 {
 ; CHECK-LABEL: test_landingpad_marshalling:
 ; CHECK-OPT: mov x2, x1
 ; CHECK-OPT: mov x1, x0
 ; CHECK: bl _eat_landingpad_args
-  invoke void @callee([8 x i32]* undef) to label %done unwind label %lpad
+  invoke void @callee(ptr undef) to label %done unwind label %lpad
 
 lpad:                                             ; preds = %entry
-  %exc = landingpad { i8*, i32 }
-          catch i8* @_ZTI8Whatever
-  %pointer = extractvalue { i8*, i32 } %exc, 0
-  %selector = extractvalue { i8*, i32 } %exc, 1
-  call void @eat_landingpad_args(i32 undef, i8* %pointer, i32 %selector)
+  %exc = landingpad { ptr, i32 }
+          catch ptr @_ZTI8Whatever
+  %pointer = extractvalue { ptr, i32 } %exc, 0
+  %selector = extractvalue { ptr, i32 } %exc, 1
+  call void @eat_landingpad_args(i32 undef, ptr %pointer, i32 %selector)
   ret void
 
 done:
@@ -587,16 +583,16 @@ define void @test_dynamic_stackalloc() {
 
 next:
   %val = alloca [8 x i32]
-  call void @callee([8 x i32]* %val)
+  call void @callee(ptr %val)
   ret void
 }
 
-define void @test_asm_memory(i32* %base.addr) {
+define void @test_asm_memory(ptr %base.addr) {
 ; CHECK-LABEL: test_asm_memory:
 ; CHECK: add w[[ADDR:[0-9]+]], w0, #4
 ; CHECK: str wzr, [x[[ADDR]]
-  %addr = getelementptr i32, i32* %base.addr, i32 1
-  call void asm sideeffect "str wzr, $0", "*m"(i32* elementtype(i32) %addr)
+  %addr = getelementptr i32, ptr %base.addr, i32 1
+  call void asm sideeffect "str wzr, $0", "*m"(ptr elementtype(i32) %addr)
   ret void
 }
 
@@ -605,23 +601,23 @@ define void @test_unsafe_asm_memory(i64 %val) {
 ; CHECK: and x[[ADDR:[0-9]+]], x0, #0xffffffff
 ; CHECK: str wzr, [x[[ADDR]]]
   %addr_int = trunc i64 %val to i32
-  %addr = inttoptr i32 %addr_int to i32*
-  call void asm sideeffect "str wzr, $0", "*m"(i32* elementtype(i32) %addr)
+  %addr = inttoptr i32 %addr_int to ptr
+  call void asm sideeffect "str wzr, $0", "*m"(ptr elementtype(i32) %addr)
   ret void
 }
 
-define [9 x i8*] @test_demoted_return(i8* %in) {
+define [9 x ptr] @test_demoted_return(ptr %in) {
 ; CHECK-LABEL: test_demoted_return:
 ; CHECK: str w0, [x8, #32]
-  %res = insertvalue [9 x i8*] undef, i8* %in, 8
-  ret [9 x i8*] %res
+  %res = insertvalue [9 x ptr] undef, ptr %in, 8
+  ret [9 x ptr] %res
 }
 
-define i8* @test_inttoptr(i64 %in) {
+define ptr @test_inttoptr(i64 %in) {
 ; CHECK-LABEL: test_inttoptr:
 ; CHECK: and x0, x0, #0xffffffff
-  %res = inttoptr i64 %in to i8*
-  ret i8* %res
+  %res = inttoptr i64 %in to ptr
+  ret ptr %res
 }
 
 declare i32 @llvm.get.dynamic.area.offset.i32()
@@ -632,28 +628,28 @@ define i32 @test_dynamic_area() {
   ret i32 %res
 }
 
-define void @test_pointer_vec_store(<2 x i8*>* %addr) {
+define void @test_pointer_vec_store(ptr %addr) {
 ; CHECK-LABEL: test_pointer_vec_store:
 ; CHECK: str xzr, [x0]
 ; CHECK-NOT: str
 ; CHECK-NOT: stp
 
-  store <2 x i8*> zeroinitializer, <2 x i8*>* %addr, align 16
+  store <2 x ptr> zeroinitializer, ptr %addr, align 16
   ret void
 }
 
-define <2 x i8*> @test_pointer_vec_load(<2 x i8*>* %addr) {
+define <2 x ptr> @test_pointer_vec_load(ptr %addr) {
 ; CHECK-LABEL: test_pointer_vec_load:
 ; CHECK: ldr d[[TMP:[0-9]+]], [x0]
 ; CHECK: ushll.2d v0, v[[TMP]], #0
-  %val = load <2 x i8*>, <2 x i8*>* %addr, align 16
-  ret <2 x i8*> %val
+  %val = load <2 x ptr>, ptr %addr, align 16
+  ret <2 x ptr> %val
 }
 
-define void @test_inline_asm_mem_pointer(i32* %in) {
+define void @test_inline_asm_mem_pointer(ptr %in) {
 ; CHECK-LABEL: test_inline_asm_mem_pointer:
 ; CHECK: str w0,
-  tail call void asm sideeffect "ldr x0, $0", "rm"(i32* %in)
+  tail call void asm sideeffect "ldr x0, $0", "rm"(ptr %in)
   ret void
 }
 
@@ -678,19 +674,19 @@ define void @test_struct_hi(i32 %hi) nounwind {
 declare void @take_pair([2 x i32])
 declare i64 @get_int()
 
-define i1 @test_icmp_ptr(i8* %in) {
+define i1 @test_icmp_ptr(ptr %in) {
 ; CHECK-LABEL: test_icmp_ptr
 ; CHECK: ubfx x0, x0, #31, #1
-  %res = icmp slt i8* %in, null
+  %res = icmp slt ptr %in, null
   ret i1 %res
 }
 
-define void @test_multiple_icmp_ptr(i8* %l, i8* %r) {
+define void @test_multiple_icmp_ptr(ptr %l, ptr %r) {
 ; CHECK-LABEL: test_multiple_icmp_ptr:
 ; CHECK: tbnz w0, #31, [[FALSEBB:LBB[0-9]+_[0-9]+]]
 ; CHECK: tbnz w1, #31, [[FALSEBB]]
-  %tst1 = icmp sgt i8* %l, inttoptr (i32 -1 to i8*)
-  %tst2 = icmp sgt i8* %r, inttoptr (i32 -1 to i8*)
+  %tst1 = icmp sgt ptr %l, inttoptr (i32 -1 to ptr)
+  %tst2 = icmp sgt ptr %r, inttoptr (i32 -1 to ptr)
   %tst = and i1 %tst1, %tst2
   br i1 %tst, label %true, label %false
 
@@ -702,12 +698,12 @@ false:
   ret void
 }
 
-define void @test_multiple_icmp_ptr_select(i8* %l, i8* %r) {
+define void @test_multiple_icmp_ptr_select(ptr %l, ptr %r) {
 ; CHECK-LABEL: test_multiple_icmp_ptr_select:
 ; CHECK: tbnz w0, #31, [[FALSEBB:LBB[0-9]+_[0-9]+]]
 ; CHECK: tbnz w1, #31, [[FALSEBB]]
-  %tst1 = icmp sgt i8* %l, inttoptr (i32 -1 to i8*)
-  %tst2 = icmp sgt i8* %r, inttoptr (i32 -1 to i8*)
+  %tst1 = icmp sgt ptr %l, inttoptr (i32 -1 to ptr)
+  %tst2 = icmp sgt ptr %r, inttoptr (i32 -1 to ptr)
   %tst = select i1 %tst1, i1 %tst2, i1 false
   br i1 %tst, label %true, label %false
 
@@ -719,7 +715,7 @@ false:
   ret void
 }
 
-define { [18 x i8] }* @test_gep_nonpow2({ [18 x i8] }* %a0, i32 %a1) {
+define ptr @test_gep_nonpow2(ptr %a0, i32 %a1) {
 ; CHECK-LABEL: test_gep_nonpow2:
 ; CHECK-OPT:      mov w[[SIZE:[0-9]+]], #18
 ; CHECK-OPT-NEXT: smaddl x0, w1, w[[SIZE]], x0
@@ -729,8 +725,8 @@ define { [18 x i8] }* @test_gep_nonpow2({ [18 x i8] }* %a0, i32 %a1) {
 ; CHECK-FAST-NEXT: smaddl [[TMP:x[0-9]+]], w1, w[[SIZE]], x0
 ; CHECK-FAST-NEXT: and x0, [[TMP]], #0xffffffff
 ; CHECK-FAST-NEXT: ret
-  %tmp0 = getelementptr inbounds { [18 x i8] }, { [18 x i8] }* %a0, i32 %a1
-  ret { [18 x i8] }* %tmp0
+  %tmp0 = getelementptr inbounds { [18 x i8] }, ptr %a0, i32 %a1
+  ret ptr %tmp0
 }
 
 define void @test_memset(i64 %in, i8 %value)  {
@@ -743,8 +739,8 @@ define void @test_memset(i64 %in, i8 %value)  {
   %ptr.i32 = trunc i64 %in to i32
   %size.64 = lshr i64 %in, 32
   %size = trunc i64 %size.64 to i32
-  %ptr = inttoptr i32 %ptr.i32 to i8*
-  tail call void @llvm.memset.p0i8.i32(i8* align 4 %ptr, i8 %value, i32 %size, i1 false)
+  %ptr = inttoptr i32 %ptr.i32 to ptr
+  tail call void @llvm.memset.p0.i32(ptr align 4 %ptr, i8 %value, i32 %size, i1 false)
   ret void
 }
 
@@ -757,9 +753,9 @@ define void @test_bzero(i64 %in)  {
   %ptr.i32 = trunc i64 %in to i32
   %size.64 = lshr i64 %in, 32
   %size = trunc i64 %size.64 to i32
-  %ptr = inttoptr i32 %ptr.i32 to i8*
-  tail call void @llvm.memset.p0i8.i32(i8* align 4 %ptr, i8 0, i32 %size, i1 false)
+  %ptr = inttoptr i32 %ptr.i32 to ptr
+  tail call void @llvm.memset.p0.i32(ptr align 4 %ptr, i8 0, i32 %size, i1 false)
   ret void
 }
 
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1)

diff --git a/llvm/test/CodeGen/AArch64/arm64ec-varargs.ll b/llvm/test/CodeGen/AArch64/arm64ec-varargs.ll
index 212c9408d6e45..3950a3026769c 100644
--- a/llvm/test/CodeGen/AArch64/arm64ec-varargs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64ec-varargs.ll
@@ -11,9 +11,8 @@ define void @varargs_callee(double %x, ...) nounwind {
 ; CHECK-NEXT:    str x4, [sp, #8]
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
-  %list = alloca i8*, align 8
-  %listx = bitcast i8** %list to i8*
-  call void @llvm.va_start(i8* nonnull %listx)
+  %list = alloca ptr, align 8
+  call void @llvm.va_start(ptr nonnull %list)
   ret void
 }
 
@@ -25,9 +24,8 @@ define void @varargs_callee_manyargs(i64, i64, i64, i64, i64, ...) nounwind {
 ; CHECK-NEXT:    str x8, [sp, #8]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
-  %list = alloca i8*, align 8
-  %listx = bitcast i8** %list to i8*
-  call void @llvm.va_start(i8* nonnull %listx)
+  %list = alloca ptr, align 8
+  call void @llvm.va_start(ptr nonnull %list)
   ret void
 }
 
@@ -95,4 +93,4 @@ define void @varargs_many_argscalleer() nounwind {
 }
 
 
-declare void @llvm.va_start(i8*)
+declare void @llvm.va_start(ptr)

diff --git a/llvm/test/CodeGen/AArch64/assertion-rc-mismatch.ll b/llvm/test/CodeGen/AArch64/assertion-rc-mismatch.ll
index c216c448e11b5..076fd044753c7 100644
--- a/llvm/test/CodeGen/AArch64/assertion-rc-mismatch.ll
+++ b/llvm/test/CodeGen/AArch64/assertion-rc-mismatch.ll
@@ -7,18 +7,18 @@ cmp:
   %lnot.i.i = icmp eq i64 %encodedBase, 0
   br i1 %lnot.i.i, label %if, label %else
 if:
-  %tmp1 = call i8* @llvm.returnaddress(i32 0)
+  %tmp1 = call ptr @llvm.returnaddress(i32 0)
   br label %end
 else:
-  %tmp3 = call i8* @llvm.returnaddress(i32 0)
-  %ptr = getelementptr inbounds i8, i8* %tmp3, i64 -16
-  %ld = load i8, i8* %ptr, align 4
-  %tmp2 = inttoptr i8 %ld to i8*
+  %tmp3 = call ptr @llvm.returnaddress(i32 0)
+  %ptr = getelementptr inbounds i8, ptr %tmp3, i64 -16
+  %ld = load i8, ptr %ptr, align 4
+  %tmp2 = inttoptr i8 %ld to ptr
   br label %end
 end:
-  %tmp = phi i8* [ %tmp1, %if ], [ %tmp2, %else ]
-  %coerce.val.pi56 = ptrtoint i8* %tmp to i64
+  %tmp = phi ptr [ %tmp1, %if ], [ %tmp2, %else ]
+  %coerce.val.pi56 = ptrtoint ptr %tmp to i64
   ret i64 %coerce.val.pi56
 }
 
-declare i8* @llvm.returnaddress(i32)
+declare ptr @llvm.returnaddress(i32)

diff --git a/llvm/test/CodeGen/AArch64/atomic-ops-ldapr.ll b/llvm/test/CodeGen/AArch64/atomic-ops-ldapr.ll
index bdd6f60b814a1..06f913a55cd43 100644
--- a/llvm/test/CodeGen/AArch64/atomic-ops-ldapr.ll
+++ b/llvm/test/CodeGen/AArch64/atomic-ops-ldapr.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+rcpc -fast-isel=0 -global-isel=false -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+rcpc -fast-isel=1 -global-isel=false -verify-machineinstrs < %s | FileCheck %s --check-prefix=FAST-ISEL
 
-define i8 @test_load_8_acq(i8* %addr) {
+define i8 @test_load_8_acq(ptr %addr) {
 ; CHECK-LABEL: test_load_8_acq:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldaprb w0, [x0]
@@ -12,11 +12,11 @@ define i8 @test_load_8_acq(i8* %addr) {
 ; FAST-ISEL:       // %bb.0:
 ; FAST-ISEL-NEXT:    ldaprb w0, [x0]
 ; FAST-ISEL-NEXT:    ret
-  %val = load atomic i8, i8* %addr acquire, align 1
+  %val = load atomic i8, ptr %addr acquire, align 1
   ret i8 %val
 }
 
-define i8 @test_load_8_csc(i8* %addr) {
+define i8 @test_load_8_csc(ptr %addr) {
 ; CHECK-LABEL: test_load_8_csc:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldarb w0, [x0]
@@ -26,11 +26,11 @@ define i8 @test_load_8_csc(i8* %addr) {
 ; FAST-ISEL:       // %bb.0:
 ; FAST-ISEL-NEXT:    ldarb w0, [x0]
 ; FAST-ISEL-NEXT:    ret
-  %val = load atomic i8, i8* %addr seq_cst, align 1
+  %val = load atomic i8, ptr %addr seq_cst, align 1
   ret i8 %val
 }
 
-define i16 @test_load_16_acq(i16* %addr) {
+define i16 @test_load_16_acq(ptr %addr) {
 ; CHECK-LABEL: test_load_16_acq:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldaprh w0, [x0]
@@ -40,11 +40,11 @@ define i16 @test_load_16_acq(i16* %addr) {
 ; FAST-ISEL:       // %bb.0:
 ; FAST-ISEL-NEXT:    ldaprh w0, [x0]
 ; FAST-ISEL-NEXT:    ret
-  %val = load atomic i16, i16* %addr acquire, align 2
+  %val = load atomic i16, ptr %addr acquire, align 2
   ret i16 %val
 }
 
-define i16 @test_load_16_csc(i16* %addr) {
+define i16 @test_load_16_csc(ptr %addr) {
 ; CHECK-LABEL: test_load_16_csc:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldarh w0, [x0]
@@ -54,11 +54,11 @@ define i16 @test_load_16_csc(i16* %addr) {
 ; FAST-ISEL:       // %bb.0:
 ; FAST-ISEL-NEXT:    ldarh w0, [x0]
 ; FAST-ISEL-NEXT:    ret
-  %val = load atomic i16, i16* %addr seq_cst, align 2
+  %val = load atomic i16, ptr %addr seq_cst, align 2
   ret i16 %val
 }
 
-define i32 @test_load_32_acq(i32* %addr) {
+define i32 @test_load_32_acq(ptr %addr) {
 ; CHECK-LABEL: test_load_32_acq:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldapr w0, [x0]
@@ -68,11 +68,11 @@ define i32 @test_load_32_acq(i32* %addr) {
 ; FAST-ISEL:       // %bb.0:
 ; FAST-ISEL-NEXT:    ldapr w0, [x0]
 ; FAST-ISEL-NEXT:    ret
-  %val = load atomic i32, i32* %addr acquire, align 4
+  %val = load atomic i32, ptr %addr acquire, align 4
   ret i32 %val
 }
 
-define i32 @test_load_32_csc(i32* %addr) {
+define i32 @test_load_32_csc(ptr %addr) {
 ; CHECK-LABEL: test_load_32_csc:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldar w0, [x0]
@@ -82,11 +82,11 @@ define i32 @test_load_32_csc(i32* %addr) {
 ; FAST-ISEL:       // %bb.0:
 ; FAST-ISEL-NEXT:    ldar w0, [x0]
 ; FAST-ISEL-NEXT:    ret
-  %val = load atomic i32, i32* %addr seq_cst, align 4
+  %val = load atomic i32, ptr %addr seq_cst, align 4
   ret i32 %val
 }
 
-define i64 @test_load_64_acq(i64* %addr) {
+define i64 @test_load_64_acq(ptr %addr) {
 ; CHECK-LABEL: test_load_64_acq:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldapr x0, [x0]
@@ -96,11 +96,11 @@ define i64 @test_load_64_acq(i64* %addr) {
 ; FAST-ISEL:       // %bb.0:
 ; FAST-ISEL-NEXT:    ldapr x0, [x0]
 ; FAST-ISEL-NEXT:    ret
-  %val = load atomic i64, i64* %addr acquire, align 8
+  %val = load atomic i64, ptr %addr acquire, align 8
   ret i64 %val
 }
 
-define i64 @test_load_64_csc(i64* %addr) {
+define i64 @test_load_64_csc(ptr %addr) {
 ; CHECK-LABEL: test_load_64_csc:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldar x0, [x0]
@@ -110,6 +110,6 @@ define i64 @test_load_64_csc(i64* %addr) {
 ; FAST-ISEL:       // %bb.0:
 ; FAST-ISEL-NEXT:    ldar x0, [x0]
 ; FAST-ISEL-NEXT:    ret
-  %val = load atomic i64, i64* %addr seq_cst, align 8
+  %val = load atomic i64, ptr %addr seq_cst, align 8
   ret i64 %val
 }

diff --git a/llvm/test/CodeGen/AArch64/atomic-ops-lse.ll b/llvm/test/CodeGen/AArch64/atomic-ops-lse.ll
index 24af03cadb233..93a2d6cedc3a7 100644
--- a/llvm/test/CodeGen/AArch64/atomic-ops-lse.ll
+++ b/llvm/test/CodeGen/AArch64/atomic-ops-lse.ll
@@ -25,7 +25,7 @@ define dso_local i8 @test_atomic_load_add_i8(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw add ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -46,7 +46,7 @@ define dso_local i16 @test_atomic_load_add_i16(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i16* @var16, i16 %offset seq_cst
+   %old = atomicrmw add ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -67,7 +67,7 @@ define dso_local i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw add ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -88,7 +88,7 @@ define dso_local i64 @test_atomic_load_add_i64(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw add ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -109,7 +109,7 @@ define dso_local void @test_atomic_load_add_i32_noret(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw add i32* @var32, i32 %offset seq_cst
+   atomicrmw add ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -129,7 +129,7 @@ define dso_local void @test_atomic_load_add_i64_noret(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw add i64* @var64, i64 %offset seq_cst
+   atomicrmw add ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -149,7 +149,7 @@ define dso_local i8 @test_atomic_load_or_i8(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw or ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -170,7 +170,7 @@ define dso_local i16 @test_atomic_load_or_i16(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i16* @var16, i16 %offset seq_cst
+   %old = atomicrmw or ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -191,7 +191,7 @@ define dso_local i32 @test_atomic_load_or_i32(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw or ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -212,7 +212,7 @@ define dso_local i64 @test_atomic_load_or_i64(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw or ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -233,7 +233,7 @@ define dso_local void @test_atomic_load_or_i32_noret(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw or i32* @var32, i32 %offset seq_cst
+   atomicrmw or ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -253,7 +253,7 @@ define dso_local void @test_atomic_load_or_i64_noret(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw or i64* @var64, i64 %offset seq_cst
+   atomicrmw or ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -273,7 +273,7 @@ define dso_local i8 @test_atomic_load_xor_i8(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw xor ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -294,7 +294,7 @@ define dso_local i16 @test_atomic_load_xor_i16(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i16* @var16, i16 %offset seq_cst
+   %old = atomicrmw xor ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -315,7 +315,7 @@ define dso_local i32 @test_atomic_load_xor_i32(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw xor ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -336,7 +336,7 @@ define dso_local i64 @test_atomic_load_xor_i64(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw xor ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -357,7 +357,7 @@ define dso_local void @test_atomic_load_xor_i32_noret(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xor i32* @var32, i32 %offset seq_cst
+   atomicrmw xor ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -377,7 +377,7 @@ define dso_local void @test_atomic_load_xor_i64_noret(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xor i64* @var64, i64 %offset seq_cst
+   atomicrmw xor ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -404,7 +404,7 @@ define dso_local i8 @test_atomic_load_min_i8(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw min ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -432,7 +432,7 @@ define dso_local i16 @test_atomic_load_min_i16(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i16* @var16, i16 %offset seq_cst
+   %old = atomicrmw min ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -459,7 +459,7 @@ define dso_local i32 @test_atomic_load_min_i32(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw min ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -486,7 +486,7 @@ define dso_local i64 @test_atomic_load_min_i64(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw min ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -512,7 +512,7 @@ define dso_local void @test_atomic_load_min_i32_noret(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw min i32* @var32, i32 %offset seq_cst
+   atomicrmw min ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -537,7 +537,7 @@ define dso_local void @test_atomic_load_min_i64_noret(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw min i64* @var64, i64 %offset seq_cst
+   atomicrmw min ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -564,7 +564,7 @@ define dso_local i8 @test_atomic_load_umin_i8(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw umin ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -592,7 +592,7 @@ define dso_local i16 @test_atomic_load_umin_i16(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i16* @var16, i16 %offset seq_cst
+   %old = atomicrmw umin ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -619,7 +619,7 @@ define dso_local i32 @test_atomic_load_umin_i32(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw umin ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -646,7 +646,7 @@ define dso_local i64 @test_atomic_load_umin_i64(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw umin ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -672,7 +672,7 @@ define dso_local void @test_atomic_load_umin_i32_noret(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umin i32* @var32, i32 %offset seq_cst
+   atomicrmw umin ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -697,7 +697,7 @@ define dso_local void @test_atomic_load_umin_i64_noret(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umin i64* @var64, i64 %offset seq_cst
+   atomicrmw umin ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -724,7 +724,7 @@ define dso_local i8 @test_atomic_load_max_i8(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw max ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -752,7 +752,7 @@ define dso_local i16 @test_atomic_load_max_i16(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i16* @var16, i16 %offset seq_cst
+   %old = atomicrmw max ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -779,7 +779,7 @@ define dso_local i32 @test_atomic_load_max_i32(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw max ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -806,7 +806,7 @@ define dso_local i64 @test_atomic_load_max_i64(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw max ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -832,7 +832,7 @@ define dso_local void @test_atomic_load_max_i32_noret(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw max i32* @var32, i32 %offset seq_cst
+   atomicrmw max ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -857,7 +857,7 @@ define dso_local void @test_atomic_load_max_i64_noret(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw max i64* @var64, i64 %offset seq_cst
+   atomicrmw max ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -884,7 +884,7 @@ define dso_local i8 @test_atomic_load_umax_i8(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw umax ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -912,7 +912,7 @@ define dso_local i16 @test_atomic_load_umax_i16(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i16* @var16, i16 %offset seq_cst
+   %old = atomicrmw umax ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -939,7 +939,7 @@ define dso_local i32 @test_atomic_load_umax_i32(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw umax ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -966,7 +966,7 @@ define dso_local i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw umax ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -992,7 +992,7 @@ define dso_local void @test_atomic_load_umax_i32_noret(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umax i32* @var32, i32 %offset seq_cst
+   atomicrmw umax ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -1017,7 +1017,7 @@ define dso_local void @test_atomic_load_umax_i64_noret(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umax i64* @var64, i64 %offset seq_cst
+   atomicrmw umax ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -1037,7 +1037,7 @@ define dso_local i8 @test_atomic_load_xchg_i8(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw xchg ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -1058,7 +1058,7 @@ define dso_local i16 @test_atomic_load_xchg_i16(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i16* @var16, i16 %offset seq_cst
+   %old = atomicrmw xchg ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -1079,7 +1079,7 @@ define dso_local i32 @test_atomic_load_xchg_i32(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw xchg ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -1100,7 +1100,7 @@ define dso_local i64 @test_atomic_load_xchg_i64(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw xchg ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -1121,7 +1121,7 @@ define dso_local void @test_atomic_load_xchg_i32_noret(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xchg i32* @var32, i32 %offset seq_cst
+   atomicrmw xchg ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -1142,7 +1142,7 @@ define dso_local void @test_atomic_load_xchg_i64_noret(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xchg i64* @var64, i64 %offset seq_cst
+   atomicrmw xchg ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -1163,7 +1163,7 @@ define dso_local i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas1_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire acquire
+   %pair = cmpxchg ptr @var8, i8 %wanted, i8 %new acquire acquire
    %old = extractvalue { i8, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -1188,7 +1188,7 @@ define dso_local i1 @test_atomic_cmpxchg_i8_1(i8 %wanted, i8 %new) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    cset w0, eq
 ; OUTLINE-ATOMICS-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire acquire
+   %pair = cmpxchg ptr @var8, i8 %wanted, i8 %new acquire acquire
    %success = extractvalue { i8, i1 } %pair, 1
 
 ; CHECK-NOT: dmb
@@ -1212,7 +1212,7 @@ define dso_local i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas2_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i16* @var16, i16 %wanted, i16 %new acquire acquire
+   %pair = cmpxchg ptr @var16, i16 %wanted, i16 %new acquire acquire
    %old = extractvalue { i16, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -1237,7 +1237,7 @@ define dso_local i1 @test_atomic_cmpxchg_i16_1(i16 %wanted, i16 %new) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    cset w0, eq
 ; OUTLINE-ATOMICS-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i16* @var16, i16 %wanted, i16 %new acquire acquire
+   %pair = cmpxchg ptr @var16, i16 %wanted, i16 %new acquire acquire
    %success = extractvalue { i16, i1 } %pair, 1
 
 ; CHECK-NOT: dmb
@@ -1262,7 +1262,7 @@ define dso_local i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas4_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i32* @var32, i32 %wanted, i32 %new acquire acquire
+   %pair = cmpxchg ptr @var32, i32 %wanted, i32 %new acquire acquire
    %old = extractvalue { i32, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -1285,7 +1285,7 @@ define dso_local i32 @test_atomic_cmpxchg_i32_monotonic_acquire(i32 %wanted, i32
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas4_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i32* @var32, i32 %wanted, i32 %new monotonic acquire
+   %pair = cmpxchg ptr @var32, i32 %wanted, i32 %new monotonic acquire
    %old = extractvalue { i32, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -1308,7 +1308,7 @@ define dso_local i64 @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas8_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i64* @var64, i64 %wanted, i64 %new acquire acquire
+   %pair = cmpxchg ptr @var64, i64 %wanted, i64 %new acquire acquire
    %old = extractvalue { i64, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -1331,7 +1331,7 @@ define dso_local i128 @test_atomic_cmpxchg_i128(i128 %wanted, i128 %new) nounwin
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas16_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i128* @var128, i128 %wanted, i128 %new acquire acquire
+   %pair = cmpxchg ptr @var128, i128 %wanted, i128 %new acquire acquire
    %old = extractvalue { i128, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -1354,7 +1354,7 @@ define dso_local i128 @test_atomic_cmpxchg_i128_monotonic_seqcst(i128 %wanted, i
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas16_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i128* @var128, i128 %wanted, i128 %new monotonic seq_cst
+   %pair = cmpxchg ptr @var128, i128 %wanted, i128 %new monotonic seq_cst
    %old = extractvalue { i128, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -1377,7 +1377,7 @@ define dso_local i128 @test_atomic_cmpxchg_i128_release_acquire(i128 %wanted, i1
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas16_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i128* @var128, i128 %wanted, i128 %new release acquire
+   %pair = cmpxchg ptr @var128, i128 %wanted, i128 %new release acquire
    %old = extractvalue { i128, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -1401,7 +1401,7 @@ define dso_local i8 @test_atomic_load_sub_i8(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i8* @var8, i8 %offset seq_cst
+  %old = atomicrmw sub ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -1424,7 +1424,7 @@ define dso_local i16 @test_atomic_load_sub_i16(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i16* @var16, i16 %offset seq_cst
+  %old = atomicrmw sub ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -1447,7 +1447,7 @@ define dso_local i32 @test_atomic_load_sub_i32(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i32* @var32, i32 %offset seq_cst
+  %old = atomicrmw sub ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -1470,7 +1470,7 @@ define dso_local i64 @test_atomic_load_sub_i64(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i64* @var64, i64 %offset seq_cst
+  %old = atomicrmw sub ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: neg x[[NEG:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -1493,7 +1493,7 @@ define dso_local void @test_atomic_load_sub_i32_noret(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw sub i32* @var32, i32 %offset seq_cst
+  atomicrmw sub ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -1516,7 +1516,7 @@ define dso_local void @test_atomic_load_sub_i64_noret(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw sub i64* @var64, i64 %offset seq_cst
+  atomicrmw sub ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: neg x[[NEG:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -1539,7 +1539,7 @@ define dso_local i8 @test_atomic_load_sub_i8_neg_imm() nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i8* @var8, i8 -1 seq_cst
+  %old = atomicrmw sub ptr @var8, i8 -1 seq_cst
 
 ; CHECK-NOT: dmb
 ; CHECK: mov w[[IMM:[0-9]+]], #1
@@ -1562,7 +1562,7 @@ define dso_local i16 @test_atomic_load_sub_i16_neg_imm() nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i16* @var16, i16 -1 seq_cst
+  %old = atomicrmw sub ptr @var16, i16 -1 seq_cst
 
 ; CHECK-NOT: dmb
 ; CHECK: mov w[[IMM:[0-9]+]], #1
@@ -1585,7 +1585,7 @@ define dso_local i32 @test_atomic_load_sub_i32_neg_imm() nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i32* @var32, i32 -1 seq_cst
+  %old = atomicrmw sub ptr @var32, i32 -1 seq_cst
 
 ; CHECK-NOT: dmb
 ; CHECK: mov w[[IMM:[0-9]+]], #1
@@ -1608,7 +1608,7 @@ define dso_local i64 @test_atomic_load_sub_i64_neg_imm() nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i64* @var64, i64 -1 seq_cst
+  %old = atomicrmw sub ptr @var64, i64 -1 seq_cst
 
 ; CHECK-NOT: dmb
 ; CHECK: mov w[[IMM:[0-9]+]], #1
@@ -1631,7 +1631,7 @@ define dso_local i8 @test_atomic_load_sub_i8_neg_arg(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
   %neg = sub i8 0, %offset
-  %old = atomicrmw sub i8* @var8, i8 %neg seq_cst
+  %old = atomicrmw sub ptr @var8, i8 %neg seq_cst
 
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -1653,7 +1653,7 @@ define dso_local i16 @test_atomic_load_sub_i16_neg_arg(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
   %neg = sub i16 0, %offset
-  %old = atomicrmw sub i16* @var16, i16 %neg seq_cst
+  %old = atomicrmw sub ptr @var16, i16 %neg seq_cst
 
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -1675,7 +1675,7 @@ define dso_local i32 @test_atomic_load_sub_i32_neg_arg(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
   %neg = sub i32 0, %offset
-  %old = atomicrmw sub i32* @var32, i32 %neg seq_cst
+  %old = atomicrmw sub ptr @var32, i32 %neg seq_cst
 
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -1697,7 +1697,7 @@ define dso_local i64 @test_atomic_load_sub_i64_neg_arg(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
   %neg = sub i64 0, %offset
-  %old = atomicrmw sub i64* @var64, i64 %neg seq_cst
+  %old = atomicrmw sub ptr @var64, i64 %neg seq_cst
 
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -1719,7 +1719,7 @@ define dso_local i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i8* @var8, i8 %offset seq_cst
+  %old = atomicrmw and ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -1741,7 +1741,7 @@ define dso_local i16 @test_atomic_load_and_i16(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i16* @var16, i16 %offset seq_cst
+  %old = atomicrmw and ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -1763,7 +1763,7 @@ define dso_local i32 @test_atomic_load_and_i32(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i32* @var32, i32 %offset seq_cst
+  %old = atomicrmw and ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -1785,7 +1785,7 @@ define dso_local i64 @test_atomic_load_and_i64(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i64* @var64, i64 %offset seq_cst
+  %old = atomicrmw and ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: mvn x[[NOT:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -1807,7 +1807,7 @@ define dso_local i8 @test_atomic_load_and_i8_inv_imm() nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i8* @var8, i8 -2 seq_cst
+  %old = atomicrmw and ptr @var8, i8 -2 seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: mov w[[CONST:[0-9]+]], #1
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -1828,7 +1828,7 @@ define dso_local i16 @test_atomic_load_and_i16_inv_imm() nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i16* @var16, i16 -2 seq_cst
+  %old = atomicrmw and ptr @var16, i16 -2 seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: mov w[[CONST:[0-9]+]], #1
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -1849,7 +1849,7 @@ define dso_local i32 @test_atomic_load_and_i32_inv_imm() nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i32* @var32, i32 -2 seq_cst
+  %old = atomicrmw and ptr @var32, i32 -2 seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: mov w[[CONST:[0-9]+]], #1
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -1870,7 +1870,7 @@ define dso_local i64 @test_atomic_load_and_i64_inv_imm() nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i64* @var64, i64 -2 seq_cst
+  %old = atomicrmw and ptr @var64, i64 -2 seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: mov w[[CONST:[0-9]+]], #1
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -1891,7 +1891,7 @@ define dso_local i8 @test_atomic_load_and_i8_inv_arg(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
   %inv = xor i8 %offset, -1
-  %old = atomicrmw and i8* @var8, i8 %inv seq_cst
+  %old = atomicrmw and ptr @var8, i8 %inv seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -1911,7 +1911,7 @@ define dso_local i16 @test_atomic_load_and_i16_inv_arg(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
   %inv = xor i16 %offset, -1
-  %old = atomicrmw and i16* @var16, i16 %inv seq_cst
+  %old = atomicrmw and ptr @var16, i16 %inv seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -1931,7 +1931,7 @@ define dso_local i32 @test_atomic_load_and_i32_inv_arg(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
   %inv = xor i32 %offset, -1
-  %old = atomicrmw and i32* @var32, i32 %inv seq_cst
+  %old = atomicrmw and ptr @var32, i32 %inv seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -1951,7 +1951,7 @@ define dso_local i64 @test_atomic_load_and_i64_inv_arg(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
   %inv = xor i64 %offset, -1
-  %old = atomicrmw and i64* @var64, i64 %inv seq_cst
+  %old = atomicrmw and ptr @var64, i64 %inv seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -1971,7 +1971,7 @@ define dso_local void @test_atomic_load_and_i32_noret(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw and i32* @var32, i32 %offset seq_cst
+  atomicrmw and ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -1993,7 +1993,7 @@ define dso_local void @test_atomic_load_and_i64_noret(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw and i64* @var64, i64 %offset seq_cst
+  atomicrmw and ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: mvn x[[NOT:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -2014,7 +2014,7 @@ define dso_local i8 @test_atomic_load_add_i8_acq_rel(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i8* @var8, i8 %offset acq_rel
+   %old = atomicrmw add ptr @var8, i8 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -2035,7 +2035,7 @@ define dso_local i16 @test_atomic_load_add_i16_acq_rel(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i16* @var16, i16 %offset acq_rel
+   %old = atomicrmw add ptr @var16, i16 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -2056,7 +2056,7 @@ define dso_local i32 @test_atomic_load_add_i32_acq_rel(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i32* @var32, i32 %offset acq_rel
+   %old = atomicrmw add ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -2077,7 +2077,7 @@ define dso_local i64 @test_atomic_load_add_i64_acq_rel(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i64* @var64, i64 %offset acq_rel
+   %old = atomicrmw add ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -2098,7 +2098,7 @@ define dso_local void @test_atomic_load_add_i32_noret_acq_rel(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw add i32* @var32, i32 %offset acq_rel
+   atomicrmw add ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -2118,7 +2118,7 @@ define dso_local void @test_atomic_load_add_i64_noret_acq_rel(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw add i64* @var64, i64 %offset acq_rel
+   atomicrmw add ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -2138,7 +2138,7 @@ define dso_local i8 @test_atomic_load_add_i8_acquire(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd1_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i8* @var8, i8 %offset acquire
+   %old = atomicrmw add ptr @var8, i8 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -2159,7 +2159,7 @@ define dso_local i16 @test_atomic_load_add_i16_acquire(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd2_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i16* @var16, i16 %offset acquire
+   %old = atomicrmw add ptr @var16, i16 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -2180,7 +2180,7 @@ define dso_local i32 @test_atomic_load_add_i32_acquire(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i32* @var32, i32 %offset acquire
+   %old = atomicrmw add ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -2201,7 +2201,7 @@ define dso_local i64 @test_atomic_load_add_i64_acquire(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i64* @var64, i64 %offset acquire
+   %old = atomicrmw add ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -2222,7 +2222,7 @@ define dso_local void @test_atomic_load_add_i32_noret_acquire(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw add i32* @var32, i32 %offset acquire
+   atomicrmw add ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -2242,7 +2242,7 @@ define dso_local void @test_atomic_load_add_i64_noret_acquire(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw add i64* @var64, i64 %offset acquire
+   atomicrmw add ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -2262,7 +2262,7 @@ define dso_local i8 @test_atomic_load_add_i8_monotonic(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd1_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i8* @var8, i8 %offset monotonic
+   %old = atomicrmw add ptr @var8, i8 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -2283,7 +2283,7 @@ define dso_local i16 @test_atomic_load_add_i16_monotonic(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd2_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i16* @var16, i16 %offset monotonic
+   %old = atomicrmw add ptr @var16, i16 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -2304,7 +2304,7 @@ define dso_local i32 @test_atomic_load_add_i32_monotonic(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i32* @var32, i32 %offset monotonic
+   %old = atomicrmw add ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -2325,7 +2325,7 @@ define dso_local i64 @test_atomic_load_add_i64_monotonic(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i64* @var64, i64 %offset monotonic
+   %old = atomicrmw add ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -2346,7 +2346,7 @@ define dso_local void @test_atomic_load_add_i32_noret_monotonic(i32 %offset) nou
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw add i32* @var32, i32 %offset monotonic
+   atomicrmw add ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -2366,7 +2366,7 @@ define dso_local void @test_atomic_load_add_i64_noret_monotonic(i64 %offset) nou
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw add i64* @var64, i64 %offset monotonic
+   atomicrmw add ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -2386,7 +2386,7 @@ define dso_local i8 @test_atomic_load_add_i8_release(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd1_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i8* @var8, i8 %offset release
+   %old = atomicrmw add ptr @var8, i8 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -2407,7 +2407,7 @@ define dso_local i16 @test_atomic_load_add_i16_release(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd2_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i16* @var16, i16 %offset release
+   %old = atomicrmw add ptr @var16, i16 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -2428,7 +2428,7 @@ define dso_local i32 @test_atomic_load_add_i32_release(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i32* @var32, i32 %offset release
+   %old = atomicrmw add ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -2449,7 +2449,7 @@ define dso_local i64 @test_atomic_load_add_i64_release(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i64* @var64, i64 %offset release
+   %old = atomicrmw add ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -2470,7 +2470,7 @@ define dso_local void @test_atomic_load_add_i32_noret_release(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw add i32* @var32, i32 %offset release
+   atomicrmw add ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -2490,7 +2490,7 @@ define dso_local void @test_atomic_load_add_i64_noret_release(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw add i64* @var64, i64 %offset release
+   atomicrmw add ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -2510,7 +2510,7 @@ define dso_local i8 @test_atomic_load_add_i8_seq_cst(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw add ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -2531,7 +2531,7 @@ define dso_local i16 @test_atomic_load_add_i16_seq_cst(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i16* @var16, i16 %offset seq_cst
+   %old = atomicrmw add ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -2552,7 +2552,7 @@ define dso_local i32 @test_atomic_load_add_i32_seq_cst(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw add ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -2573,7 +2573,7 @@ define dso_local i64 @test_atomic_load_add_i64_seq_cst(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw add i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw add ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -2594,7 +2594,7 @@ define dso_local void @test_atomic_load_add_i32_noret_seq_cst(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw add i32* @var32, i32 %offset seq_cst
+   atomicrmw add ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -2614,7 +2614,7 @@ define dso_local void @test_atomic_load_add_i64_noret_seq_cst(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw add i64* @var64, i64 %offset seq_cst
+   atomicrmw add ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -2635,7 +2635,7 @@ define dso_local i8 @test_atomic_load_and_i8_acq_rel(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i8* @var8, i8 %offset acq_rel
+  %old = atomicrmw and ptr @var8, i8 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -2657,7 +2657,7 @@ define dso_local i16 @test_atomic_load_and_i16_acq_rel(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i16* @var16, i16 %offset acq_rel
+  %old = atomicrmw and ptr @var16, i16 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -2679,7 +2679,7 @@ define dso_local i32 @test_atomic_load_and_i32_acq_rel(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i32* @var32, i32 %offset acq_rel
+  %old = atomicrmw and ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -2701,7 +2701,7 @@ define dso_local i64 @test_atomic_load_and_i64_acq_rel(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i64* @var64, i64 %offset acq_rel
+  %old = atomicrmw and ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: mvn x[[NOT:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -2723,7 +2723,7 @@ define dso_local void @test_atomic_load_and_i32_noret_acq_rel(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw and i32* @var32, i32 %offset acq_rel
+  atomicrmw and ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -2745,7 +2745,7 @@ define dso_local void @test_atomic_load_and_i64_noret_acq_rel(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw and i64* @var64, i64 %offset acq_rel
+  atomicrmw and ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: mvn x[[NOT:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -2767,7 +2767,7 @@ define dso_local i8 @test_atomic_load_and_i8_acquire(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr1_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i8* @var8, i8 %offset acquire
+  %old = atomicrmw and ptr @var8, i8 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -2789,7 +2789,7 @@ define dso_local i16 @test_atomic_load_and_i16_acquire(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr2_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i16* @var16, i16 %offset acquire
+  %old = atomicrmw and ptr @var16, i16 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -2811,7 +2811,7 @@ define dso_local i32 @test_atomic_load_and_i32_acquire(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr4_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i32* @var32, i32 %offset acquire
+  %old = atomicrmw and ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -2833,7 +2833,7 @@ define dso_local i64 @test_atomic_load_and_i64_acquire(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr8_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i64* @var64, i64 %offset acquire
+  %old = atomicrmw and ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: mvn x[[NOT:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -2855,7 +2855,7 @@ define dso_local void @test_atomic_load_and_i32_noret_acquire(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr4_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw and i32* @var32, i32 %offset acquire
+  atomicrmw and ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -2877,7 +2877,7 @@ define dso_local void @test_atomic_load_and_i64_noret_acquire(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr8_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw and i64* @var64, i64 %offset acquire
+  atomicrmw and ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: mvn x[[NOT:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -2899,7 +2899,7 @@ define dso_local i8 @test_atomic_load_and_i8_monotonic(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr1_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i8* @var8, i8 %offset monotonic
+  %old = atomicrmw and ptr @var8, i8 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -2921,7 +2921,7 @@ define dso_local i16 @test_atomic_load_and_i16_monotonic(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr2_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i16* @var16, i16 %offset monotonic
+  %old = atomicrmw and ptr @var16, i16 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -2943,7 +2943,7 @@ define dso_local i32 @test_atomic_load_and_i32_monotonic(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr4_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i32* @var32, i32 %offset monotonic
+  %old = atomicrmw and ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -2965,7 +2965,7 @@ define dso_local i64 @test_atomic_load_and_i64_monotonic(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr8_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i64* @var64, i64 %offset monotonic
+  %old = atomicrmw and ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: mvn x[[NOT:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -2987,7 +2987,7 @@ define dso_local void @test_atomic_load_and_i32_noret_monotonic(i32 %offset) nou
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr4_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw and i32* @var32, i32 %offset monotonic
+  atomicrmw and ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -3009,7 +3009,7 @@ define dso_local void @test_atomic_load_and_i64_noret_monotonic(i64 %offset) nou
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr8_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw and i64* @var64, i64 %offset monotonic
+  atomicrmw and ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: mvn x[[NOT:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -3031,7 +3031,7 @@ define dso_local i8 @test_atomic_load_and_i8_release(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr1_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i8* @var8, i8 %offset release
+  %old = atomicrmw and ptr @var8, i8 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -3053,7 +3053,7 @@ define dso_local i16 @test_atomic_load_and_i16_release(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr2_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i16* @var16, i16 %offset release
+  %old = atomicrmw and ptr @var16, i16 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -3075,7 +3075,7 @@ define dso_local i32 @test_atomic_load_and_i32_release(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr4_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i32* @var32, i32 %offset release
+  %old = atomicrmw and ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -3097,7 +3097,7 @@ define dso_local i64 @test_atomic_load_and_i64_release(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr8_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i64* @var64, i64 %offset release
+  %old = atomicrmw and ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: mvn x[[NOT:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -3119,7 +3119,7 @@ define dso_local void @test_atomic_load_and_i32_noret_release(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr4_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw and i32* @var32, i32 %offset release
+  atomicrmw and ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -3141,7 +3141,7 @@ define dso_local void @test_atomic_load_and_i64_noret_release(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr8_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw and i64* @var64, i64 %offset release
+  atomicrmw and ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: mvn x[[NOT:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -3163,7 +3163,7 @@ define dso_local i8 @test_atomic_load_and_i8_seq_cst(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i8* @var8, i8 %offset seq_cst
+  %old = atomicrmw and ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -3185,7 +3185,7 @@ define dso_local i16 @test_atomic_load_and_i16_seq_cst(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i16* @var16, i16 %offset seq_cst
+  %old = atomicrmw and ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -3207,7 +3207,7 @@ define dso_local i32 @test_atomic_load_and_i32_seq_cst(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i32* @var32, i32 %offset seq_cst
+  %old = atomicrmw and ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -3229,7 +3229,7 @@ define dso_local i64 @test_atomic_load_and_i64_seq_cst(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw and i64* @var64, i64 %offset seq_cst
+  %old = atomicrmw and ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: mvn x[[NOT:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -3251,7 +3251,7 @@ define dso_local void @test_atomic_load_and_i32_noret_seq_cst(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw and i32* @var32, i32 %offset seq_cst
+  atomicrmw and ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: mvn w[[NOT:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -3273,7 +3273,7 @@ define dso_local void @test_atomic_load_and_i64_noret_seq_cst(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldclr8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw and i64* @var64, i64 %offset seq_cst
+  atomicrmw and ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: mvn x[[NOT:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -3294,7 +3294,7 @@ define dso_local i8 @test_atomic_cmpxchg_i8_acquire(i8 %wanted, i8 %new) nounwin
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas1_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire acquire
+   %pair = cmpxchg ptr @var8, i8 %wanted, i8 %new acquire acquire
    %old = extractvalue { i8, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -3317,7 +3317,7 @@ define dso_local i16 @test_atomic_cmpxchg_i16_acquire(i16 %wanted, i16 %new) nou
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas2_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i16* @var16, i16 %wanted, i16 %new acquire acquire
+   %pair = cmpxchg ptr @var16, i16 %wanted, i16 %new acquire acquire
    %old = extractvalue { i16, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -3340,7 +3340,7 @@ define dso_local i32 @test_atomic_cmpxchg_i32_acquire(i32 %wanted, i32 %new) nou
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas4_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i32* @var32, i32 %wanted, i32 %new acquire acquire
+   %pair = cmpxchg ptr @var32, i32 %wanted, i32 %new acquire acquire
    %old = extractvalue { i32, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -3363,7 +3363,7 @@ define dso_local i64 @test_atomic_cmpxchg_i64_acquire(i64 %wanted, i64 %new) nou
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas8_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i64* @var64, i64 %wanted, i64 %new acquire acquire
+   %pair = cmpxchg ptr @var64, i64 %wanted, i64 %new acquire acquire
    %old = extractvalue { i64, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -3386,7 +3386,7 @@ define dso_local i128 @test_atomic_cmpxchg_i128_acquire(i128 %wanted, i128 %new)
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas16_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i128* @var128, i128 %wanted, i128 %new acquire acquire
+   %pair = cmpxchg ptr @var128, i128 %wanted, i128 %new acquire acquire
    %old = extractvalue { i128, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -3409,7 +3409,7 @@ define dso_local i8 @test_atomic_cmpxchg_i8_monotonic(i8 %wanted, i8 %new) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas1_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i8* @var8, i8 %wanted, i8 %new monotonic monotonic
+   %pair = cmpxchg ptr @var8, i8 %wanted, i8 %new monotonic monotonic
    %old = extractvalue { i8, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -3432,7 +3432,7 @@ define dso_local i16 @test_atomic_cmpxchg_i16_monotonic(i16 %wanted, i16 %new) n
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas2_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i16* @var16, i16 %wanted, i16 %new monotonic monotonic
+   %pair = cmpxchg ptr @var16, i16 %wanted, i16 %new monotonic monotonic
    %old = extractvalue { i16, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -3455,7 +3455,7 @@ define dso_local i32 @test_atomic_cmpxchg_i32_monotonic(i32 %wanted, i32 %new) n
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas4_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i32* @var32, i32 %wanted, i32 %new monotonic monotonic
+   %pair = cmpxchg ptr @var32, i32 %wanted, i32 %new monotonic monotonic
    %old = extractvalue { i32, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -3478,7 +3478,7 @@ define dso_local i64 @test_atomic_cmpxchg_i64_monotonic(i64 %wanted, i64 %new) n
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas8_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic monotonic
+   %pair = cmpxchg ptr @var64, i64 %wanted, i64 %new monotonic monotonic
    %old = extractvalue { i64, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -3501,7 +3501,7 @@ define dso_local i128 @test_atomic_cmpxchg_i128_monotonic(i128 %wanted, i128 %ne
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas16_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i128* @var128, i128 %wanted, i128 %new monotonic monotonic
+   %pair = cmpxchg ptr @var128, i128 %wanted, i128 %new monotonic monotonic
    %old = extractvalue { i128, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -3524,7 +3524,7 @@ define dso_local i8 @test_atomic_cmpxchg_i8_seq_cst(i8 %wanted, i8 %new) nounwin
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i8* @var8, i8 %wanted, i8 %new seq_cst seq_cst
+   %pair = cmpxchg ptr @var8, i8 %wanted, i8 %new seq_cst seq_cst
    %old = extractvalue { i8, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -3547,7 +3547,7 @@ define dso_local i16 @test_atomic_cmpxchg_i16_seq_cst(i16 %wanted, i16 %new) nou
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst seq_cst
+   %pair = cmpxchg ptr @var16, i16 %wanted, i16 %new seq_cst seq_cst
    %old = extractvalue { i16, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -3570,7 +3570,7 @@ define dso_local i32 @test_atomic_cmpxchg_i32_seq_cst(i32 %wanted, i32 %new) nou
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i32* @var32, i32 %wanted, i32 %new seq_cst seq_cst
+   %pair = cmpxchg ptr @var32, i32 %wanted, i32 %new seq_cst seq_cst
    %old = extractvalue { i32, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -3593,7 +3593,7 @@ define dso_local i32 @test_atomic_cmpxchg_i32_monotonic_seq_cst(i32 %wanted, i32
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i32* @var32, i32 %wanted, i32 %new monotonic seq_cst
+   %pair = cmpxchg ptr @var32, i32 %wanted, i32 %new monotonic seq_cst
    %old = extractvalue { i32, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -3616,7 +3616,7 @@ define dso_local i32 @test_atomic_cmpxchg_i32_release_acquire(i32 %wanted, i32 %
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i32* @var32, i32 %wanted, i32 %new release acquire
+   %pair = cmpxchg ptr @var32, i32 %wanted, i32 %new release acquire
    %old = extractvalue { i32, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -3639,7 +3639,7 @@ define dso_local i64 @test_atomic_cmpxchg_i64_seq_cst(i64 %wanted, i64 %new) nou
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i64* @var64, i64 %wanted, i64 %new seq_cst seq_cst
+   %pair = cmpxchg ptr @var64, i64 %wanted, i64 %new seq_cst seq_cst
    %old = extractvalue { i64, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -3662,7 +3662,7 @@ define dso_local i128 @test_atomic_cmpxchg_i128_seq_cst(i128 %wanted, i128 %new)
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_cas16_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %pair = cmpxchg i128* @var128, i128 %wanted, i128 %new seq_cst seq_cst
+   %pair = cmpxchg ptr @var128, i128 %wanted, i128 %new seq_cst seq_cst
    %old = extractvalue { i128, i1 } %pair, 0
 
 ; CHECK-NOT: dmb
@@ -3692,7 +3692,7 @@ define dso_local i8 @test_atomic_load_max_i8_acq_rel(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i8* @var8, i8 %offset acq_rel
+   %old = atomicrmw max ptr @var8, i8 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -3720,7 +3720,7 @@ define dso_local i16 @test_atomic_load_max_i16_acq_rel(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i16* @var16, i16 %offset acq_rel
+   %old = atomicrmw max ptr @var16, i16 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -3747,7 +3747,7 @@ define dso_local i32 @test_atomic_load_max_i32_acq_rel(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i32* @var32, i32 %offset acq_rel
+   %old = atomicrmw max ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -3774,7 +3774,7 @@ define dso_local i64 @test_atomic_load_max_i64_acq_rel(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i64* @var64, i64 %offset acq_rel
+   %old = atomicrmw max ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -3800,7 +3800,7 @@ define dso_local void @test_atomic_load_max_i32_noret_acq_rel(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw max i32* @var32, i32 %offset acq_rel
+   atomicrmw max ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -3825,7 +3825,7 @@ define dso_local void @test_atomic_load_max_i64_noret_acq_rel(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw max i64* @var64, i64 %offset acq_rel
+   atomicrmw max ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -3852,7 +3852,7 @@ define dso_local i8 @test_atomic_load_max_i8_acquire(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i8* @var8, i8 %offset acquire
+   %old = atomicrmw max ptr @var8, i8 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -3880,7 +3880,7 @@ define dso_local i16 @test_atomic_load_max_i16_acquire(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i16* @var16, i16 %offset acquire
+   %old = atomicrmw max ptr @var16, i16 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -3907,7 +3907,7 @@ define dso_local i32 @test_atomic_load_max_i32_acquire(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i32* @var32, i32 %offset acquire
+   %old = atomicrmw max ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -3934,7 +3934,7 @@ define dso_local i64 @test_atomic_load_max_i64_acquire(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i64* @var64, i64 %offset acquire
+   %old = atomicrmw max ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -3960,7 +3960,7 @@ define dso_local void @test_atomic_load_max_i32_noret_acquire(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw max i32* @var32, i32 %offset acquire
+   atomicrmw max ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -3985,7 +3985,7 @@ define dso_local void @test_atomic_load_max_i64_noret_acquire(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw max i64* @var64, i64 %offset acquire
+   atomicrmw max ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -4012,7 +4012,7 @@ define dso_local i8 @test_atomic_load_max_i8_monotonic(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i8* @var8, i8 %offset monotonic
+   %old = atomicrmw max ptr @var8, i8 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -4040,7 +4040,7 @@ define dso_local i16 @test_atomic_load_max_i16_monotonic(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i16* @var16, i16 %offset monotonic
+   %old = atomicrmw max ptr @var16, i16 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -4067,7 +4067,7 @@ define dso_local i32 @test_atomic_load_max_i32_monotonic(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i32* @var32, i32 %offset monotonic
+   %old = atomicrmw max ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -4094,7 +4094,7 @@ define dso_local i64 @test_atomic_load_max_i64_monotonic(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i64* @var64, i64 %offset monotonic
+   %old = atomicrmw max ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -4120,7 +4120,7 @@ define dso_local void @test_atomic_load_max_i32_noret_monotonic(i32 %offset) nou
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw max i32* @var32, i32 %offset monotonic
+   atomicrmw max ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -4145,7 +4145,7 @@ define dso_local void @test_atomic_load_max_i64_noret_monotonic(i64 %offset) nou
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw max i64* @var64, i64 %offset monotonic
+   atomicrmw max ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -4172,7 +4172,7 @@ define dso_local i8 @test_atomic_load_max_i8_release(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i8* @var8, i8 %offset release
+   %old = atomicrmw max ptr @var8, i8 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -4200,7 +4200,7 @@ define dso_local i16 @test_atomic_load_max_i16_release(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i16* @var16, i16 %offset release
+   %old = atomicrmw max ptr @var16, i16 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -4227,7 +4227,7 @@ define dso_local i32 @test_atomic_load_max_i32_release(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i32* @var32, i32 %offset release
+   %old = atomicrmw max ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -4254,7 +4254,7 @@ define dso_local i64 @test_atomic_load_max_i64_release(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i64* @var64, i64 %offset release
+   %old = atomicrmw max ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -4280,7 +4280,7 @@ define dso_local void @test_atomic_load_max_i32_noret_release(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw max i32* @var32, i32 %offset release
+   atomicrmw max ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -4305,7 +4305,7 @@ define dso_local void @test_atomic_load_max_i64_noret_release(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw max i64* @var64, i64 %offset release
+   atomicrmw max ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -4332,7 +4332,7 @@ define dso_local i8 @test_atomic_load_max_i8_seq_cst(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw max ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -4360,7 +4360,7 @@ define dso_local i16 @test_atomic_load_max_i16_seq_cst(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i16* @var16, i16 %offset seq_cst
+   %old = atomicrmw max ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -4387,7 +4387,7 @@ define dso_local i32 @test_atomic_load_max_i32_seq_cst(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw max ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -4414,7 +4414,7 @@ define dso_local i64 @test_atomic_load_max_i64_seq_cst(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw max i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw max ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -4440,7 +4440,7 @@ define dso_local void @test_atomic_load_max_i32_noret_seq_cst(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw max i32* @var32, i32 %offset seq_cst
+   atomicrmw max ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -4465,7 +4465,7 @@ define dso_local void @test_atomic_load_max_i64_noret_seq_cst(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw max i64* @var64, i64 %offset seq_cst
+   atomicrmw max ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -4492,7 +4492,7 @@ define dso_local i8 @test_atomic_load_min_i8_acq_rel(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i8* @var8, i8 %offset acq_rel
+   %old = atomicrmw min ptr @var8, i8 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -4520,7 +4520,7 @@ define dso_local i16 @test_atomic_load_min_i16_acq_rel(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i16* @var16, i16 %offset acq_rel
+   %old = atomicrmw min ptr @var16, i16 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -4547,7 +4547,7 @@ define dso_local i32 @test_atomic_load_min_i32_acq_rel(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i32* @var32, i32 %offset acq_rel
+   %old = atomicrmw min ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -4574,7 +4574,7 @@ define dso_local i64 @test_atomic_load_min_i64_acq_rel(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i64* @var64, i64 %offset acq_rel
+   %old = atomicrmw min ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -4600,7 +4600,7 @@ define dso_local void @test_atomic_load_min_i32_noret_acq_rel(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw min i32* @var32, i32 %offset acq_rel
+   atomicrmw min ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -4625,7 +4625,7 @@ define dso_local void @test_atomic_load_min_i64_noret_acq_rel(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw min i64* @var64, i64 %offset acq_rel
+   atomicrmw min ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -4652,7 +4652,7 @@ define dso_local i8 @test_atomic_load_min_i8_acquire(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i8* @var8, i8 %offset acquire
+   %old = atomicrmw min ptr @var8, i8 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -4680,7 +4680,7 @@ define dso_local i16 @test_atomic_load_min_i16_acquire(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i16* @var16, i16 %offset acquire
+   %old = atomicrmw min ptr @var16, i16 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -4707,7 +4707,7 @@ define dso_local i32 @test_atomic_load_min_i32_acquire(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i32* @var32, i32 %offset acquire
+   %old = atomicrmw min ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -4734,7 +4734,7 @@ define dso_local i64 @test_atomic_load_min_i64_acquire(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i64* @var64, i64 %offset acquire
+   %old = atomicrmw min ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -4760,7 +4760,7 @@ define dso_local void @test_atomic_load_min_i32_noret_acquire(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw min i32* @var32, i32 %offset acquire
+   atomicrmw min ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -4785,7 +4785,7 @@ define dso_local void @test_atomic_load_min_i64_noret_acquire(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw min i64* @var64, i64 %offset acquire
+   atomicrmw min ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -4812,7 +4812,7 @@ define dso_local i8 @test_atomic_load_min_i8_monotonic(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i8* @var8, i8 %offset monotonic
+   %old = atomicrmw min ptr @var8, i8 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -4840,7 +4840,7 @@ define dso_local i16 @test_atomic_load_min_i16_monotonic(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i16* @var16, i16 %offset monotonic
+   %old = atomicrmw min ptr @var16, i16 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -4867,7 +4867,7 @@ define dso_local i32 @test_atomic_load_min_i32_monotonic(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i32* @var32, i32 %offset monotonic
+   %old = atomicrmw min ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -4894,7 +4894,7 @@ define dso_local i64 @test_atomic_load_min_i64_monotonic(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i64* @var64, i64 %offset monotonic
+   %old = atomicrmw min ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -4920,7 +4920,7 @@ define dso_local void @test_atomic_load_min_i32_noret_monotonic(i32 %offset) nou
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw min i32* @var32, i32 %offset monotonic
+   atomicrmw min ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -4945,7 +4945,7 @@ define dso_local void @test_atomic_load_min_i64_noret_monotonic(i64 %offset) nou
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw min i64* @var64, i64 %offset monotonic
+   atomicrmw min ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -4972,7 +4972,7 @@ define dso_local i8 @test_atomic_load_min_i8_release(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i8* @var8, i8 %offset release
+   %old = atomicrmw min ptr @var8, i8 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -5000,7 +5000,7 @@ define dso_local i16 @test_atomic_load_min_i16_release(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i16* @var16, i16 %offset release
+   %old = atomicrmw min ptr @var16, i16 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -5027,7 +5027,7 @@ define dso_local i32 @test_atomic_load_min_i32_release(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i32* @var32, i32 %offset release
+   %old = atomicrmw min ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -5054,7 +5054,7 @@ define dso_local i64 @test_atomic_load_min_i64_release(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i64* @var64, i64 %offset release
+   %old = atomicrmw min ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -5080,7 +5080,7 @@ define dso_local void @test_atomic_load_min_i32_noret_release(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw min i32* @var32, i32 %offset release
+   atomicrmw min ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -5105,7 +5105,7 @@ define dso_local void @test_atomic_load_min_i64_noret_release(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw min i64* @var64, i64 %offset release
+   atomicrmw min ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -5132,7 +5132,7 @@ define dso_local i8 @test_atomic_load_min_i8_seq_cst(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw min ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -5160,7 +5160,7 @@ define dso_local i16 @test_atomic_load_min_i16_seq_cst(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i16* @var16, i16 %offset seq_cst
+   %old = atomicrmw min ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -5187,7 +5187,7 @@ define dso_local i32 @test_atomic_load_min_i32_seq_cst(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw min ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -5214,7 +5214,7 @@ define dso_local i64 @test_atomic_load_min_i64_seq_cst(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw min i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw min ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -5240,7 +5240,7 @@ define dso_local void @test_atomic_load_min_i32_noret_seq_cst(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw min i32* @var32, i32 %offset seq_cst
+   atomicrmw min ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -5265,7 +5265,7 @@ define dso_local void @test_atomic_load_min_i64_noret_seq_cst(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw min i64* @var64, i64 %offset seq_cst
+   atomicrmw min ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -5285,7 +5285,7 @@ define dso_local i8 @test_atomic_load_or_i8_acq_rel(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i8* @var8, i8 %offset acq_rel
+   %old = atomicrmw or ptr @var8, i8 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -5306,7 +5306,7 @@ define dso_local i16 @test_atomic_load_or_i16_acq_rel(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i16* @var16, i16 %offset acq_rel
+   %old = atomicrmw or ptr @var16, i16 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -5327,7 +5327,7 @@ define dso_local i32 @test_atomic_load_or_i32_acq_rel(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i32* @var32, i32 %offset acq_rel
+   %old = atomicrmw or ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -5348,7 +5348,7 @@ define dso_local i64 @test_atomic_load_or_i64_acq_rel(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i64* @var64, i64 %offset acq_rel
+   %old = atomicrmw or ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -5369,7 +5369,7 @@ define dso_local void @test_atomic_load_or_i32_noret_acq_rel(i32 %offset) nounwi
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw or i32* @var32, i32 %offset acq_rel
+   atomicrmw or ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -5389,7 +5389,7 @@ define dso_local void @test_atomic_load_or_i64_noret_acq_rel(i64 %offset) nounwi
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw or i64* @var64, i64 %offset acq_rel
+   atomicrmw or ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -5409,7 +5409,7 @@ define dso_local i8 @test_atomic_load_or_i8_acquire(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset1_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i8* @var8, i8 %offset acquire
+   %old = atomicrmw or ptr @var8, i8 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -5430,7 +5430,7 @@ define dso_local i16 @test_atomic_load_or_i16_acquire(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset2_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i16* @var16, i16 %offset acquire
+   %old = atomicrmw or ptr @var16, i16 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -5451,7 +5451,7 @@ define dso_local i32 @test_atomic_load_or_i32_acquire(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset4_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i32* @var32, i32 %offset acquire
+   %old = atomicrmw or ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -5472,7 +5472,7 @@ define dso_local i64 @test_atomic_load_or_i64_acquire(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset8_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i64* @var64, i64 %offset acquire
+   %old = atomicrmw or ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -5493,7 +5493,7 @@ define dso_local void @test_atomic_load_or_i32_noret_acquire(i32 %offset) nounwi
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset4_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw or i32* @var32, i32 %offset acquire
+   atomicrmw or ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -5513,7 +5513,7 @@ define dso_local void @test_atomic_load_or_i64_noret_acquire(i64 %offset) nounwi
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset8_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw or i64* @var64, i64 %offset acquire
+   atomicrmw or ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -5533,7 +5533,7 @@ define dso_local i8 @test_atomic_load_or_i8_monotonic(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset1_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i8* @var8, i8 %offset monotonic
+   %old = atomicrmw or ptr @var8, i8 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -5554,7 +5554,7 @@ define dso_local i16 @test_atomic_load_or_i16_monotonic(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset2_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i16* @var16, i16 %offset monotonic
+   %old = atomicrmw or ptr @var16, i16 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -5575,7 +5575,7 @@ define dso_local i32 @test_atomic_load_or_i32_monotonic(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset4_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i32* @var32, i32 %offset monotonic
+   %old = atomicrmw or ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -5596,7 +5596,7 @@ define dso_local i64 @test_atomic_load_or_i64_monotonic(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset8_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i64* @var64, i64 %offset monotonic
+   %old = atomicrmw or ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -5617,7 +5617,7 @@ define dso_local void @test_atomic_load_or_i32_noret_monotonic(i32 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset4_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw or i32* @var32, i32 %offset monotonic
+   atomicrmw or ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -5637,7 +5637,7 @@ define dso_local void @test_atomic_load_or_i64_noret_monotonic(i64 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset8_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw or i64* @var64, i64 %offset monotonic
+   atomicrmw or ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -5657,7 +5657,7 @@ define dso_local i8 @test_atomic_load_or_i8_release(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset1_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i8* @var8, i8 %offset release
+   %old = atomicrmw or ptr @var8, i8 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -5678,7 +5678,7 @@ define dso_local i16 @test_atomic_load_or_i16_release(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset2_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i16* @var16, i16 %offset release
+   %old = atomicrmw or ptr @var16, i16 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -5699,7 +5699,7 @@ define dso_local i32 @test_atomic_load_or_i32_release(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset4_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i32* @var32, i32 %offset release
+   %old = atomicrmw or ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -5720,7 +5720,7 @@ define dso_local i64 @test_atomic_load_or_i64_release(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset8_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i64* @var64, i64 %offset release
+   %old = atomicrmw or ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -5741,7 +5741,7 @@ define dso_local void @test_atomic_load_or_i32_noret_release(i32 %offset) nounwi
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset4_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw or i32* @var32, i32 %offset release
+   atomicrmw or ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -5761,7 +5761,7 @@ define dso_local void @test_atomic_load_or_i64_noret_release(i64 %offset) nounwi
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset8_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw or i64* @var64, i64 %offset release
+   atomicrmw or ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -5781,7 +5781,7 @@ define dso_local i8 @test_atomic_load_or_i8_seq_cst(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw or ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -5802,7 +5802,7 @@ define dso_local i16 @test_atomic_load_or_i16_seq_cst(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i16* @var16, i16 %offset seq_cst
+   %old = atomicrmw or ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -5823,7 +5823,7 @@ define dso_local i32 @test_atomic_load_or_i32_seq_cst(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw or ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -5844,7 +5844,7 @@ define dso_local i64 @test_atomic_load_or_i64_seq_cst(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw or i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw or ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -5865,7 +5865,7 @@ define dso_local void @test_atomic_load_or_i32_noret_seq_cst(i32 %offset) nounwi
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw or i32* @var32, i32 %offset seq_cst
+   atomicrmw or ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -5885,7 +5885,7 @@ define dso_local void @test_atomic_load_or_i64_noret_seq_cst(i64 %offset) nounwi
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldset8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw or i64* @var64, i64 %offset seq_cst
+   atomicrmw or ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -5906,7 +5906,7 @@ define dso_local i8 @test_atomic_load_sub_i8_acq_rel(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i8* @var8, i8 %offset acq_rel
+  %old = atomicrmw sub ptr @var8, i8 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -5929,7 +5929,7 @@ define dso_local i16 @test_atomic_load_sub_i16_acq_rel(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i16* @var16, i16 %offset acq_rel
+  %old = atomicrmw sub ptr @var16, i16 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -5952,7 +5952,7 @@ define dso_local i32 @test_atomic_load_sub_i32_acq_rel(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i32* @var32, i32 %offset acq_rel
+  %old = atomicrmw sub ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -5975,7 +5975,7 @@ define dso_local i64 @test_atomic_load_sub_i64_acq_rel(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i64* @var64, i64 %offset acq_rel
+  %old = atomicrmw sub ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: neg x[[NEG:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -5998,7 +5998,7 @@ define dso_local void @test_atomic_load_sub_i32_noret_acq_rel(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw sub i32* @var32, i32 %offset acq_rel
+  atomicrmw sub ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -6021,7 +6021,7 @@ define dso_local void @test_atomic_load_sub_i64_noret_acq_rel(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw sub i64* @var64, i64 %offset acq_rel
+  atomicrmw sub ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: neg x[[NEG:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -6044,7 +6044,7 @@ define dso_local i8 @test_atomic_load_sub_i8_acquire(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd1_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i8* @var8, i8 %offset acquire
+  %old = atomicrmw sub ptr @var8, i8 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -6067,7 +6067,7 @@ define dso_local i16 @test_atomic_load_sub_i16_acquire(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd2_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i16* @var16, i16 %offset acquire
+  %old = atomicrmw sub ptr @var16, i16 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -6090,7 +6090,7 @@ define dso_local i32 @test_atomic_load_sub_i32_acquire(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i32* @var32, i32 %offset acquire
+  %old = atomicrmw sub ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -6113,7 +6113,7 @@ define dso_local i64 @test_atomic_load_sub_i64_acquire(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i64* @var64, i64 %offset acquire
+  %old = atomicrmw sub ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: neg x[[NEG:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -6136,7 +6136,7 @@ define dso_local void @test_atomic_load_sub_i32_noret_acquire(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw sub i32* @var32, i32 %offset acquire
+  atomicrmw sub ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -6159,7 +6159,7 @@ define dso_local void @test_atomic_load_sub_i64_noret_acquire(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw sub i64* @var64, i64 %offset acquire
+  atomicrmw sub ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: neg x[[NEG:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -6182,7 +6182,7 @@ define dso_local i8 @test_atomic_load_sub_i8_monotonic(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd1_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i8* @var8, i8 %offset monotonic
+  %old = atomicrmw sub ptr @var8, i8 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -6205,7 +6205,7 @@ define dso_local i16 @test_atomic_load_sub_i16_monotonic(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd2_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i16* @var16, i16 %offset monotonic
+  %old = atomicrmw sub ptr @var16, i16 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -6228,7 +6228,7 @@ define dso_local i32 @test_atomic_load_sub_i32_monotonic(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i32* @var32, i32 %offset monotonic
+  %old = atomicrmw sub ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -6251,7 +6251,7 @@ define dso_local i64 @test_atomic_load_sub_i64_monotonic(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i64* @var64, i64 %offset monotonic
+  %old = atomicrmw sub ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: neg x[[NEG:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -6274,7 +6274,7 @@ define dso_local void @test_atomic_load_sub_i32_noret_monotonic(i32 %offset) nou
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw sub i32* @var32, i32 %offset monotonic
+  atomicrmw sub ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -6297,7 +6297,7 @@ define dso_local void @test_atomic_load_sub_i64_noret_monotonic(i64 %offset) nou
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw sub i64* @var64, i64 %offset monotonic
+  atomicrmw sub ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: neg x[[NEG:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -6320,7 +6320,7 @@ define dso_local i8 @test_atomic_load_sub_i8_release(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd1_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i8* @var8, i8 %offset release
+  %old = atomicrmw sub ptr @var8, i8 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -6343,7 +6343,7 @@ define dso_local i16 @test_atomic_load_sub_i16_release(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd2_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i16* @var16, i16 %offset release
+  %old = atomicrmw sub ptr @var16, i16 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -6366,7 +6366,7 @@ define dso_local i32 @test_atomic_load_sub_i32_release(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i32* @var32, i32 %offset release
+  %old = atomicrmw sub ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -6389,7 +6389,7 @@ define dso_local i64 @test_atomic_load_sub_i64_release(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i64* @var64, i64 %offset release
+  %old = atomicrmw sub ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: neg x[[NEG:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -6412,7 +6412,7 @@ define dso_local void @test_atomic_load_sub_i32_noret_release(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw sub i32* @var32, i32 %offset release
+  atomicrmw sub ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -6435,7 +6435,7 @@ define dso_local void @test_atomic_load_sub_i64_noret_release(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw sub i64* @var64, i64 %offset release
+  atomicrmw sub ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: neg x[[NEG:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -6458,7 +6458,7 @@ define dso_local i8 @test_atomic_load_sub_i8_seq_cst(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i8* @var8, i8 %offset seq_cst
+  %old = atomicrmw sub ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -6481,7 +6481,7 @@ define dso_local i16 @test_atomic_load_sub_i16_seq_cst(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i16* @var16, i16 %offset seq_cst
+  %old = atomicrmw sub ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -6504,7 +6504,7 @@ define dso_local i32 @test_atomic_load_sub_i32_seq_cst(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i32* @var32, i32 %offset seq_cst
+  %old = atomicrmw sub ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -6527,7 +6527,7 @@ define dso_local i64 @test_atomic_load_sub_i64_seq_cst(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %old = atomicrmw sub i64* @var64, i64 %offset seq_cst
+  %old = atomicrmw sub ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: neg x[[NEG:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -6550,7 +6550,7 @@ define dso_local void @test_atomic_load_sub_i32_noret_seq_cst(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw sub i32* @var32, i32 %offset seq_cst
+  atomicrmw sub ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: neg w[[NEG:[0-9]+]], w[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -6573,7 +6573,7 @@ define dso_local void @test_atomic_load_sub_i64_noret_seq_cst(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldadd8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  atomicrmw sub i64* @var64, i64 %offset seq_cst
+  atomicrmw sub ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: neg x[[NEG:[0-9]+]], x[[OLD:[0-9]+]]
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -6595,7 +6595,7 @@ define dso_local i8 @test_atomic_load_xchg_i8_acq_rel(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i8* @var8, i8 %offset acq_rel
+   %old = atomicrmw xchg ptr @var8, i8 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -6616,7 +6616,7 @@ define dso_local i16 @test_atomic_load_xchg_i16_acq_rel(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i16* @var16, i16 %offset acq_rel
+   %old = atomicrmw xchg ptr @var16, i16 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -6637,7 +6637,7 @@ define dso_local i32 @test_atomic_load_xchg_i32_acq_rel(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i32* @var32, i32 %offset acq_rel
+   %old = atomicrmw xchg ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -6658,7 +6658,7 @@ define dso_local i64 @test_atomic_load_xchg_i64_acq_rel(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i64* @var64, i64 %offset acq_rel
+   %old = atomicrmw xchg ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -6679,7 +6679,7 @@ define dso_local void @test_atomic_load_xchg_i32_noret_acq_rel(i32 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xchg i32* @var32, i32 %offset acq_rel
+   atomicrmw xchg ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -6700,7 +6700,7 @@ define dso_local void @test_atomic_load_xchg_i64_noret_acq_rel(i64 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xchg i64* @var64, i64 %offset acq_rel
+   atomicrmw xchg ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -6721,7 +6721,7 @@ define dso_local i8 @test_atomic_load_xchg_i8_acquire(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp1_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i8* @var8, i8 %offset acquire
+   %old = atomicrmw xchg ptr @var8, i8 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -6742,7 +6742,7 @@ define dso_local i16 @test_atomic_load_xchg_i16_acquire(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp2_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i16* @var16, i16 %offset acquire
+   %old = atomicrmw xchg ptr @var16, i16 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -6763,7 +6763,7 @@ define dso_local i32 @test_atomic_load_xchg_i32_acquire(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp4_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i32* @var32, i32 %offset acquire
+   %old = atomicrmw xchg ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -6784,7 +6784,7 @@ define dso_local i64 @test_atomic_load_xchg_i64_acquire(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp8_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i64* @var64, i64 %offset acquire
+   %old = atomicrmw xchg ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -6805,7 +6805,7 @@ define dso_local void @test_atomic_load_xchg_i32_noret_acquire(i32 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp4_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xchg i32* @var32, i32 %offset acquire
+   atomicrmw xchg ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -6826,7 +6826,7 @@ define dso_local void @test_atomic_load_xchg_i64_noret_acquire(i64 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp8_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xchg i64* @var64, i64 %offset acquire
+   atomicrmw xchg ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -6847,7 +6847,7 @@ define dso_local i8 @test_atomic_load_xchg_i8_monotonic(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp1_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i8* @var8, i8 %offset monotonic
+   %old = atomicrmw xchg ptr @var8, i8 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -6868,7 +6868,7 @@ define dso_local i16 @test_atomic_load_xchg_i16_monotonic(i16 %offset) nounwind
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp2_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i16* @var16, i16 %offset monotonic
+   %old = atomicrmw xchg ptr @var16, i16 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -6889,7 +6889,7 @@ define dso_local i32 @test_atomic_load_xchg_i32_monotonic(i32 %offset) nounwind
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp4_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i32* @var32, i32 %offset monotonic
+   %old = atomicrmw xchg ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -6910,7 +6910,7 @@ define dso_local i64 @test_atomic_load_xchg_i64_monotonic(i64 %offset) nounwind
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp8_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i64* @var64, i64 %offset monotonic
+   %old = atomicrmw xchg ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -6931,7 +6931,7 @@ define dso_local void @test_atomic_load_xchg_i32_noret_monotonic(i32 %offset) no
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp4_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xchg i32* @var32, i32 %offset monotonic
+   atomicrmw xchg ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -6952,7 +6952,7 @@ define dso_local void @test_atomic_load_xchg_i64_noret_monotonic(i64 %offset) no
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp8_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xchg i64* @var64, i64 %offset monotonic
+   atomicrmw xchg ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -6973,7 +6973,7 @@ define dso_local i8 @test_atomic_load_xchg_i8_release(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp1_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i8* @var8, i8 %offset release
+   %old = atomicrmw xchg ptr @var8, i8 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -6994,7 +6994,7 @@ define dso_local i16 @test_atomic_load_xchg_i16_release(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp2_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i16* @var16, i16 %offset release
+   %old = atomicrmw xchg ptr @var16, i16 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -7015,7 +7015,7 @@ define dso_local i32 @test_atomic_load_xchg_i32_release(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp4_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i32* @var32, i32 %offset release
+   %old = atomicrmw xchg ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -7036,7 +7036,7 @@ define dso_local i64 @test_atomic_load_xchg_i64_release(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp8_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i64* @var64, i64 %offset release
+   %old = atomicrmw xchg ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -7057,7 +7057,7 @@ define dso_local void @test_atomic_load_xchg_i32_noret_release(i32 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp4_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xchg i32* @var32, i32 %offset release
+   atomicrmw xchg ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -7078,7 +7078,7 @@ define dso_local void @test_atomic_load_xchg_i64_noret_release(i64 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp8_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xchg i64* @var64, i64 %offset release
+   atomicrmw xchg ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -7099,7 +7099,7 @@ define dso_local i8 @test_atomic_load_xchg_i8_seq_cst(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw xchg ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -7120,7 +7120,7 @@ define dso_local i16 @test_atomic_load_xchg_i16_seq_cst(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i16* @var16, i16 %offset seq_cst
+   %old = atomicrmw xchg ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -7141,7 +7141,7 @@ define dso_local i32 @test_atomic_load_xchg_i32_seq_cst(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw xchg ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -7162,7 +7162,7 @@ define dso_local i64 @test_atomic_load_xchg_i64_seq_cst(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw xchg ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -7183,7 +7183,7 @@ define dso_local void @test_atomic_load_xchg_i32_noret_seq_cst(i32 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xchg i32* @var32, i32 %offset seq_cst
+   atomicrmw xchg ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -7204,7 +7204,7 @@ define dso_local void @test_atomic_load_xchg_i64_noret_seq_cst(i64 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_swp8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xchg i64* @var64, i64 %offset seq_cst
+   atomicrmw xchg ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -7232,7 +7232,7 @@ define dso_local i8 @test_atomic_load_umax_i8_acq_rel(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i8* @var8, i8 %offset acq_rel
+   %old = atomicrmw umax ptr @var8, i8 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -7260,7 +7260,7 @@ define dso_local i16 @test_atomic_load_umax_i16_acq_rel(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i16* @var16, i16 %offset acq_rel
+   %old = atomicrmw umax ptr @var16, i16 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -7287,7 +7287,7 @@ define dso_local i32 @test_atomic_load_umax_i32_acq_rel(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i32* @var32, i32 %offset acq_rel
+   %old = atomicrmw umax ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -7314,7 +7314,7 @@ define dso_local i64 @test_atomic_load_umax_i64_acq_rel(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i64* @var64, i64 %offset acq_rel
+   %old = atomicrmw umax ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -7340,7 +7340,7 @@ define dso_local void @test_atomic_load_umax_i32_noret_acq_rel(i32 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umax i32* @var32, i32 %offset acq_rel
+   atomicrmw umax ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -7365,7 +7365,7 @@ define dso_local void @test_atomic_load_umax_i64_noret_acq_rel(i64 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umax i64* @var64, i64 %offset acq_rel
+   atomicrmw umax ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -7392,7 +7392,7 @@ define dso_local i8 @test_atomic_load_umax_i8_acquire(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i8* @var8, i8 %offset acquire
+   %old = atomicrmw umax ptr @var8, i8 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -7420,7 +7420,7 @@ define dso_local i16 @test_atomic_load_umax_i16_acquire(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i16* @var16, i16 %offset acquire
+   %old = atomicrmw umax ptr @var16, i16 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -7447,7 +7447,7 @@ define dso_local i32 @test_atomic_load_umax_i32_acquire(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i32* @var32, i32 %offset acquire
+   %old = atomicrmw umax ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -7474,7 +7474,7 @@ define dso_local i64 @test_atomic_load_umax_i64_acquire(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i64* @var64, i64 %offset acquire
+   %old = atomicrmw umax ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -7500,7 +7500,7 @@ define dso_local void @test_atomic_load_umax_i32_noret_acquire(i32 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umax i32* @var32, i32 %offset acquire
+   atomicrmw umax ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -7525,7 +7525,7 @@ define dso_local void @test_atomic_load_umax_i64_noret_acquire(i64 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umax i64* @var64, i64 %offset acquire
+   atomicrmw umax ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -7552,7 +7552,7 @@ define dso_local i8 @test_atomic_load_umax_i8_monotonic(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i8* @var8, i8 %offset monotonic
+   %old = atomicrmw umax ptr @var8, i8 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -7580,7 +7580,7 @@ define dso_local i16 @test_atomic_load_umax_i16_monotonic(i16 %offset) nounwind
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i16* @var16, i16 %offset monotonic
+   %old = atomicrmw umax ptr @var16, i16 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -7607,7 +7607,7 @@ define dso_local i32 @test_atomic_load_umax_i32_monotonic(i32 %offset) nounwind
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i32* @var32, i32 %offset monotonic
+   %old = atomicrmw umax ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -7634,7 +7634,7 @@ define dso_local i64 @test_atomic_load_umax_i64_monotonic(i64 %offset) nounwind
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i64* @var64, i64 %offset monotonic
+   %old = atomicrmw umax ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -7660,7 +7660,7 @@ define dso_local void @test_atomic_load_umax_i32_noret_monotonic(i32 %offset) no
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umax i32* @var32, i32 %offset monotonic
+   atomicrmw umax ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -7685,7 +7685,7 @@ define dso_local void @test_atomic_load_umax_i64_noret_monotonic(i64 %offset) no
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umax i64* @var64, i64 %offset monotonic
+   atomicrmw umax ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -7712,7 +7712,7 @@ define dso_local i8 @test_atomic_load_umax_i8_release(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i8* @var8, i8 %offset release
+   %old = atomicrmw umax ptr @var8, i8 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -7740,7 +7740,7 @@ define dso_local i16 @test_atomic_load_umax_i16_release(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i16* @var16, i16 %offset release
+   %old = atomicrmw umax ptr @var16, i16 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -7767,7 +7767,7 @@ define dso_local i32 @test_atomic_load_umax_i32_release(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i32* @var32, i32 %offset release
+   %old = atomicrmw umax ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -7794,7 +7794,7 @@ define dso_local i64 @test_atomic_load_umax_i64_release(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i64* @var64, i64 %offset release
+   %old = atomicrmw umax ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -7820,7 +7820,7 @@ define dso_local void @test_atomic_load_umax_i32_noret_release(i32 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umax i32* @var32, i32 %offset release
+   atomicrmw umax ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -7845,7 +7845,7 @@ define dso_local void @test_atomic_load_umax_i64_noret_release(i64 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umax i64* @var64, i64 %offset release
+   atomicrmw umax ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -7872,7 +7872,7 @@ define dso_local i8 @test_atomic_load_umax_i8_seq_cst(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw umax ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -7900,7 +7900,7 @@ define dso_local i16 @test_atomic_load_umax_i16_seq_cst(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i16* @var16, i16 %offset seq_cst
+   %old = atomicrmw umax ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -7927,7 +7927,7 @@ define dso_local i32 @test_atomic_load_umax_i32_seq_cst(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw umax ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -7954,7 +7954,7 @@ define dso_local i64 @test_atomic_load_umax_i64_seq_cst(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umax i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw umax ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -7980,7 +7980,7 @@ define dso_local void @test_atomic_load_umax_i32_noret_seq_cst(i32 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umax i32* @var32, i32 %offset seq_cst
+   atomicrmw umax ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -8005,7 +8005,7 @@ define dso_local void @test_atomic_load_umax_i64_noret_seq_cst(i64 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umax i64* @var64, i64 %offset seq_cst
+   atomicrmw umax ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -8032,7 +8032,7 @@ define dso_local i8 @test_atomic_load_umin_i8_acq_rel(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i8* @var8, i8 %offset acq_rel
+   %old = atomicrmw umin ptr @var8, i8 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -8060,7 +8060,7 @@ define dso_local i16 @test_atomic_load_umin_i16_acq_rel(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i16* @var16, i16 %offset acq_rel
+   %old = atomicrmw umin ptr @var16, i16 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -8087,7 +8087,7 @@ define dso_local i32 @test_atomic_load_umin_i32_acq_rel(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i32* @var32, i32 %offset acq_rel
+   %old = atomicrmw umin ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -8114,7 +8114,7 @@ define dso_local i64 @test_atomic_load_umin_i64_acq_rel(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i64* @var64, i64 %offset acq_rel
+   %old = atomicrmw umin ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -8140,7 +8140,7 @@ define dso_local void @test_atomic_load_umin_i32_noret_acq_rel(i32 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umin i32* @var32, i32 %offset acq_rel
+   atomicrmw umin ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -8165,7 +8165,7 @@ define dso_local void @test_atomic_load_umin_i64_noret_acq_rel(i64 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umin i64* @var64, i64 %offset acq_rel
+   atomicrmw umin ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -8192,7 +8192,7 @@ define dso_local i8 @test_atomic_load_umin_i8_acquire(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i8* @var8, i8 %offset acquire
+   %old = atomicrmw umin ptr @var8, i8 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -8220,7 +8220,7 @@ define dso_local i16 @test_atomic_load_umin_i16_acquire(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i16* @var16, i16 %offset acquire
+   %old = atomicrmw umin ptr @var16, i16 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -8247,7 +8247,7 @@ define dso_local i32 @test_atomic_load_umin_i32_acquire(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i32* @var32, i32 %offset acquire
+   %old = atomicrmw umin ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -8274,7 +8274,7 @@ define dso_local i64 @test_atomic_load_umin_i64_acquire(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i64* @var64, i64 %offset acquire
+   %old = atomicrmw umin ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -8300,7 +8300,7 @@ define dso_local void @test_atomic_load_umin_i32_noret_acquire(i32 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umin i32* @var32, i32 %offset acquire
+   atomicrmw umin ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -8325,7 +8325,7 @@ define dso_local void @test_atomic_load_umin_i64_noret_acquire(i64 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umin i64* @var64, i64 %offset acquire
+   atomicrmw umin ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -8352,7 +8352,7 @@ define dso_local i8 @test_atomic_load_umin_i8_monotonic(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i8* @var8, i8 %offset monotonic
+   %old = atomicrmw umin ptr @var8, i8 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -8380,7 +8380,7 @@ define dso_local i16 @test_atomic_load_umin_i16_monotonic(i16 %offset) nounwind
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i16* @var16, i16 %offset monotonic
+   %old = atomicrmw umin ptr @var16, i16 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -8407,7 +8407,7 @@ define dso_local i32 @test_atomic_load_umin_i32_monotonic(i32 %offset) nounwind
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i32* @var32, i32 %offset monotonic
+   %old = atomicrmw umin ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -8434,7 +8434,7 @@ define dso_local i64 @test_atomic_load_umin_i64_monotonic(i64 %offset) nounwind
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i64* @var64, i64 %offset monotonic
+   %old = atomicrmw umin ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -8460,7 +8460,7 @@ define dso_local void @test_atomic_load_umin_i32_noret_monotonic(i32 %offset) no
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umin i32* @var32, i32 %offset monotonic
+   atomicrmw umin ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -8485,7 +8485,7 @@ define dso_local void @test_atomic_load_umin_i64_noret_monotonic(i64 %offset) no
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umin i64* @var64, i64 %offset monotonic
+   atomicrmw umin ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -8512,7 +8512,7 @@ define dso_local i8 @test_atomic_load_umin_i8_release(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i8* @var8, i8 %offset release
+   %old = atomicrmw umin ptr @var8, i8 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -8540,7 +8540,7 @@ define dso_local i16 @test_atomic_load_umin_i16_release(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i16* @var16, i16 %offset release
+   %old = atomicrmw umin ptr @var16, i16 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -8567,7 +8567,7 @@ define dso_local i32 @test_atomic_load_umin_i32_release(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i32* @var32, i32 %offset release
+   %old = atomicrmw umin ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -8594,7 +8594,7 @@ define dso_local i64 @test_atomic_load_umin_i64_release(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i64* @var64, i64 %offset release
+   %old = atomicrmw umin ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -8620,7 +8620,7 @@ define dso_local void @test_atomic_load_umin_i32_noret_release(i32 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umin i32* @var32, i32 %offset release
+   atomicrmw umin ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -8645,7 +8645,7 @@ define dso_local void @test_atomic_load_umin_i64_noret_release(i64 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umin i64* @var64, i64 %offset release
+   atomicrmw umin ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -8672,7 +8672,7 @@ define dso_local i8 @test_atomic_load_umin_i8_seq_cst(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw umin ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -8700,7 +8700,7 @@ define dso_local i16 @test_atomic_load_umin_i16_seq_cst(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i16* @var16, i16 %offset seq_cst
+   %old = atomicrmw umin ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -8727,7 +8727,7 @@ define dso_local i32 @test_atomic_load_umin_i32_seq_cst(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov w0, w8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw umin ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -8754,7 +8754,7 @@ define dso_local i64 @test_atomic_load_umin_i64_seq_cst(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    mov x0, x8
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw umin i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw umin ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -8780,7 +8780,7 @@ define dso_local void @test_atomic_load_umin_i32_noret_seq_cst(i32 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umin i32* @var32, i32 %offset seq_cst
+   atomicrmw umin ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -8805,7 +8805,7 @@ define dso_local void @test_atomic_load_umin_i64_noret_seq_cst(i64 %offset) noun
 ; OUTLINE-ATOMICS-NEXT:    cbnz w10, .LBB[[LOOPSTART]]
 ; OUTLINE-ATOMICS-NEXT:  // %bb.2: // %atomicrmw.end
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw umin i64* @var64, i64 %offset seq_cst
+   atomicrmw umin ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -8825,7 +8825,7 @@ define dso_local i8 @test_atomic_load_xor_i8_acq_rel(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i8* @var8, i8 %offset acq_rel
+   %old = atomicrmw xor ptr @var8, i8 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -8846,7 +8846,7 @@ define dso_local i16 @test_atomic_load_xor_i16_acq_rel(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i16* @var16, i16 %offset acq_rel
+   %old = atomicrmw xor ptr @var16, i16 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -8867,7 +8867,7 @@ define dso_local i32 @test_atomic_load_xor_i32_acq_rel(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i32* @var32, i32 %offset acq_rel
+   %old = atomicrmw xor ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -8888,7 +8888,7 @@ define dso_local i64 @test_atomic_load_xor_i64_acq_rel(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i64* @var64, i64 %offset acq_rel
+   %old = atomicrmw xor ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -8909,7 +8909,7 @@ define dso_local void @test_atomic_load_xor_i32_noret_acq_rel(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xor i32* @var32, i32 %offset acq_rel
+   atomicrmw xor ptr @var32, i32 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -8929,7 +8929,7 @@ define dso_local void @test_atomic_load_xor_i64_noret_acq_rel(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xor i64* @var64, i64 %offset acq_rel
+   atomicrmw xor ptr @var64, i64 %offset acq_rel
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -8949,7 +8949,7 @@ define dso_local i8 @test_atomic_load_xor_i8_acquire(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor1_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i8* @var8, i8 %offset acquire
+   %old = atomicrmw xor ptr @var8, i8 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -8970,7 +8970,7 @@ define dso_local i16 @test_atomic_load_xor_i16_acquire(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor2_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i16* @var16, i16 %offset acquire
+   %old = atomicrmw xor ptr @var16, i16 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -8991,7 +8991,7 @@ define dso_local i32 @test_atomic_load_xor_i32_acquire(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor4_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i32* @var32, i32 %offset acquire
+   %old = atomicrmw xor ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -9012,7 +9012,7 @@ define dso_local i64 @test_atomic_load_xor_i64_acquire(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor8_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i64* @var64, i64 %offset acquire
+   %old = atomicrmw xor ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -9033,7 +9033,7 @@ define dso_local void @test_atomic_load_xor_i32_noret_acquire(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor4_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xor i32* @var32, i32 %offset acquire
+   atomicrmw xor ptr @var32, i32 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -9053,7 +9053,7 @@ define dso_local void @test_atomic_load_xor_i64_noret_acquire(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor8_acq
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xor i64* @var64, i64 %offset acquire
+   atomicrmw xor ptr @var64, i64 %offset acquire
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -9073,7 +9073,7 @@ define dso_local i8 @test_atomic_load_xor_i8_monotonic(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor1_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i8* @var8, i8 %offset monotonic
+   %old = atomicrmw xor ptr @var8, i8 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -9094,7 +9094,7 @@ define dso_local i16 @test_atomic_load_xor_i16_monotonic(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor2_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i16* @var16, i16 %offset monotonic
+   %old = atomicrmw xor ptr @var16, i16 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -9115,7 +9115,7 @@ define dso_local i32 @test_atomic_load_xor_i32_monotonic(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor4_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i32* @var32, i32 %offset monotonic
+   %old = atomicrmw xor ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -9136,7 +9136,7 @@ define dso_local i64 @test_atomic_load_xor_i64_monotonic(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor8_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i64* @var64, i64 %offset monotonic
+   %old = atomicrmw xor ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -9157,7 +9157,7 @@ define dso_local void @test_atomic_load_xor_i32_noret_monotonic(i32 %offset) nou
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor4_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xor i32* @var32, i32 %offset monotonic
+   atomicrmw xor ptr @var32, i32 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -9177,7 +9177,7 @@ define dso_local void @test_atomic_load_xor_i64_noret_monotonic(i64 %offset) nou
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor8_relax
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xor i64* @var64, i64 %offset monotonic
+   atomicrmw xor ptr @var64, i64 %offset monotonic
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -9197,7 +9197,7 @@ define dso_local i8 @test_atomic_load_xor_i8_release(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor1_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i8* @var8, i8 %offset release
+   %old = atomicrmw xor ptr @var8, i8 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -9218,7 +9218,7 @@ define dso_local i16 @test_atomic_load_xor_i16_release(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor2_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i16* @var16, i16 %offset release
+   %old = atomicrmw xor ptr @var16, i16 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -9239,7 +9239,7 @@ define dso_local i32 @test_atomic_load_xor_i32_release(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor4_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i32* @var32, i32 %offset release
+   %old = atomicrmw xor ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -9260,7 +9260,7 @@ define dso_local i64 @test_atomic_load_xor_i64_release(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor8_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i64* @var64, i64 %offset release
+   %old = atomicrmw xor ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -9281,7 +9281,7 @@ define dso_local void @test_atomic_load_xor_i32_noret_release(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor4_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xor i32* @var32, i32 %offset release
+   atomicrmw xor ptr @var32, i32 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -9301,7 +9301,7 @@ define dso_local void @test_atomic_load_xor_i64_noret_release(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor8_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xor i64* @var64, i64 %offset release
+   atomicrmw xor ptr @var64, i64 %offset release
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -9321,7 +9321,7 @@ define dso_local i8 @test_atomic_load_xor_i8_seq_cst(i8 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor1_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw xor ptr @var8, i8 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var8
@@ -9342,7 +9342,7 @@ define dso_local i16 @test_atomic_load_xor_i16_seq_cst(i16 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor2_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i16* @var16, i16 %offset seq_cst
+   %old = atomicrmw xor ptr @var16, i16 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var16
@@ -9363,7 +9363,7 @@ define dso_local i32 @test_atomic_load_xor_i32_seq_cst(i32 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw xor ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -9384,7 +9384,7 @@ define dso_local i64 @test_atomic_load_xor_i64_seq_cst(i64 %offset) nounwind {
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw xor ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -9405,7 +9405,7 @@ define dso_local void @test_atomic_load_xor_i32_noret_seq_cst(i32 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor4_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xor i32* @var32, i32 %offset seq_cst
+   atomicrmw xor ptr @var32, i32 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
@@ -9425,7 +9425,7 @@ define dso_local void @test_atomic_load_xor_i64_noret_seq_cst(i64 %offset) nounw
 ; OUTLINE-ATOMICS-NEXT:    bl __aarch64_ldeor8_acq_rel
 ; OUTLINE-ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-   atomicrmw xor i64* @var64, i64 %offset seq_cst
+   atomicrmw xor ptr @var64, i64 %offset seq_cst
 ; CHECK-NOT: dmb
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
@@ -9442,6 +9442,6 @@ define dso_local i128 @test_atomic_load_i128() nounwind {
 ; OUTLINE-ATOMICS-LABEL: test_atomic_load_i128:
 ; OUTLINE-ATOMICS: ldxp
 ; OUTLINE-ATOMICS: stxp
-   %pair = load atomic i128, i128* @var128 monotonic, align 16
+   %pair = load atomic i128, ptr @var128 monotonic, align 16
    ret i128 %pair
 }
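
The hunks above change only the IR spelling of the pointer operand (i8*/i16*/i32*/i64* all become ptr); the CHECK and OUTLINE-ATOMICS lines are untouched because the generated code is identical, which is what makes the conversion NFC. The outlined helpers visible here encode the access size and ordering in their names, __aarch64_ldeor{1,2,4,8}_{relax,acq,rel,acq_rel}, with seq_cst RMWs mapping to the _acq_rel variant. A minimal post-conversion sketch of one of these tests (the @var32 global is defined elsewhere in the test file and is assumed here):

@var32 = dso_local global i32 0

define dso_local i32 @test_atomic_load_xor_i32_acquire(i32 %offset) nounwind {
  ; with -mattr=+outline-atomics this lowers to "bl __aarch64_ldeor4_acq"
  %old = atomicrmw xor ptr @var32, i32 %offset acquire
  ret i32 %old
}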

diff --git a/llvm/test/CodeGen/AArch64/atomic-ops-not-barriers.ll b/llvm/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
index a14c3416e576c..b45d5246ddbf0 100644
--- a/llvm/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
+++ b/llvm/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
@@ -1,18 +1,18 @@
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -mattr=+outline-atomics < %s | FileCheck %s --check-prefix=OUTLINE-ATOMICS
 
-define i32 @foo(i32* %var, i1 %cond) {
+define i32 @foo(ptr %var, i1 %cond) {
 ; OUTLINE-ATOMICS: bl __aarch64_ldadd4_relax
 ; CHECK-LABEL: foo:
   br i1 %cond, label %atomic_ver, label %simple_ver
 simple_ver:
-  %oldval = load i32, i32* %var
+  %oldval = load i32, ptr %var
   %newval = add nsw i32 %oldval, -1
-  store i32 %newval, i32* %var
+  store i32 %newval, ptr %var
   br label %somewhere
 atomic_ver:
   fence seq_cst
-  %val = atomicrmw add i32* %var, i32 -1 monotonic
+  %val = atomicrmw add ptr %var, i32 -1 monotonic
   fence seq_cst
   br label %somewhere
 ; CHECK: dmb
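
For atomic-ops-not-barriers.ll the conversion touches the function signature (i32* %var becomes ptr %var) as well as the plain load/store and the atomicrmw, but the point of the test is unchanged: the monotonic RMW emits no barrier of its own, and the dmb that the CHECK line looks for comes from the explicit fences. A trimmed sketch of just the atomic path, under a hypothetical function name:

define i32 @fenced_decrement(ptr %var) {
  fence seq_cst                                    ; this is what produces the dmb
  %val = atomicrmw add ptr %var, i32 -1 monotonic  ; relaxed RMW, no barrier of its own
  fence seq_cst
  ret i32 %val
}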

diff --git a/llvm/test/CodeGen/AArch64/atomic-ops.ll b/llvm/test/CodeGen/AArch64/atomic-ops.ll
index 4f8b17c935099..4b227c881f385 100644
--- a/llvm/test/CodeGen/AArch64/atomic-ops.ll
+++ b/llvm/test/CodeGen/AArch64/atomic-ops.ll
@@ -30,7 +30,7 @@ define dso_local i8 @test_atomic_load_add_i8(i8 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldadd1_acq_rel
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw add i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw add ptr @var8, i8 %offset seq_cst
    ret i8 %old
 }
 
@@ -57,7 +57,7 @@ define dso_local i16 @test_atomic_load_add_i16(i16 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldadd2_acq
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw add i16* @var16, i16 %offset acquire
+   %old = atomicrmw add ptr @var16, i16 %offset acquire
    ret i16 %old
 }
 
@@ -84,7 +84,7 @@ define dso_local i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldadd4_rel
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw add i32* @var32, i32 %offset release
+   %old = atomicrmw add ptr @var32, i32 %offset release
    ret i32 %old
 }
 
@@ -111,7 +111,7 @@ define dso_local i64 @test_atomic_load_add_i64(i64 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldadd8_relax
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw add i64* @var64, i64 %offset monotonic
+   %old = atomicrmw add ptr @var64, i64 %offset monotonic
    ret i64 %old
 }
 
@@ -139,7 +139,7 @@ define dso_local i8 @test_atomic_load_sub_i8(i8 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldadd1_relax
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw sub i8* @var8, i8 %offset monotonic
+   %old = atomicrmw sub ptr @var8, i8 %offset monotonic
    ret i8 %old
 }
 
@@ -167,7 +167,7 @@ define dso_local i16 @test_atomic_load_sub_i16(i16 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldadd2_rel
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw sub i16* @var16, i16 %offset release
+   %old = atomicrmw sub ptr @var16, i16 %offset release
    ret i16 %old
 }
 
@@ -195,7 +195,7 @@ define dso_local i32 @test_atomic_load_sub_i32(i32 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldadd4_acq
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw sub i32* @var32, i32 %offset acquire
+   %old = atomicrmw sub ptr @var32, i32 %offset acquire
    ret i32 %old
 }
 
@@ -223,7 +223,7 @@ define dso_local i64 @test_atomic_load_sub_i64(i64 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldadd8_acq_rel
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw sub i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw sub ptr @var64, i64 %offset seq_cst
    ret i64 %old
 }
 
@@ -251,7 +251,7 @@ define dso_local i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldclr1_rel
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw and i8* @var8, i8 %offset release
+   %old = atomicrmw and ptr @var8, i8 %offset release
    ret i8 %old
 }
 
@@ -279,7 +279,7 @@ define dso_local i16 @test_atomic_load_and_i16(i16 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldclr2_relax
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw and i16* @var16, i16 %offset monotonic
+   %old = atomicrmw and ptr @var16, i16 %offset monotonic
    ret i16 %old
 }
 
@@ -307,7 +307,7 @@ define dso_local i32 @test_atomic_load_and_i32(i32 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldclr4_acq_rel
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw and i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw and ptr @var32, i32 %offset seq_cst
    ret i32 %old
 }
 
@@ -335,7 +335,7 @@ define dso_local i64 @test_atomic_load_and_i64(i64 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldclr8_acq
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw and i64* @var64, i64 %offset acquire
+   %old = atomicrmw and ptr @var64, i64 %offset acquire
    ret i64 %old
 }
 
@@ -362,7 +362,7 @@ define dso_local i8 @test_atomic_load_or_i8(i8 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldset1_acq_rel
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw or i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw or ptr @var8, i8 %offset seq_cst
    ret i8 %old
 }
 
@@ -389,7 +389,7 @@ define dso_local i16 @test_atomic_load_or_i16(i16 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldset2_relax
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw or i16* @var16, i16 %offset monotonic
+   %old = atomicrmw or ptr @var16, i16 %offset monotonic
    ret i16 %old
 }
 
@@ -416,7 +416,7 @@ define dso_local i32 @test_atomic_load_or_i32(i32 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldset4_acq
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw or i32* @var32, i32 %offset acquire
+   %old = atomicrmw or ptr @var32, i32 %offset acquire
    ret i32 %old
 }
 
@@ -443,7 +443,7 @@ define dso_local i64 @test_atomic_load_or_i64(i64 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldset8_rel
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw or i64* @var64, i64 %offset release
+   %old = atomicrmw or ptr @var64, i64 %offset release
    ret i64 %old
 }
 
@@ -470,7 +470,7 @@ define dso_local i8 @test_atomic_load_xor_i8(i8 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldeor1_acq
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i8* @var8, i8 %offset acquire
+   %old = atomicrmw xor ptr @var8, i8 %offset acquire
    ret i8 %old
 }
 
@@ -497,7 +497,7 @@ define dso_local i16 @test_atomic_load_xor_i16(i16 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldeor2_rel
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i16* @var16, i16 %offset release
+   %old = atomicrmw xor ptr @var16, i16 %offset release
    ret i16 %old
 }
 
@@ -524,7 +524,7 @@ define dso_local i32 @test_atomic_load_xor_i32(i32 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldeor4_acq_rel
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw xor ptr @var32, i32 %offset seq_cst
    ret i32 %old
 }
 
@@ -551,7 +551,7 @@ define dso_local i64 @test_atomic_load_xor_i64(i64 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_ldeor8_relax
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw xor i64* @var64, i64 %offset monotonic
+   %old = atomicrmw xor ptr @var64, i64 %offset monotonic
    ret i64 %old
 }
 
@@ -578,7 +578,7 @@ define dso_local i8 @test_atomic_load_xchg_i8(i8 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_swp1_relax
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i8* @var8, i8 %offset monotonic
+   %old = atomicrmw xchg ptr @var8, i8 %offset monotonic
    ret i8 %old
 }
 
@@ -605,7 +605,7 @@ define dso_local i16 @test_atomic_load_xchg_i16(i16 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_swp2_acq_rel
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i16* @var16, i16 %offset seq_cst
+   %old = atomicrmw xchg ptr @var16, i16 %offset seq_cst
    ret i16 %old
 }
 
@@ -632,7 +632,7 @@ define dso_local i32 @test_atomic_load_xchg_i32(i32 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_swp4_rel
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i32* @var32, i32 %offset release
+   %old = atomicrmw xchg ptr @var32, i32 %offset release
    ret i32 %old
 }
 
@@ -658,7 +658,7 @@ define dso_local i64 @test_atomic_load_xchg_i64(i64 %offset) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_swp8_acq
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %old = atomicrmw xchg i64* @var64, i64 %offset acquire
+   %old = atomicrmw xchg ptr @var64, i64 %offset acquire
    ret i64 %old
 }
 
@@ -679,7 +679,7 @@ define dso_local i8 @test_atomic_load_min_i8(i8 %offset) nounwind {
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-NEXT:    mov w0, w8
 ; CHECK-NEXT:    ret
-   %old = atomicrmw min i8* @var8, i8 %offset acquire
+   %old = atomicrmw min ptr @var8, i8 %offset acquire
    ret i8 %old
 }
 
@@ -699,7 +699,7 @@ define dso_local i16 @test_atomic_load_min_i16(i16 %offset) nounwind {
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-NEXT:    mov w0, w8
 ; CHECK-NEXT:    ret
-   %old = atomicrmw min i16* @var16, i16 %offset release
+   %old = atomicrmw min ptr @var16, i16 %offset release
    ret i16 %old
 }
 
@@ -718,7 +718,7 @@ define dso_local i32 @test_atomic_load_min_i32(i32 %offset) nounwind {
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-NEXT:    mov w0, w8
 ; CHECK-NEXT:    ret
-   %old = atomicrmw min i32* @var32, i32 %offset monotonic
+   %old = atomicrmw min ptr @var32, i32 %offset monotonic
    ret i32 %old
 }
 
@@ -737,7 +737,7 @@ define dso_local i64 @test_atomic_load_min_i64(i64 %offset) nounwind {
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-NEXT:    mov x0, x8
 ; CHECK-NEXT:    ret
-   %old = atomicrmw min i64* @var64, i64 %offset seq_cst
+   %old = atomicrmw min ptr @var64, i64 %offset seq_cst
    ret i64 %old
 }
 
@@ -757,7 +757,7 @@ define dso_local i8 @test_atomic_load_max_i8(i8 %offset) nounwind {
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-NEXT:    mov w0, w8
 ; CHECK-NEXT:    ret
-   %old = atomicrmw max i8* @var8, i8 %offset seq_cst
+   %old = atomicrmw max ptr @var8, i8 %offset seq_cst
    ret i8 %old
 }
 
@@ -777,7 +777,7 @@ define dso_local i16 @test_atomic_load_max_i16(i16 %offset) nounwind {
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-NEXT:    mov w0, w8
 ; CHECK-NEXT:    ret
-   %old = atomicrmw max i16* @var16, i16 %offset acquire
+   %old = atomicrmw max ptr @var16, i16 %offset acquire
    ret i16 %old
 }
 
@@ -796,7 +796,7 @@ define dso_local i32 @test_atomic_load_max_i32(i32 %offset) nounwind {
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-NEXT:    mov w0, w8
 ; CHECK-NEXT:    ret
-   %old = atomicrmw max i32* @var32, i32 %offset release
+   %old = atomicrmw max ptr @var32, i32 %offset release
    ret i32 %old
 }
 
@@ -815,7 +815,7 @@ define dso_local i64 @test_atomic_load_max_i64(i64 %offset) nounwind {
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-NEXT:    mov x0, x8
 ; CHECK-NEXT:    ret
-   %old = atomicrmw max i64* @var64, i64 %offset monotonic
+   %old = atomicrmw max ptr @var64, i64 %offset monotonic
    ret i64 %old
 }
 
@@ -835,7 +835,7 @@ define dso_local i8 @test_atomic_load_umin_i8(i8 %offset) nounwind {
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; CHECK-NEXT:    ret
-   %old = atomicrmw umin i8* @var8, i8 %offset monotonic
+   %old = atomicrmw umin ptr @var8, i8 %offset monotonic
    ret i8 %old
 }
 
@@ -855,7 +855,7 @@ define dso_local i16 @test_atomic_load_umin_i16(i16 %offset) nounwind {
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; CHECK-NEXT:    ret
-   %old = atomicrmw umin i16* @var16, i16 %offset acquire
+   %old = atomicrmw umin ptr @var16, i16 %offset acquire
    ret i16 %old
 }
 
@@ -874,7 +874,7 @@ define dso_local i32 @test_atomic_load_umin_i32(i32 %offset) nounwind {
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-NEXT:    mov w0, w8
 ; CHECK-NEXT:    ret
-   %old = atomicrmw umin i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw umin ptr @var32, i32 %offset seq_cst
    ret i32 %old
 }
 
@@ -893,7 +893,7 @@ define dso_local i64 @test_atomic_load_umin_i64(i64 %offset) nounwind {
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-NEXT:    mov x0, x8
 ; CHECK-NEXT:    ret
-   %old = atomicrmw umin i64* @var64, i64 %offset acq_rel
+   %old = atomicrmw umin ptr @var64, i64 %offset acq_rel
    ret i64 %old
 }
 
@@ -913,7 +913,7 @@ define dso_local i8 @test_atomic_load_umax_i8(i8 %offset) nounwind {
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; CHECK-NEXT:    ret
-   %old = atomicrmw umax i8* @var8, i8 %offset acq_rel
+   %old = atomicrmw umax ptr @var8, i8 %offset acq_rel
    ret i8 %old
 }
 
@@ -933,7 +933,7 @@ define dso_local i16 @test_atomic_load_umax_i16(i16 %offset) nounwind {
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; CHECK-NEXT:    ret
-   %old = atomicrmw umax i16* @var16, i16 %offset monotonic
+   %old = atomicrmw umax ptr @var16, i16 %offset monotonic
    ret i16 %old
 }
 
@@ -952,7 +952,7 @@ define dso_local i32 @test_atomic_load_umax_i32(i32 %offset) nounwind {
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-NEXT:    mov w0, w8
 ; CHECK-NEXT:    ret
-   %old = atomicrmw umax i32* @var32, i32 %offset seq_cst
+   %old = atomicrmw umax ptr @var32, i32 %offset seq_cst
    ret i32 %old
 }
 
@@ -971,7 +971,7 @@ define dso_local i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
 ; CHECK-NEXT:  // %bb.2: // %atomicrmw.end
 ; CHECK-NEXT:    mov x0, x8
 ; CHECK-NEXT:    ret
-   %old = atomicrmw umax i64* @var64, i64 %offset release
+   %old = atomicrmw umax ptr @var64, i64 %offset release
    ret i64 %old
 }
 
@@ -1007,7 +1007,7 @@ define dso_local i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_cas1_acq
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %pair = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire acquire
+   %pair = cmpxchg ptr @var8, i8 %wanted, i8 %new acquire acquire
    %old = extractvalue { i8, i1 } %pair, 0
    ret i8 %old
 }
@@ -1044,7 +1044,7 @@ define dso_local i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_cas2_acq_rel
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %pair = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst seq_cst
+   %pair = cmpxchg ptr @var16, i16 %wanted, i16 %new seq_cst seq_cst
    %old = extractvalue { i16, i1 } %pair, 0
    ret i16 %old
 }
@@ -1080,7 +1080,7 @@ define dso_local i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    bl __aarch64_cas4_rel
 ; OUTLINE_ATOMICS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %pair = cmpxchg i32* @var32, i32 %wanted, i32 %new release monotonic
+   %pair = cmpxchg ptr @var32, i32 %wanted, i32 %new release monotonic
    %old = extractvalue { i32, i1 } %pair, 0
    ret i32 %old
 }
@@ -1117,9 +1117,9 @@ define dso_local void @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
 ; OUTLINE_ATOMICS-NEXT:    str x0, [x19]
 ; OUTLINE_ATOMICS-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
 ; OUTLINE_ATOMICS-NEXT:    ret
-   %pair = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic monotonic
+   %pair = cmpxchg ptr @var64, i64 %wanted, i64 %new monotonic monotonic
    %old = extractvalue { i64, i1 } %pair, 0
-   store i64 %old, i64* @var64
+   store i64 %old, ptr @var64
    ret void
 }
 
@@ -1129,7 +1129,7 @@ define dso_local i8 @test_atomic_load_monotonic_i8() nounwind {
 ; CHECK-NEXT:    adrp x8, var8
 ; CHECK-NEXT:    ldrb w0, [x8, :lo12:var8]
 ; CHECK-NEXT:    ret
-  %val = load atomic i8, i8* @var8 monotonic, align 1
+  %val = load atomic i8, ptr @var8 monotonic, align 1
   ret i8 %val
 }
 
@@ -1139,8 +1139,8 @@ define dso_local i8 @test_atomic_load_monotonic_regoff_i8(i64 %base, i64 %off) n
 ; CHECK-NEXT:    ldrb w0, [x0, x1]
 ; CHECK-NEXT:    ret
   %addr_int = add i64 %base, %off
-  %addr = inttoptr i64 %addr_int to i8*
-  %val = load atomic i8, i8* %addr monotonic, align 1
+  %addr = inttoptr i64 %addr_int to ptr
+  %val = load atomic i8, ptr %addr monotonic, align 1
   ret i8 %val
 }
 
@@ -1151,7 +1151,7 @@ define dso_local i8 @test_atomic_load_acquire_i8() nounwind {
 ; CHECK-NEXT:    add x8, x8, :lo12:var8
 ; CHECK-NEXT:    ldarb w0, [x8]
 ; CHECK-NEXT:    ret
-  %val = load atomic i8, i8* @var8 acquire, align 1
+  %val = load atomic i8, ptr @var8 acquire, align 1
   ret i8 %val
 }
 
@@ -1162,7 +1162,7 @@ define dso_local i8 @test_atomic_load_seq_cst_i8() nounwind {
 ; CHECK-NEXT:    add x8, x8, :lo12:var8
 ; CHECK-NEXT:    ldarb w0, [x8]
 ; CHECK-NEXT:    ret
-  %val = load atomic i8, i8* @var8 seq_cst, align 1
+  %val = load atomic i8, ptr @var8 seq_cst, align 1
   ret i8 %val
 }
 
@@ -1172,7 +1172,7 @@ define dso_local i16 @test_atomic_load_monotonic_i16() nounwind {
 ; CHECK-NEXT:    adrp x8, var16
 ; CHECK-NEXT:    ldrh w0, [x8, :lo12:var16]
 ; CHECK-NEXT:    ret
-  %val = load atomic i16, i16* @var16 monotonic, align 2
+  %val = load atomic i16, ptr @var16 monotonic, align 2
   ret i16 %val
 }
 
@@ -1182,8 +1182,8 @@ define dso_local i32 @test_atomic_load_monotonic_regoff_i32(i64 %base, i64 %off)
 ; CHECK-NEXT:    ldr w0, [x0, x1]
 ; CHECK-NEXT:    ret
   %addr_int = add i64 %base, %off
-  %addr = inttoptr i64 %addr_int to i32*
-  %val = load atomic i32, i32* %addr monotonic, align 4
+  %addr = inttoptr i64 %addr_int to ptr
+  %val = load atomic i32, ptr %addr monotonic, align 4
   ret i32 %val
 }
 
@@ -1194,7 +1194,7 @@ define dso_local i64 @test_atomic_load_seq_cst_i64() nounwind {
 ; CHECK-NEXT:    add x8, x8, :lo12:var64
 ; CHECK-NEXT:    ldar x0, [x8]
 ; CHECK-NEXT:    ret
-  %val = load atomic i64, i64* @var64 seq_cst, align 8
+  %val = load atomic i64, ptr @var64 seq_cst, align 8
   ret i64 %val
 }
 
@@ -1204,7 +1204,7 @@ define dso_local void @test_atomic_store_monotonic_i8(i8 %val) nounwind {
 ; CHECK-NEXT:    adrp x8, var8
 ; CHECK-NEXT:    strb w0, [x8, :lo12:var8]
 ; CHECK-NEXT:    ret
-  store atomic i8 %val, i8* @var8 monotonic, align 1
+  store atomic i8 %val, ptr @var8 monotonic, align 1
   ret void
 }
 
@@ -1214,8 +1214,8 @@ define dso_local void @test_atomic_store_monotonic_regoff_i8(i64 %base, i64 %off
 ; CHECK-NEXT:    strb w2, [x0, x1]
 ; CHECK-NEXT:    ret
   %addr_int = add i64 %base, %off
-  %addr = inttoptr i64 %addr_int to i8*
-  store atomic i8 %val, i8* %addr monotonic, align 1
+  %addr = inttoptr i64 %addr_int to ptr
+  store atomic i8 %val, ptr %addr monotonic, align 1
   ret void
 }
 define dso_local void @test_atomic_store_release_i8(i8 %val) nounwind {
@@ -1225,7 +1225,7 @@ define dso_local void @test_atomic_store_release_i8(i8 %val) nounwind {
 ; CHECK-NEXT:    add x8, x8, :lo12:var8
 ; CHECK-NEXT:    stlrb w0, [x8]
 ; CHECK-NEXT:    ret
-  store atomic i8 %val, i8* @var8 release, align 1
+  store atomic i8 %val, ptr @var8 release, align 1
   ret void
 }
 
@@ -1236,7 +1236,7 @@ define dso_local void @test_atomic_store_seq_cst_i8(i8 %val) nounwind {
 ; CHECK-NEXT:    add x8, x8, :lo12:var8
 ; CHECK-NEXT:    stlrb w0, [x8]
 ; CHECK-NEXT:    ret
-  store atomic i8 %val, i8* @var8 seq_cst, align 1
+  store atomic i8 %val, ptr @var8 seq_cst, align 1
   ret void
 }
 
@@ -1246,7 +1246,7 @@ define dso_local void @test_atomic_store_monotonic_i16(i16 %val) nounwind {
 ; CHECK-NEXT:    adrp x8, var16
 ; CHECK-NEXT:    strh w0, [x8, :lo12:var16]
 ; CHECK-NEXT:    ret
-  store atomic i16 %val, i16* @var16 monotonic, align 2
+  store atomic i16 %val, ptr @var16 monotonic, align 2
   ret void
 }
 
@@ -1256,8 +1256,8 @@ define dso_local void @test_atomic_store_monotonic_regoff_i32(i64 %base, i64 %of
 ; CHECK-NEXT:    str w2, [x0, x1]
 ; CHECK-NEXT:    ret
   %addr_int = add i64 %base, %off
-  %addr = inttoptr i64 %addr_int to i32*
-  store atomic i32 %val, i32* %addr monotonic, align 4
+  %addr = inttoptr i64 %addr_int to ptr
+  store atomic i32 %val, ptr %addr monotonic, align 4
   ret void
 }
 
@@ -1268,6 +1268,6 @@ define dso_local void @test_atomic_store_release_i64(i64 %val) nounwind {
 ; CHECK-NEXT:    add x8, x8, :lo12:var64
 ; CHECK-NEXT:    stlr x0, [x8]
 ; CHECK-NEXT:    ret
-  store atomic i64 %val, i64* @var64 release, align 8
+  store atomic i64 %val, ptr @var64 release, align 8
   ret void
 }
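
Two details in atomic-ops.ll are worth calling out. First, cmpxchg keeps its { iN, i1 } result and the extractvalue unchanged; only the pointer operand loses its element type. Second, inttoptr now always produces a plain ptr, and the access width is carried entirely by the atomic load or store that uses it, as in the register-offset tests. Post-conversion, that pattern reads:

define dso_local i8 @test_atomic_load_monotonic_regoff_i8(i64 %base, i64 %off) nounwind {
  %addr_int = add i64 %base, %off
  %addr = inttoptr i64 %addr_int to ptr           ; was "to i8*"
  %val = load atomic i8, ptr %addr monotonic, align 1
  ret i8 %val
}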

diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll b/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
index 3fd7d9addf8c0..37a7782caeed9 100644
--- a/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
@@ -4,7 +4,7 @@
 
 ; Ensure there's no stack spill in between ldxr/stxr pairs.
 
-define i8 @test_rmw_add_8(i8* %dst)   {
+define i8 @test_rmw_add_8(ptr %dst)   {
 ; NOLSE-LABEL: test_rmw_add_8:
 ; NOLSE:       // %bb.0: // %entry
 ; NOLSE-NEXT:    sub sp, sp, #32
@@ -49,11 +49,11 @@ define i8 @test_rmw_add_8(i8* %dst)   {
 ; LSE-NEXT:    ldaddalb w8, w0, [x0]
 ; LSE-NEXT:    ret
 entry:
-  %res = atomicrmw add i8* %dst, i8 1 seq_cst
+  %res = atomicrmw add ptr %dst, i8 1 seq_cst
   ret i8 %res
 }
 
-define i16 @test_rmw_add_16(i16* %dst)   {
+define i16 @test_rmw_add_16(ptr %dst)   {
 ; NOLSE-LABEL: test_rmw_add_16:
 ; NOLSE:       // %bb.0: // %entry
 ; NOLSE-NEXT:    sub sp, sp, #32
@@ -98,11 +98,11 @@ define i16 @test_rmw_add_16(i16* %dst)   {
 ; LSE-NEXT:    ldaddalh w8, w0, [x0]
 ; LSE-NEXT:    ret
 entry:
-  %res = atomicrmw add i16* %dst, i16 1 seq_cst
+  %res = atomicrmw add ptr %dst, i16 1 seq_cst
   ret i16 %res
 }
 
-define i32 @test_rmw_add_32(i32* %dst)   {
+define i32 @test_rmw_add_32(ptr %dst)   {
 ; NOLSE-LABEL: test_rmw_add_32:
 ; NOLSE:       // %bb.0: // %entry
 ; NOLSE-NEXT:    sub sp, sp, #32
@@ -147,11 +147,11 @@ define i32 @test_rmw_add_32(i32* %dst)   {
 ; LSE-NEXT:    ldaddal w8, w0, [x0]
 ; LSE-NEXT:    ret
 entry:
-  %res = atomicrmw add i32* %dst, i32 1 seq_cst
+  %res = atomicrmw add ptr %dst, i32 1 seq_cst
   ret i32 %res
 }
 
-define i64 @test_rmw_add_64(i64* %dst)   {
+define i64 @test_rmw_add_64(ptr %dst)   {
 ; NOLSE-LABEL: test_rmw_add_64:
 ; NOLSE:       // %bb.0: // %entry
 ; NOLSE-NEXT:    sub sp, sp, #32
@@ -197,11 +197,11 @@ define i64 @test_rmw_add_64(i64* %dst)   {
 ; LSE-NEXT:    ldaddal x8, x0, [x0]
 ; LSE-NEXT:    ret
 entry:
-  %res = atomicrmw add i64* %dst, i64 1 seq_cst
+  %res = atomicrmw add ptr %dst, i64 1 seq_cst
   ret i64 %res
 }
 
-define i128 @test_rmw_add_128(i128* %dst)   {
+define i128 @test_rmw_add_128(ptr %dst)   {
 ; NOLSE-LABEL: test_rmw_add_128:
 ; NOLSE:       // %bb.0: // %entry
 ; NOLSE-NEXT:    sub sp, sp, #48
@@ -294,10 +294,10 @@ define i128 @test_rmw_add_128(i128* %dst)   {
 ; LSE-NEXT:    add sp, sp, #48
 ; LSE-NEXT:    ret
 entry:
-  %res = atomicrmw add i128* %dst, i128 1 seq_cst
+  %res = atomicrmw add ptr %dst, i128 1 seq_cst
   ret i128 %res
 }
-define i8 @test_rmw_nand_8(i8* %dst)   {
+define i8 @test_rmw_nand_8(ptr %dst)   {
 ; NOLSE-LABEL: test_rmw_nand_8:
 ; NOLSE:       // %bb.0: // %entry
 ; NOLSE-NEXT:    sub sp, sp, #32
@@ -365,11 +365,11 @@ define i8 @test_rmw_nand_8(i8* %dst)   {
 ; LSE-NEXT:    add sp, sp, #32
 ; LSE-NEXT:    ret
 entry:
-  %res = atomicrmw nand i8* %dst, i8 1 seq_cst
+  %res = atomicrmw nand ptr %dst, i8 1 seq_cst
   ret i8 %res
 }
 
-define i16 @test_rmw_nand_16(i16* %dst)   {
+define i16 @test_rmw_nand_16(ptr %dst)   {
 ; NOLSE-LABEL: test_rmw_nand_16:
 ; NOLSE:       // %bb.0: // %entry
 ; NOLSE-NEXT:    sub sp, sp, #32
@@ -437,11 +437,11 @@ define i16 @test_rmw_nand_16(i16* %dst)   {
 ; LSE-NEXT:    add sp, sp, #32
 ; LSE-NEXT:    ret
 entry:
-  %res = atomicrmw nand i16* %dst, i16 1 seq_cst
+  %res = atomicrmw nand ptr %dst, i16 1 seq_cst
   ret i16 %res
 }
 
-define i32 @test_rmw_nand_32(i32* %dst)   {
+define i32 @test_rmw_nand_32(ptr %dst)   {
 ; NOLSE-LABEL: test_rmw_nand_32:
 ; NOLSE:       // %bb.0: // %entry
 ; NOLSE-NEXT:    sub sp, sp, #32
@@ -509,11 +509,11 @@ define i32 @test_rmw_nand_32(i32* %dst)   {
 ; LSE-NEXT:    add sp, sp, #32
 ; LSE-NEXT:    ret
 entry:
-  %res = atomicrmw nand i32* %dst, i32 1 seq_cst
+  %res = atomicrmw nand ptr %dst, i32 1 seq_cst
   ret i32 %res
 }
 
-define i64 @test_rmw_nand_64(i64* %dst)   {
+define i64 @test_rmw_nand_64(ptr %dst)   {
 ; NOLSE-LABEL: test_rmw_nand_64:
 ; NOLSE:       // %bb.0: // %entry
 ; NOLSE-NEXT:    sub sp, sp, #32
@@ -587,11 +587,11 @@ define i64 @test_rmw_nand_64(i64* %dst)   {
 ; LSE-NEXT:    add sp, sp, #32
 ; LSE-NEXT:    ret
 entry:
-  %res = atomicrmw nand i64* %dst, i64 1 seq_cst
+  %res = atomicrmw nand ptr %dst, i64 1 seq_cst
   ret i64 %res
 }
 
-define i128 @test_rmw_nand_128(i128* %dst)   {
+define i128 @test_rmw_nand_128(ptr %dst)   {
 ; NOLSE-LABEL: test_rmw_nand_128:
 ; NOLSE:       // %bb.0: // %entry
 ; NOLSE-NEXT:    sub sp, sp, #48
@@ -692,6 +692,6 @@ define i128 @test_rmw_nand_128(i128* %dst)   {
 ; LSE-NEXT:    add sp, sp, #48
 ; LSE-NEXT:    ret
 entry:
-  %res = atomicrmw nand i128* %dst, i128 1 seq_cst
+  %res = atomicrmw nand ptr %dst, i128 1 seq_cst
   ret i128 %res
 }
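
One consequence of the opaque-pointer form is visible in atomicrmw-O0.ll: every test from i8 up to i128 now takes the same untyped ptr %dst, and only the value operand tells the backend how wide the RMW is. The i128 case, after conversion, is simply:

define i128 @test_rmw_add_128(ptr %dst) {
entry:
  ; a 16-byte RMW; the width comes from the i128 operand, not from the pointer type
  %res = atomicrmw add ptr %dst, i128 1 seq_cst
  ret i128 %res
}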

diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-xchg-fp.ll b/llvm/test/CodeGen/AArch64/atomicrmw-xchg-fp.ll
index cb2c5c551548e..98033a8e449ff 100644
--- a/llvm/test/CodeGen/AArch64/atomicrmw-xchg-fp.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-xchg-fp.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-- -O1 -fast-isel=0 -global-isel=false %s -o - | FileCheck %s -check-prefix=NOLSE
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-- -mattr=+lse -O1 -fast-isel=0 -global-isel=false %s -o - | FileCheck %s -check-prefix=LSE
 
-define half @test_rmw_xchg_f16(half* %dst, half %new) {
+define half @test_rmw_xchg_f16(ptr %dst, half %new) {
 ; NOLSE-LABEL: test_rmw_xchg_f16:
 ; NOLSE:       // %bb.0:
 ; NOLSE-NEXT:    // kill: def $h0 killed $h0 def $s0
@@ -25,11 +25,11 @@ define half @test_rmw_xchg_f16(half* %dst, half %new) {
 ; LSE-NEXT:    fmov s0, w8
 ; LSE-NEXT:    // kill: def $h0 killed $h0 killed $s0
 ; LSE-NEXT:    ret
-  %res = atomicrmw xchg half* %dst, half %new seq_cst
+  %res = atomicrmw xchg ptr %dst, half %new seq_cst
   ret half %res
 }
 
-define float @test_rmw_xchg_f32(float* %dst, float %new) {
+define float @test_rmw_xchg_f32(ptr %dst, float %new) {
 ; NOLSE-LABEL: test_rmw_xchg_f32:
 ; NOLSE:       // %bb.0:
 ; NOLSE-NEXT:    fmov w9, s0
@@ -48,11 +48,11 @@ define float @test_rmw_xchg_f32(float* %dst, float %new) {
 ; LSE-NEXT:    swpal w8, w8, [x0]
 ; LSE-NEXT:    fmov s0, w8
 ; LSE-NEXT:    ret
-  %res = atomicrmw xchg float* %dst, float %new seq_cst
+  %res = atomicrmw xchg ptr %dst, float %new seq_cst
   ret float %res
 }
 
-define double @test_rmw_xchg_f64(double* %dst, double %new) {
+define double @test_rmw_xchg_f64(ptr %dst, double %new) {
 ; NOLSE-LABEL: test_rmw_xchg_f64:
 ; NOLSE:       // %bb.0:
 ; NOLSE-NEXT:    fmov x8, d0
@@ -71,11 +71,11 @@ define double @test_rmw_xchg_f64(double* %dst, double %new) {
 ; LSE-NEXT:    swpal x8, x8, [x0]
 ; LSE-NEXT:    fmov d0, x8
 ; LSE-NEXT:    ret
-  %res = atomicrmw xchg double* %dst, double %new seq_cst
+  %res = atomicrmw xchg ptr %dst, double %new seq_cst
   ret double %res
 }
 
-define fp128 @test_rmw_xchg_f128(fp128* %dst, fp128 %new) {
+define fp128 @test_rmw_xchg_f128(ptr %dst, fp128 %new) {
 ; NOLSE-LABEL: test_rmw_xchg_f128:
 ; NOLSE:       // %bb.0:
 ; NOLSE-NEXT:    sub sp, sp, #32
@@ -113,6 +113,6 @@ define fp128 @test_rmw_xchg_f128(fp128* %dst, fp128 %new) {
 ; LSE-NEXT:    stp x4, x5, [sp]
 ; LSE-NEXT:    ldr q0, [sp], #32
 ; LSE-NEXT:    ret
-  %res = atomicrmw xchg fp128* %dst, fp128 %new seq_cst
+  %res = atomicrmw xchg ptr %dst, fp128 %new seq_cst
   ret fp128 %res
 }
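
The FP exchange tests follow the same pattern: atomicrmw xchg accepts half/float/double/fp128 value operands directly, and after the conversion the destination is just ptr. For example, the f32 case (which the LSE check lines above select as fmov/swpal/fmov) reads:

define float @test_rmw_xchg_f32(ptr %dst, float %new) {
  %res = atomicrmw xchg ptr %dst, float %new seq_cst
  ret float %res
}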

diff --git a/llvm/test/CodeGen/AArch64/basic-pic.ll b/llvm/test/CodeGen/AArch64/basic-pic.ll
index f8071d20eb85e..145137e6c1350 100644
--- a/llvm/test/CodeGen/AArch64/basic-pic.ll
+++ b/llvm/test/CodeGen/AArch64/basic-pic.ll
@@ -5,7 +5,7 @@
 define i32 @get_globalvar() {
 ; CHECK-LABEL: get_globalvar:
 
-  %val = load i32, i32* @var
+  %val = load i32, ptr @var
 ; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
 ; CHECK: ldr x[[GOTLOC:[0-9]+]], [x[[GOTHI]], :got_lo12:var]
 ; CHECK: ldr w0, [x[[GOTLOC]]]
@@ -13,12 +13,12 @@ define i32 @get_globalvar() {
   ret i32 %val
 }
 
-define i32* @get_globalvaraddr() {
+define ptr @get_globalvaraddr() {
 ; CHECK-LABEL: get_globalvaraddr:
 
-  %val = load i32, i32* @var
+  %val = load i32, ptr @var
 ; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
 ; CHECK: ldr x0, [x[[GOTHI]], :got_lo12:var]
 
-  ret i32* @var
+  ret ptr @var
 }
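
basic-pic.ll also shows the conversion on a pointer-returning function: define i32* @get_globalvaraddr() becomes define ptr @get_globalvaraddr(), and ret i32* @var becomes ret ptr @var, while the GOT-indirect access checked by the adrp/ldr :got: lines is unaffected. A self-contained sketch (the test declares @var elsewhere; a definition is assumed here):

@var = global i32 0

define ptr @get_globalvaraddr() {
  %val = load i32, ptr @var   ; dead load kept only to mirror the original test
  ret ptr @var
}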

diff --git a/llvm/test/CodeGen/AArch64/bcmp-inline-small.ll b/llvm/test/CodeGen/AArch64/bcmp-inline-small.ll
index e032db265e6f2..5cb0720d1bc66 100644
--- a/llvm/test/CodeGen/AArch64/bcmp-inline-small.ll
+++ b/llvm/test/CodeGen/AArch64/bcmp-inline-small.ll
@@ -2,10 +2,10 @@
 ; RUN: llc -O2 < %s -mtriple=aarch64-linux-gnu                     | FileCheck %s --check-prefix=CHECKN
 ; RUN: llc -O2 < %s -mtriple=aarch64-linux-gnu -mattr=strict-align | FileCheck %s --check-prefix=CHECKS
 
-declare i32 @bcmp(i8*, i8*, i64) nounwind readonly
-declare i32 @memcmp(i8*, i8*, i64) nounwind readonly
+declare i32 @bcmp(ptr, ptr, i64) nounwind readonly
+declare i32 @memcmp(ptr, ptr, i64) nounwind readonly
 
-define i1 @test_b2(i8* %s1, i8* %s2) {
+define i1 @test_b2(ptr %s1, ptr %s2) {
 ; CHECKN-LABEL: test_b2:
 ; CHECKN:       // %bb.0: // %entry
 ; CHECKN-NEXT:    ldr x8, [x0]
@@ -29,13 +29,13 @@ define i1 @test_b2(i8* %s1, i8* %s2) {
 ; CHECKS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECKS-NEXT:    ret
 entry:
-  %bcmp = call i32 @bcmp(i8* %s1, i8* %s2, i64 15)
+  %bcmp = call i32 @bcmp(ptr %s1, ptr %s2, i64 15)
   %ret = icmp eq i32 %bcmp, 0
   ret i1 %ret
 }
 
 ; TODO: Four loads should be within the limit, but the heuristic isn't implemented.
-define i1 @test_b2_align8(i8* align 8 %s1, i8* align 8 %s2) {
+define i1 @test_b2_align8(ptr align 8 %s1, ptr align 8 %s2) {
 ; CHECKN-LABEL: test_b2_align8:
 ; CHECKN:       // %bb.0: // %entry
 ; CHECKN-NEXT:    ldr x8, [x0]
@@ -59,12 +59,12 @@ define i1 @test_b2_align8(i8* align 8 %s1, i8* align 8 %s2) {
 ; CHECKS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECKS-NEXT:    ret
 entry:
-  %bcmp = call i32 @bcmp(i8* %s1, i8* %s2, i64 15)
+  %bcmp = call i32 @bcmp(ptr %s1, ptr %s2, i64 15)
   %ret = icmp eq i32 %bcmp, 0
   ret i1 %ret
 }
 
-define i1 @test_bs(i8* %s1, i8* %s2) optsize {
+define i1 @test_bs(ptr %s1, ptr %s2) optsize {
 ; CHECKN-LABEL: test_bs:
 ; CHECKN:       // %bb.0: // %entry
 ; CHECKN-NEXT:    ldp x8, x9, [x0]
@@ -92,7 +92,7 @@ define i1 @test_bs(i8* %s1, i8* %s2) optsize {
 ; CHECKS-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECKS-NEXT:    ret
 entry:
-  %memcmp = call i32 @memcmp(i8* %s1, i8* %s2, i64 31)
+  %memcmp = call i32 @memcmp(ptr %s1, ptr %s2, i64 31)
   %ret = icmp eq i32 %memcmp, 0
   ret i1 %ret
 }
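
Declarations convert the same way as definitions: the bcmp/memcmp prototypes become declare i32 @bcmp(ptr, ptr, i64), and the call sites pass ptr arguments with no casts. The smallest of the tests above, in its converted form, is:

declare i32 @bcmp(ptr, ptr, i64) nounwind readonly

define i1 @test_b2(ptr %s1, ptr %s2) {
entry:
  %bcmp = call i32 @bcmp(ptr %s1, ptr %s2, i64 15)   ; 15-byte compare; see the CHECKN/CHECKS runs above for the two lowerings
  %ret = icmp eq i32 %bcmp, 0
  ret i1 %ret
}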

diff --git a/llvm/test/CodeGen/AArch64/bf16-shuffle.ll b/llvm/test/CodeGen/AArch64/bf16-shuffle.ll
index 4a2d2f3b2bc18..8706cd21aa2c8 100644
--- a/llvm/test/CodeGen/AArch64/bf16-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/bf16-shuffle.ll
@@ -315,13 +315,13 @@ entry:
   ret <8 x bfloat> %shuffle.i
 }
 
-define <4 x bfloat> @test_vld_dup1_4xbfloat(bfloat* %b) {
+define <4 x bfloat> @test_vld_dup1_4xbfloat(ptr %b) {
 ; CHECK-LABEL: test_vld_dup1_4xbfloat:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1r { v0.4h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %b1 = load bfloat, bfloat* %b, align 2
+  %b1 = load bfloat, ptr %b, align 2
   %vecinit = insertelement <4 x bfloat> undef, bfloat %b1, i32 0
   %vecinit2 = insertelement <4 x bfloat> %vecinit, bfloat %b1, i32 1
   %vecinit3 = insertelement <4 x bfloat> %vecinit2, bfloat %b1, i32 2
@@ -329,13 +329,13 @@ entry:
   ret <4 x bfloat> %vecinit4
 }
 
-define <8 x bfloat> @test_vld_dup1_8xbfloat(bfloat* %b) local_unnamed_addr {
+define <8 x bfloat> @test_vld_dup1_8xbfloat(ptr %b) local_unnamed_addr {
 ; CHECK-LABEL: test_vld_dup1_8xbfloat:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld1r { v0.8h }, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %b1 = load bfloat, bfloat* %b, align 2
+  %b1 = load bfloat, ptr %b, align 2
   %vecinit = insertelement <8 x bfloat> undef, bfloat %b1, i32 0
   %vecinit8 = shufflevector <8 x bfloat> %vecinit, <8 x bfloat> undef, <8 x i32> zeroinitializer
   ret <8 x bfloat> %vecinit8
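
A general rule of the conversion shows up nicely in the ld1r tests: the loaded type is named by the load instruction itself (load bfloat, ptr %b), so the pointer argument carries no element type at all. The 8-lane variant, post-conversion, reads:

define <8 x bfloat> @test_vld_dup1_8xbfloat(ptr %b) local_unnamed_addr {
entry:
  %b1 = load bfloat, ptr %b, align 2
  %vecinit = insertelement <8 x bfloat> undef, bfloat %b1, i32 0
  %vecinit8 = shufflevector <8 x bfloat> %vecinit, <8 x bfloat> undef, <8 x i32> zeroinitializer
  ret <8 x bfloat> %vecinit8                        ; selected as ld1r { v0.8h }, [x0]
}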

diff --git a/llvm/test/CodeGen/AArch64/bf16.ll b/llvm/test/CodeGen/AArch64/bf16.ll
index 49545cb30c09d..ad05f07b9b8be 100644
--- a/llvm/test/CodeGen/AArch64/bf16.ll
+++ b/llvm/test/CodeGen/AArch64/bf16.ll
@@ -3,37 +3,37 @@
 
 ; test argument passing and simple load/store
 
-define bfloat @test_load(bfloat* %p) nounwind {
+define bfloat @test_load(ptr %p) nounwind {
 ; CHECK-LABEL: test_load:
 ; CHECK-NEXT: ldr h0, [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load bfloat, bfloat* %p, align 16
+  %tmp1 = load bfloat, ptr %p, align 16
   ret bfloat %tmp1
 }
 
-define <4 x bfloat> @test_vec_load(<4 x bfloat>* %p) nounwind {
+define <4 x bfloat> @test_vec_load(ptr %p) nounwind {
 ; CHECK-LABEL: test_vec_load:
 ; CHECK-NEXT: ldr d0, [x0]
 ; CHECK-NEXT: ret
-  %tmp1 = load <4 x bfloat>, <4 x bfloat>* %p, align 16
+  %tmp1 = load <4 x bfloat>, ptr %p, align 16
   ret <4 x bfloat> %tmp1
 }
 
-define void @test_store(bfloat* %a, bfloat %b) nounwind {
+define void @test_store(ptr %a, bfloat %b) nounwind {
 ; CHECK-LABEL: test_store:
 ; CHECK-NEXT: str h0, [x0]
 ; CHECK-NEXT: ret
-  store bfloat %b, bfloat* %a, align 16
+  store bfloat %b, ptr %a, align 16
   ret void
 }
 
 ; Simple store of v4bf16
-define void @test_vec_store(<4 x bfloat>* %a, <4 x bfloat> %b) nounwind {
+define void @test_vec_store(ptr %a, <4 x bfloat> %b) nounwind {
 ; CHECK-LABEL: test_vec_store:
 ; CHECK-NEXT: str d0, [x0]
 ; CHECK-NEXT: ret
 entry:
-  store <4 x bfloat> %b, <4 x bfloat>* %a, align 16
+  store <4 x bfloat> %b, ptr %a, align 16
   ret void
 }
 
@@ -44,43 +44,43 @@ define <8 x bfloat> @test_build_vector_const() {
   ret  <8 x bfloat> <bfloat 0xR3F80, bfloat 0xR3F80, bfloat 0xR3F80, bfloat 0xR3F80, bfloat 0xR3F80, bfloat 0xR3F80, bfloat 0xR3F80, bfloat 0xR3F80>
 }
 
-define { bfloat, bfloat* } @test_store_post(bfloat %val, bfloat* %ptr) {
+define { bfloat, ptr } @test_store_post(bfloat %val, ptr %ptr) {
 ; CHECK-LABEL: test_store_post:
 ; CHECK: str h0, [x0], #2
 
-  store bfloat %val, bfloat* %ptr
-  %res.tmp = insertvalue { bfloat, bfloat* } undef, bfloat %val, 0
+  store bfloat %val, ptr %ptr
+  %res.tmp = insertvalue { bfloat, ptr } undef, bfloat %val, 0
 
-  %next = getelementptr bfloat, bfloat* %ptr, i32 1
-  %res = insertvalue { bfloat, bfloat* } %res.tmp, bfloat* %next, 1
+  %next = getelementptr bfloat, ptr %ptr, i32 1
+  %res = insertvalue { bfloat, ptr } %res.tmp, ptr %next, 1
 
-  ret { bfloat, bfloat* } %res
+  ret { bfloat, ptr } %res
 }
 
-define { <4 x bfloat>, <4 x bfloat>* } @test_store_post_v4bf16(<4 x bfloat> %val, <4 x bfloat>* %ptr) {
+define { <4 x bfloat>, ptr } @test_store_post_v4bf16(<4 x bfloat> %val, ptr %ptr) {
 ; CHECK-LABEL: test_store_post_v4bf16:
 ; CHECK: str d0, [x0], #8
 
-  store <4 x bfloat> %val, <4 x bfloat>* %ptr
-  %res.tmp = insertvalue { <4 x bfloat>, <4 x bfloat>* } undef, <4 x bfloat> %val, 0
+  store <4 x bfloat> %val, ptr %ptr
+  %res.tmp = insertvalue { <4 x bfloat>, ptr } undef, <4 x bfloat> %val, 0
 
-  %next = getelementptr <4 x bfloat>, <4 x bfloat>* %ptr, i32 1
-  %res = insertvalue { <4 x bfloat>, <4 x bfloat>* } %res.tmp, <4 x bfloat>* %next, 1
+  %next = getelementptr <4 x bfloat>, ptr %ptr, i32 1
+  %res = insertvalue { <4 x bfloat>, ptr } %res.tmp, ptr %next, 1
 
-  ret { <4 x bfloat>, <4 x bfloat>* } %res
+  ret { <4 x bfloat>, ptr } %res
 }
 
-define { <8 x bfloat>, <8 x bfloat>* } @test_store_post_v8bf16(<8 x bfloat> %val, <8 x bfloat>* %ptr) {
+define { <8 x bfloat>, ptr } @test_store_post_v8bf16(<8 x bfloat> %val, ptr %ptr) {
 ; CHECK-LABEL: test_store_post_v8bf16:
 ; CHECK: str q0, [x0], #16
 
-  store <8 x bfloat> %val, <8 x bfloat>* %ptr
-  %res.tmp = insertvalue { <8 x bfloat>, <8 x bfloat>* } undef, <8 x bfloat> %val, 0
+  store <8 x bfloat> %val, ptr %ptr
+  %res.tmp = insertvalue { <8 x bfloat>, ptr } undef, <8 x bfloat> %val, 0
 
-  %next = getelementptr <8 x bfloat>, <8 x bfloat>* %ptr, i32 1
-  %res = insertvalue { <8 x bfloat>, <8 x bfloat>* } %res.tmp, <8 x bfloat>* %next, 1
+  %next = getelementptr <8 x bfloat>, ptr %ptr, i32 1
+  %res = insertvalue { <8 x bfloat>, ptr } %res.tmp, ptr %next, 1
 
-  ret { <8 x bfloat>, <8 x bfloat>* } %res
+  ret { <8 x bfloat>, ptr } %res
 }
 
 define bfloat @test_bitcast_halftobfloat(half %a) nounwind {
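
bf16.ll touches two more constructs: getelementptr, which still spells out its source element type (getelementptr bfloat, ptr %ptr, i32 1), and aggregate types, where bfloat* fields simply become ptr. The scalar post-increment store test, after conversion, reads:

define { bfloat, ptr } @test_store_post(bfloat %val, ptr %ptr) {
  store bfloat %val, ptr %ptr
  %res.tmp = insertvalue { bfloat, ptr } undef, bfloat %val, 0
  ; the element type is an operand of the GEP, not part of the pointer
  %next = getelementptr bfloat, ptr %ptr, i32 1
  %res = insertvalue { bfloat, ptr } %res.tmp, ptr %next, 1
  ret { bfloat, ptr } %res
}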

diff --git a/llvm/test/CodeGen/AArch64/bfis-in-loop.ll b/llvm/test/CodeGen/AArch64/bfis-in-loop.ll
index 5207f2ba32d36..5f3879164b330 100644
--- a/llvm/test/CodeGen/AArch64/bfis-in-loop.ll
+++ b/llvm/test/CodeGen/AArch64/bfis-in-loop.ll
@@ -4,9 +4,9 @@
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 
 %struct.bar = type { %struct.foo }
-%struct.foo = type { %struct.wobble* }
-%struct.wobble = type { %struct.zot* }
-%struct.zot = type <{ %struct.wobble, %struct.zot*, %struct.wobble*, i8, [7 x i8] }>
+%struct.foo = type { ptr }
+%struct.wobble = type { ptr }
+%struct.zot = type <{ %struct.wobble, ptr, ptr, i8, [7 x i8] }>
 
 @global = external global %struct.bar, align 8
 
@@ -36,7 +36,7 @@ define i64 @bfis_in_loop_zero() {
 ; CHECK-NEXT:  // %bb.2: // %exit
 ; CHECK-NEXT:    ret
 entry:
-  %var = load %struct.wobble*, %struct.wobble** getelementptr inbounds (%struct.bar, %struct.bar* @global, i64 0, i32 0, i32 0), align 8
+  %var = load ptr, ptr @global, align 8
   br label %preheader
 
 preheader:
@@ -44,14 +44,13 @@ preheader:
 
 header:                                              ; preds = %bb63, %bb
   %var4 = phi i64 [ %var30, %latch ], [ 0, %preheader ]
-  %var5 = phi %struct.wobble* [ %var38, %latch ], [ %var, %preheader ]
+  %var5 = phi ptr [ %var38, %latch ], [ %var, %preheader ]
   %var6 = phi i8 [ %var21, %latch ], [ 0, %preheader ]
   br label %midblock
 
 midblock:                                             ; preds = %bb9
-  %var15 = getelementptr inbounds %struct.wobble, %struct.wobble* %var5, i64 9
-  %var16 = bitcast %struct.wobble* %var15 to i16*
-  %var17 = load i16, i16* %var16, align 8
+  %var15 = getelementptr inbounds %struct.wobble, ptr %var5, i64 9
+  %var17 = load i16, ptr %var15, align 8
   %var18 = icmp eq i16 %var17, 0
   %var19 = lshr i16 %var17, 8
   %var20 = trunc i16 %var19 to i8
@@ -68,11 +67,11 @@ midblock:                                             ; preds = %bb9
   br label %latch
 
 latch:                                             ; preds = %bb14, %bb9
-  %var34 = getelementptr inbounds %struct.wobble, %struct.wobble* %var5, i64 1, i32 0
-  %var35 = load %struct.zot*, %struct.zot** %var34, align 8
-  %var36 = icmp eq %struct.zot* %var35, null
-  %var37 = getelementptr inbounds %struct.zot, %struct.zot* %var35, i64 0, i32 2
-  %var38 = load %struct.wobble*, %struct.wobble** %var37, align 8
+  %var34 = getelementptr inbounds %struct.wobble, ptr %var5, i64 1, i32 0
+  %var35 = load ptr, ptr %var34, align 8
+  %var36 = icmp eq ptr %var35, null
+  %var37 = getelementptr inbounds %struct.zot, ptr %var35, i64 0, i32 2
+  %var38 = load ptr, ptr %var37, align 8
   br i1 %var36, label %exit, label %header
 
 exit:
@@ -105,7 +104,7 @@ define i64 @bfis_in_loop_undef() {
 ; CHECK-NEXT:  // %bb.2: // %exit
 ; CHECK-NEXT:    ret
 entry:
-  %var = load %struct.wobble*, %struct.wobble** getelementptr inbounds (%struct.bar, %struct.bar* @global, i64 0, i32 0, i32 0), align 8
+  %var = load ptr, ptr @global, align 8
   br label %preheader
 
 preheader:
@@ -113,14 +112,13 @@ preheader:
 
 header:                                              ; preds = %bb63, %bb
   %var4 = phi i64 [ %var30, %latch ], [ undef, %preheader ]
-  %var5 = phi %struct.wobble* [ %var38, %latch ], [ %var, %preheader ]
+  %var5 = phi ptr [ %var38, %latch ], [ %var, %preheader ]
   %var6 = phi i8 [ %var21, %latch ], [ undef, %preheader ]
   br label %midblock
 
 midblock:                                             ; preds = %bb9
-  %var15 = getelementptr inbounds %struct.wobble, %struct.wobble* %var5, i64 9
-  %var16 = bitcast %struct.wobble* %var15 to i16*
-  %var17 = load i16, i16* %var16, align 8
+  %var15 = getelementptr inbounds %struct.wobble, ptr %var5, i64 9
+  %var17 = load i16, ptr %var15, align 8
   %var18 = icmp eq i16 %var17, 0
   %var19 = lshr i16 %var17, 8
   %var20 = trunc i16 %var19 to i8
@@ -137,11 +135,11 @@ midblock:                                             ; preds = %bb9
   br label %latch
 
 latch:                                             ; preds = %bb14, %bb9
-  %var34 = getelementptr inbounds %struct.wobble, %struct.wobble* %var5, i64 1, i32 0
-  %var35 = load %struct.zot*, %struct.zot** %var34, align 8
-  %var36 = icmp eq %struct.zot* %var35, null
-  %var37 = getelementptr inbounds %struct.zot, %struct.zot* %var35, i64 0, i32 2
-  %var38 = load %struct.wobble*, %struct.wobble** %var37, align 8
+  %var34 = getelementptr inbounds %struct.wobble, ptr %var5, i64 1, i32 0
+  %var35 = load ptr, ptr %var34, align 8
+  %var36 = icmp eq ptr %var35, null
+  %var37 = getelementptr inbounds %struct.zot, ptr %var35, i64 0, i32 2
+  %var38 = load ptr, ptr %var37, align 8
   br i1 %var36, label %exit, label %header
 
 exit:
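
bfis-in-loop.ll is one of the few places where the conversion is more than a textual substitution: the all-zero-index constant GEP on @global folds away to a plain ptr @global, the no-op bitcast from %struct.wobble* to i16* is dropped entirely, and the struct bodies now just say ptr. A hypothetical standalone wrapper around the converted midblock load (not part of the test itself) illustrates the dropped bitcast:

%struct.wobble = type { ptr }                 ; was { %struct.zot* }

define i16 @load_wobble_field(ptr %var5) {
  ; previously: a GEP producing %struct.wobble*, a bitcast to i16*, then a load;
  ; with opaque pointers the bitcast is a no-op and disappears
  %var15 = getelementptr inbounds %struct.wobble, ptr %var5, i64 9
  %var17 = load i16, ptr %var15, align 8
  ret i16 %var17
}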

diff --git a/llvm/test/CodeGen/AArch64/big-callframe.ll b/llvm/test/CodeGen/AArch64/big-callframe.ll
index 3ef57d5abc973..2f8600f2c797d 100644
--- a/llvm/test/CodeGen/AArch64/big-callframe.ll
+++ b/llvm/test/CodeGen/AArch64/big-callframe.ll
@@ -6,11 +6,11 @@
 ; CHECK: stur {{.*}}, [x29, #{{.*}}] // 8-byte Folded Spill
 ; CHECK: ldur {{.*}}, [x29, #{{.*}}] // 8-byte Folded Reload
 target triple = "aarch64--"
-declare void @extfunc([4096 x i64]* byval([4096 x i64]) %p)
-define void @func([4096 x i64]* %z) {
+declare void @extfunc(ptr byval([4096 x i64]) %p)
+define void @func(ptr %z) {
   %lvar = alloca [31 x i8]
-  %v = load volatile [31 x i8], [31 x i8]* %lvar
-  store volatile [31 x i8] %v, [31 x i8]* %lvar
-  call void @extfunc([4096 x i64]* byval([4096 x i64]) %z)
+  %v = load volatile [31 x i8], ptr %lvar
+  store volatile [31 x i8] %v, ptr %lvar
+  call void @extfunc(ptr byval([4096 x i64]) %z)
   ret void
 }
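
big-callframe.ll shows that byval is unaffected by the pointer change: the attribute already carried an explicit type argument, so ptr byval([4096 x i64]) still tells the backend how many bytes to copy onto the (very large) call frame. A trimmed sketch of the converted call, without the volatile alloca traffic of the original test:

declare void @extfunc(ptr byval([4096 x i64]) %p)

define void @func(ptr %z) {
  ; the pointee type lives in the byval attribute, not in the pointer type
  call void @extfunc(ptr byval([4096 x i64]) %z)
  ret void
}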

diff --git a/llvm/test/CodeGen/AArch64/bitfield-extract.ll b/llvm/test/CodeGen/AArch64/bitfield-extract.ll
index 5462dfd771712..9c8871af4a5e9 100644
--- a/llvm/test/CodeGen/AArch64/bitfield-extract.ll
+++ b/llvm/test/CodeGen/AArch64/bitfield-extract.ll
@@ -101,15 +101,15 @@ declare void @use(i16 signext, i64)
 ; CHECK: ldr d0, [x0], #8
 ; CHECK: ubfx x[[VAL:[0-9]+]], x0, #5, #27
 ; CHECK: str w[[VAL]], [x2]
-define <2 x i32> @test_complex_node(<2 x i32>* %addr, <2 x i32>** %addr2, i32* %bf ) {
-  %vec = load <2 x i32>, <2 x i32>* %addr
+define <2 x i32> @test_complex_node(ptr %addr, ptr %addr2, ptr %bf ) {
+  %vec = load <2 x i32>, ptr %addr
 
-  %vec.next = getelementptr <2 x i32>, <2 x i32>* %addr, i32 1
-  store <2 x i32>* %vec.next, <2 x i32>** %addr2
-  %lo = ptrtoint <2 x i32>* %vec.next to i32
+  %vec.next = getelementptr <2 x i32>, ptr %addr, i32 1
+  store ptr %vec.next, ptr %addr2
+  %lo = ptrtoint ptr %vec.next to i32
 
   %val = lshr i32 %lo, 5
-  store i32 %val, i32* %bf
+  store i32 %val, ptr %bf
 
   ret <2 x i32> %vec
 }
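
In test_complex_node the conversion also reaches ptrtoint and pointer-valued stores: ptrtoint ptr %vec.next to i32 no longer names a pointee type, and a pointer stored to memory is written as store ptr %p, ptr %slot, since a pointer-to-pointer slot is just another ptr. A reduced, hypothetical variant of that address-handling path (dropping the vector load and return of the real test):

define void @store_next_addr(ptr %addr, ptr %addr2, ptr %bf) {
  %vec.next = getelementptr <2 x i32>, ptr %addr, i32 1
  store ptr %vec.next, ptr %addr2        ; the slot's former <2 x i32>** type is now just ptr
  %lo = ptrtoint ptr %vec.next to i32
  %val = lshr i32 %lo, 5                 ; matched by the ubfx #5, #27 check above
  store i32 %val, ptr %bf
  ret void
}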

diff --git a/llvm/test/CodeGen/AArch64/bitfield-insert-0.ll b/llvm/test/CodeGen/AArch64/bitfield-insert-0.ll
index 94e40052f9c9c..80eba9473fcd4 100644
--- a/llvm/test/CodeGen/AArch64/bitfield-insert-0.ll
+++ b/llvm/test/CodeGen/AArch64/bitfield-insert-0.ll
@@ -3,17 +3,17 @@
 ; The encoding of lsb -> immr in the CGed bitfield instructions was wrong at one
 ; point, in the edge case where lsb = 0. Just make sure.
 
-define void @test_bfi0(i32* %existing, i32* %new) {
+define void @test_bfi0(ptr %existing, ptr %new) {
 ; CHECK: bfxil {{w[0-9]+}}, {{w[0-9]+}}, #0, #18
 
-  %oldval = load volatile i32, i32* %existing
+  %oldval = load volatile i32, ptr %existing
   %oldval_keep = and i32 %oldval, 4294705152 ; 0xfffc_0000
 
-  %newval = load volatile i32, i32* %new
+  %newval = load volatile i32, ptr %new
   %newval_masked = and i32 %newval, 262143 ; = 0x0003_ffff
 
   %combined = or i32 %newval_masked, %oldval_keep
-  store volatile i32 %combined, i32* %existing
+  store volatile i32 %combined, ptr %existing
 
   ret void
 }
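
The bitfield-insert tests only change in their load/store operands, but the mask arithmetic they rely on is easy to lose in the diff noise: 0xfffc0000 preserves bits 18..31 of the old value and 0x3ffff selects bits 0..17 of the new one, so the or is selected as the bfxil ..., #0, #18 that the CHECK line expects. The converted test_bfi0, with those masks annotated:

define void @test_bfi0(ptr %existing, ptr %new) {
  %oldval = load volatile i32, ptr %existing
  %oldval_keep = and i32 %oldval, 4294705152   ; 0xfffc0000: keep bits 18..31
  %newval = load volatile i32, ptr %new
  %newval_masked = and i32 %newval, 262143     ; 0x0003ffff: insert bits 0..17
  %combined = or i32 %newval_masked, %oldval_keep
  store volatile i32 %combined, ptr %existing  ; selected as bfxil ..., #0, #18
  ret void
}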

diff --git a/llvm/test/CodeGen/AArch64/bitfield-insert.ll b/llvm/test/CodeGen/AArch64/bitfield-insert.ll
index eeb1b544f57b8..ae19208cfb582 100644
--- a/llvm/test/CodeGen/AArch64/bitfield-insert.ll
+++ b/llvm/test/CodeGen/AArch64/bitfield-insert.ll
@@ -29,7 +29,7 @@ entry:
   ret [1 x i64] %.fca.0.insert
 }
 
-define void @test_whole32(i32* %existing, i32* %new) {
+define void @test_whole32(ptr %existing, ptr %new) {
 ; CHECK-LABEL: test_whole32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -37,20 +37,20 @@ define void @test_whole32(i32* %existing, i32* %new) {
 ; CHECK-NEXT:    bfi w8, w9, #26, #5
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
-  %oldval = load volatile i32, i32* %existing
+  %oldval = load volatile i32, ptr %existing
   %oldval_keep = and i32 %oldval, 2214592511 ; =0x83ffffff
 
-  %newval = load volatile i32, i32* %new
+  %newval = load volatile i32, ptr %new
   %newval_shifted = shl i32 %newval, 26
   %newval_masked = and i32 %newval_shifted, 2080374784 ; = 0x7c000000
 
   %combined = or i32 %oldval_keep, %newval_masked
-  store volatile i32 %combined, i32* %existing
+  store volatile i32 %combined, ptr %existing
 
   ret void
 }
 
-define void @test_whole64(i64* %existing, i64* %new) {
+define void @test_whole64(ptr %existing, ptr %new) {
 ; CHECK-LABEL: test_whole64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x0]
@@ -58,20 +58,20 @@ define void @test_whole64(i64* %existing, i64* %new) {
 ; CHECK-NEXT:    bfi x8, x9, #26, #14
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    ret
-  %oldval = load volatile i64, i64* %existing
+  %oldval = load volatile i64, ptr %existing
   %oldval_keep = and i64 %oldval, 18446742974265032703 ; = 0xffffff0003ffffffL
 
-  %newval = load volatile i64, i64* %new
+  %newval = load volatile i64, ptr %new
   %newval_shifted = shl i64 %newval, 26
   %newval_masked = and i64 %newval_shifted, 1099444518912 ; = 0xfffc000000
 
   %combined = or i64 %oldval_keep, %newval_masked
-  store volatile i64 %combined, i64* %existing
+  store volatile i64 %combined, ptr %existing
 
   ret void
 }
 
-define void @test_whole32_from64(i64* %existing, i64* %new) {
+define void @test_whole32_from64(ptr %existing, ptr %new) {
 ; CHECK-LABEL: test_whole32_from64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x0]
@@ -80,19 +80,19 @@ define void @test_whole32_from64(i64* %existing, i64* %new) {
 ; CHECK-NEXT:    bfxil x8, x9, #0, #16
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    ret
-  %oldval = load volatile i64, i64* %existing
+  %oldval = load volatile i64, ptr %existing
   %oldval_keep = and i64 %oldval, 4294901760 ; = 0xffff0000
 
-  %newval = load volatile i64, i64* %new
+  %newval = load volatile i64, ptr %new
   %newval_masked = and i64 %newval, 65535 ; = 0xffff
 
   %combined = or i64 %oldval_keep, %newval_masked
-  store volatile i64 %combined, i64* %existing
+  store volatile i64 %combined, ptr %existing
 
   ret void
 }
 
-define void @test_32bit_masked(i32 *%existing, i32 *%new) {
+define void @test_32bit_masked(ptr %existing, ptr %new) {
 ; CHECK-LABEL: test_32bit_masked:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -102,20 +102,20 @@ define void @test_32bit_masked(i32 *%existing, i32 *%new) {
 ; CHECK-NEXT:    bfi w8, w9, #3, #4
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
-  %oldval = load volatile i32, i32* %existing
+  %oldval = load volatile i32, ptr %existing
   %oldval_keep = and i32 %oldval, 135 ; = 0x87
 
-  %newval = load volatile i32, i32* %new
+  %newval = load volatile i32, ptr %new
   %newval_shifted = shl i32 %newval, 3
   %newval_masked = and i32 %newval_shifted, 120 ; = 0x78
 
   %combined = or i32 %oldval_keep, %newval_masked
-  store volatile i32 %combined, i32* %existing
+  store volatile i32 %combined, ptr %existing
 
   ret void
 }
 
-define void @test_64bit_masked(i64 *%existing, i64 *%new) {
+define void @test_64bit_masked(ptr %existing, ptr %new) {
 ; CHECK-LABEL: test_64bit_masked:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x0]
@@ -124,21 +124,21 @@ define void @test_64bit_masked(i64 *%existing, i64 *%new) {
 ; CHECK-NEXT:    bfi x8, x9, #40, #8
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    ret
-  %oldval = load volatile i64, i64* %existing
+  %oldval = load volatile i64, ptr %existing
   %oldval_keep = and i64 %oldval, 1095216660480 ; = 0xff_0000_0000
 
-  %newval = load volatile i64, i64* %new
+  %newval = load volatile i64, ptr %new
   %newval_shifted = shl i64 %newval, 40
   %newval_masked = and i64 %newval_shifted, 280375465082880 ; = 0xff00_0000_0000
 
   %combined = or i64 %newval_masked, %oldval_keep
-  store volatile i64 %combined, i64* %existing
+  store volatile i64 %combined, ptr %existing
 
   ret void
 }
 
 ; Mask is too complicated for literal ANDwwi, make sure other avenues are tried.
-define void @test_32bit_complexmask(i32 *%existing, i32 *%new) {
+define void @test_32bit_complexmask(ptr %existing, ptr %new) {
 ; CHECK-LABEL: test_32bit_complexmask:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -148,21 +148,21 @@ define void @test_32bit_complexmask(i32 *%existing, i32 *%new) {
 ; CHECK-NEXT:    bfi w8, w9, #3, #4
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
-  %oldval = load volatile i32, i32* %existing
+  %oldval = load volatile i32, ptr %existing
   %oldval_keep = and i32 %oldval, 647 ; = 0x287
 
-  %newval = load volatile i32, i32* %new
+  %newval = load volatile i32, ptr %new
   %newval_shifted = shl i32 %newval, 3
   %newval_masked = and i32 %newval_shifted, 120 ; = 0x278
 
   %combined = or i32 %oldval_keep, %newval_masked
-  store volatile i32 %combined, i32* %existing
+  store volatile i32 %combined, ptr %existing
 
   ret void
 }
 
 ; Neither mask is a contiguous set of 1s. BFI can't be used
-define void @test_32bit_badmask(i32 *%existing, i32 *%new) {
+define void @test_32bit_badmask(ptr %existing, ptr %new) {
 ; CHECK-LABEL: test_32bit_badmask:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -174,21 +174,21 @@ define void @test_32bit_badmask(i32 *%existing, i32 *%new) {
 ; CHECK-NEXT:    orr w8, w8, w9
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
-  %oldval = load volatile i32, i32* %existing
+  %oldval = load volatile i32, ptr %existing
   %oldval_keep = and i32 %oldval, 135 ; = 0x87
 
-  %newval = load volatile i32, i32* %new
+  %newval = load volatile i32, ptr %new
   %newval_shifted = shl i32 %newval, 3
   %newval_masked = and i32 %newval_shifted, 632 ; = 0x278
 
   %combined = or i32 %oldval_keep, %newval_masked
-  store volatile i32 %combined, i32* %existing
+  store volatile i32 %combined, ptr %existing
 
   ret void
 }
 
 ; Ditto
-define void @test_64bit_badmask(i64 *%existing, i64 *%new) {
+define void @test_64bit_badmask(ptr %existing, ptr %new) {
 ; CHECK-LABEL: test_64bit_badmask:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x9, [x0]
@@ -201,22 +201,22 @@ define void @test_64bit_badmask(i64 *%existing, i64 *%new) {
 ; CHECK-NEXT:    orr x8, x8, x9
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    ret
-  %oldval = load volatile i64, i64* %existing
+  %oldval = load volatile i64, ptr %existing
   %oldval_keep = and i64 %oldval, 135 ; = 0x87
 
-  %newval = load volatile i64, i64* %new
+  %newval = load volatile i64, ptr %new
   %newval_shifted = shl i64 %newval, 3
   %newval_masked = and i64 %newval_shifted, 664 ; = 0x278
 
   %combined = or i64 %oldval_keep, %newval_masked
-  store volatile i64 %combined, i64* %existing
+  store volatile i64 %combined, ptr %existing
 
   ret void
 }
 
 ; Bitfield insert where there's a left-over shr needed at the beginning
 ; (e.g. result of str.bf1 = str.bf2)
-define void @test_32bit_with_shr(i32* %existing, i32* %new) {
+define void @test_32bit_with_shr(ptr %existing, ptr %new) {
 ; CHECK-LABEL: test_32bit_with_shr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -225,21 +225,21 @@ define void @test_32bit_with_shr(i32* %existing, i32* %new) {
 ; CHECK-NEXT:    bfi w8, w9, #26, #5
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
-  %oldval = load volatile i32, i32* %existing
+  %oldval = load volatile i32, ptr %existing
   %oldval_keep = and i32 %oldval, 2214592511 ; =0x83ffffff
 
-  %newval = load i32, i32* %new
+  %newval = load i32, ptr %new
   %newval_shifted = shl i32 %newval, 12
   %newval_masked = and i32 %newval_shifted, 2080374784 ; = 0x7c000000
 
   %combined = or i32 %oldval_keep, %newval_masked
-  store volatile i32 %combined, i32* %existing
+  store volatile i32 %combined, ptr %existing
 
   ret void
 }
 
 ; Bitfield insert where the second or operand is a better match to be folded into the BFM
-define void @test_32bit_opnd1_better(i32* %existing, i32* %new) {
+define void @test_32bit_opnd1_better(ptr %existing, ptr %new) {
 ; CHECK-LABEL: test_32bit_opnd1_better:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -248,15 +248,15 @@ define void @test_32bit_opnd1_better(i32* %existing, i32* %new) {
 ; CHECK-NEXT:    bfi w8, w9, #16, #8
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
-  %oldval = load volatile i32, i32* %existing
+  %oldval = load volatile i32, ptr %existing
   %oldval_keep = and i32 %oldval, 65535 ; 0x0000ffff
 
-  %newval = load i32, i32* %new
+  %newval = load i32, ptr %new
   %newval_shifted = shl i32 %newval, 16
   %newval_masked = and i32 %newval_shifted, 16711680 ; 0x00ff0000
 
   %combined = or i32 %oldval_keep, %newval_masked
-  store volatile i32 %combined, i32* %existing
+  store volatile i32 %combined, ptr %existing
 
   ret void
 }
@@ -284,7 +284,7 @@ define i32 @test_nouseful_bits(i8 %a, i32 %b) {
   ret i32 %shl.4
 }
 
-define void @test_nouseful_strb(i32* %ptr32, i8* %ptr8, i32 %x)  {
+define void @test_nouseful_strb(ptr %ptr32, ptr %ptr8, i32 %x)  {
 ; CHECK-LABEL: test_nouseful_strb:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -292,17 +292,17 @@ define void @test_nouseful_strb(i32* %ptr32, i8* %ptr8, i32 %x)  {
 ; CHECK-NEXT:    strb w8, [x1]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* %ptr32, align 8
+  %0 = load i32, ptr %ptr32, align 8
   %and = and i32 %0, -8
   %shr = lshr i32 %x, 16
   %and1 = and i32 %shr, 7
   %or = or i32 %and, %and1
   %trunc = trunc i32 %or to i8
-  store i8 %trunc, i8* %ptr8
+  store i8 %trunc, ptr %ptr8
   ret void
 }
 
-define void @test_nouseful_strh(i32* %ptr32, i16* %ptr16, i32 %x)  {
+define void @test_nouseful_strh(ptr %ptr32, ptr %ptr16, i32 %x)  {
 ; CHECK-LABEL: test_nouseful_strh:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -310,17 +310,17 @@ define void @test_nouseful_strh(i32* %ptr32, i16* %ptr16, i32 %x)  {
 ; CHECK-NEXT:    strh w8, [x1]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* %ptr32, align 8
+  %0 = load i32, ptr %ptr32, align 8
   %and = and i32 %0, -16
   %shr = lshr i32 %x, 16
   %and1 = and i32 %shr, 15
   %or = or i32 %and, %and1
   %trunc = trunc i32 %or to i16
-  store i16 %trunc, i16* %ptr16
+  store i16 %trunc, ptr %ptr16
   ret void
 }
 
-define void @test_nouseful_sturb(i32* %ptr32, i8* %ptr8, i32 %x)  {
+define void @test_nouseful_sturb(ptr %ptr32, ptr %ptr8, i32 %x)  {
 ; CHECK-LABEL: test_nouseful_sturb:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -328,18 +328,18 @@ define void @test_nouseful_sturb(i32* %ptr32, i8* %ptr8, i32 %x)  {
 ; CHECK-NEXT:    sturb w8, [x1, #-1]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* %ptr32, align 8
+  %0 = load i32, ptr %ptr32, align 8
   %and = and i32 %0, -8
   %shr = lshr i32 %x, 16
   %and1 = and i32 %shr, 7
   %or = or i32 %and, %and1
   %trunc = trunc i32 %or to i8
-  %gep = getelementptr i8, i8* %ptr8, i64 -1
-  store i8 %trunc, i8* %gep
+  %gep = getelementptr i8, ptr %ptr8, i64 -1
+  store i8 %trunc, ptr %gep
   ret void
 }
 
-define void @test_nouseful_sturh(i32* %ptr32, i16* %ptr16, i32 %x)  {
+define void @test_nouseful_sturh(ptr %ptr32, ptr %ptr16, i32 %x)  {
 ; CHECK-LABEL: test_nouseful_sturh:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -347,14 +347,14 @@ define void @test_nouseful_sturh(i32* %ptr32, i16* %ptr16, i32 %x)  {
 ; CHECK-NEXT:    sturh w8, [x1, #-2]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* %ptr32, align 8
+  %0 = load i32, ptr %ptr32, align 8
   %and = and i32 %0, -16
   %shr = lshr i32 %x, 16
   %and1 = and i32 %shr, 15
   %or = or i32 %and, %and1
   %trunc = trunc i32 %or to i16
-  %gep = getelementptr i16, i16* %ptr16, i64 -1
-  store i16 %trunc, i16* %gep
+  %gep = getelementptr i16, ptr %ptr16, i64 -1
+  store i16 %trunc, ptr %gep
   ret void
 }
 
@@ -403,7 +403,7 @@ entry:
 }
 
 ; Don't convert 'and' with multiple uses.
-define i32 @test_or_and_and4(i32 %a, i32 %b, i32* %ptr) {
+define i32 @test_or_and_and4(i32 %a, i32 %b, ptr %ptr) {
 ; CHECK-LABEL: test_or_and_and4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    and w8, w0, #0xffff000f
@@ -413,14 +413,14 @@ define i32 @test_or_and_and4(i32 %a, i32 %b, i32* %ptr) {
 ; CHECK-NEXT:    ret
 entry:
   %and = and i32 %a, -65521
-  store i32 %and, i32* %ptr, align 4
+  store i32 %and, ptr %ptr, align 4
   %and2 = and i32 %b, 65520
   %or = or i32 %and2, %and
   ret i32 %or
 }
 
 ; Don't convert 'and' with multiple uses.
-define i32 @test_or_and_and5(i32 %a, i32 %b, i32* %ptr) {
+define i32 @test_or_and_and5(i32 %a, i32 %b, ptr %ptr) {
 ; CHECK-LABEL: test_or_and_and5:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    and w8, w1, #0xfff0
@@ -430,7 +430,7 @@ define i32 @test_or_and_and5(i32 %a, i32 %b, i32* %ptr) {
 ; CHECK-NEXT:    ret
 entry:
   %and = and i32 %b, 65520
-  store i32 %and, i32* %ptr, align 4
+  store i32 %and, ptr %ptr, align 4
   %and1 = and i32 %a, -65521
   %or = or i32 %and, %and1
   ret i32 %or
@@ -557,21 +557,21 @@ define i32 @test9(i64 %b, i32 %e) {
   ret i32 %h
 }
 
-define <2 x i32> @test_complex_type(<2 x i32>* %addr, i64 %in, i64* %bf ) {
+define <2 x i32> @test_complex_type(ptr %addr, i64 %in, ptr %bf ) {
 ; CHECK-LABEL: test_complex_type:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0], #8
 ; CHECK-NEXT:    orr x8, x0, x1, lsl #32
 ; CHECK-NEXT:    str x8, [x2]
 ; CHECK-NEXT:    ret
-  %vec = load <2 x i32>, <2 x i32>* %addr
+  %vec = load <2 x i32>, ptr %addr
 
-  %vec.next = getelementptr <2 x i32>, <2 x i32>* %addr, i32 1
-  %lo = ptrtoint <2 x i32>* %vec.next to i64
+  %vec.next = getelementptr <2 x i32>, ptr %addr, i32 1
+  %lo = ptrtoint ptr %vec.next to i64
 
   %hi = shl i64 %in, 32
   %both = or i64 %lo, %hi
-  store i64 %both, i64* %bf
+  store i64 %both, ptr %bf
 
   ret <2 x i32> %vec
 }
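
The test_complex_type hunk above shows the whole pattern of this conversion in miniature: the pointee type disappears from the pointer operands and is instead carried by the load and getelementptr instructions themselves, so the pointer arithmetic is unchanged. A minimal illustrative sketch (not part of the diff):

    ; typed pointers: the element type lived on the pointer operand
    %vec.next = getelementptr <2 x i32>, <2 x i32>* %addr, i32 1
    ; opaque pointers: only the instruction names it, and the stride
    ; is still one <2 x i32> (8 bytes)
    %vec.next = getelementptr <2 x i32>, ptr %addr, i32 1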

diff  --git a/llvm/test/CodeGen/AArch64/bitfield.ll b/llvm/test/CodeGen/AArch64/bitfield.ll
index 58fd0db036caa..1dfa4a8e12001 100644
--- a/llvm/test/CodeGen/AArch64/bitfield.ll
+++ b/llvm/test/CodeGen/AArch64/bitfield.ll
@@ -7,14 +7,14 @@ define dso_local void @test_extendb32(i8 %var) {
 ; CHECK-LABEL: test_extendb32:
 
   %sxt32 = sext i8 %var to i32
-  store volatile i32 %sxt32, i32* @var32
+  store volatile i32 %sxt32, ptr @var32
 ; CHECK: sxtb {{w[0-9]+}}, {{w[0-9]+}}
 
 ; N.b. this doesn't actually produce a bitfield instruction at the
 ; moment, but it's still a good test to have and the semantics are
 ; correct.
   %uxt32 = zext i8 %var to i32
-  store volatile i32 %uxt32, i32* @var32
+  store volatile i32 %uxt32, ptr @var32
 ; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, #0xff
   ret void
 }
@@ -23,14 +23,14 @@ define dso_local void @test_extendb64(i8 %var) {
 ; CHECK-LABEL: test_extendb64:
 
   %sxt64 = sext i8 %var to i64
-  store volatile i64 %sxt64, i64* @var64
+  store volatile i64 %sxt64, ptr @var64
 ; CHECK: sxtb {{x[0-9]+}}, {{w[0-9]+}}
 
 ; N.b. this doesn't actually produce a bitfield instruction at the
 ; moment, but it's still a good test to have and the semantics are
 ; correct.
   %uxt64 = zext i8 %var to i64
-  store volatile i64 %uxt64, i64* @var64
+  store volatile i64 %uxt64, ptr @var64
 ; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xff
   ret void
 }
@@ -39,14 +39,14 @@ define dso_local void @test_extendh32(i16 %var) {
 ; CHECK-LABEL: test_extendh32:
 
   %sxt32 = sext i16 %var to i32
-  store volatile i32 %sxt32, i32* @var32
+  store volatile i32 %sxt32, ptr @var32
 ; CHECK: sxth {{w[0-9]+}}, {{w[0-9]+}}
 
 ; N.b. this doesn't actually produce a bitfield instruction at the
 ; moment, but it's still a good test to have and the semantics are
 ; correct.
   %uxt32 = zext i16 %var to i32
-  store volatile i32 %uxt32, i32* @var32
+  store volatile i32 %uxt32, ptr @var32
 ; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, #0xffff
   ret void
 }
@@ -55,14 +55,14 @@ define dso_local void @test_extendh64(i16 %var) {
 ; CHECK-LABEL: test_extendh64:
 
   %sxt64 = sext i16 %var to i64
-  store volatile i64 %sxt64, i64* @var64
+  store volatile i64 %sxt64, ptr @var64
 ; CHECK: sxth {{x[0-9]+}}, {{w[0-9]+}}
 
 ; N.b. this doesn't actually produce a bitfield instruction at the
 ; moment, but it's still a good test to have and the semantics are
 ; correct.
   %uxt64 = zext i16 %var to i64
-  store volatile i64 %uxt64, i64* @var64
+  store volatile i64 %uxt64, ptr @var64
 ; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xffff
   ret void
 }
@@ -71,11 +71,11 @@ define dso_local void @test_extendw(i32 %var) {
 ; CHECK-LABEL: test_extendw:
 
   %sxt64 = sext i32 %var to i64
-  store volatile i64 %sxt64, i64* @var64
+  store volatile i64 %sxt64, ptr @var64
 ; CHECK: sxtw {{x[0-9]+}}, {{w[0-9]+}}
 
   %uxt64 = zext i32 %var to i64
-  store volatile i64 %uxt64, i64* @var64
+  store volatile i64 %uxt64, ptr @var64
 ; CHECK: mov {{w[0-9]+}}, w0
   ret void
 }
@@ -84,43 +84,43 @@ define dso_local void @test_shifts(i32 %val32, i64 %val64) {
 ; CHECK-LABEL: test_shifts:
 
   %shift1 = ashr i32 %val32, 31
-  store volatile i32 %shift1, i32* @var32
+  store volatile i32 %shift1, ptr @var32
 ; CHECK: asr {{w[0-9]+}}, {{w[0-9]+}}, #31
 
   %shift2 = lshr i32 %val32, 8
-  store volatile i32 %shift2, i32* @var32
+  store volatile i32 %shift2, ptr @var32
 ; CHECK: lsr {{w[0-9]+}}, {{w[0-9]+}}, #8
 
   %shift3 = shl i32 %val32, 1
-  store volatile i32 %shift3, i32* @var32
+  store volatile i32 %shift3, ptr @var32
 ; CHECK: lsl {{w[0-9]+}}, {{w[0-9]+}}, #1
 
   %shift4 = ashr i64 %val64, 31
-  store volatile i64 %shift4, i64* @var64
+  store volatile i64 %shift4, ptr @var64
 ; CHECK: asr {{x[0-9]+}}, {{x[0-9]+}}, #31
 
   %shift5 = lshr i64 %val64, 8
-  store volatile i64 %shift5, i64* @var64
+  store volatile i64 %shift5, ptr @var64
 ; CHECK: lsr {{x[0-9]+}}, {{x[0-9]+}}, #8
 
   %shift6 = shl i64 %val64, 63
-  store volatile i64 %shift6, i64* @var64
+  store volatile i64 %shift6, ptr @var64
 ; CHECK: lsl {{x[0-9]+}}, {{x[0-9]+}}, #63
 
   %shift7 = ashr i64 %val64, 63
-  store volatile i64 %shift7, i64* @var64
+  store volatile i64 %shift7, ptr @var64
 ; CHECK: asr {{x[0-9]+}}, {{x[0-9]+}}, #63
 
   %shift8 = lshr i64 %val64, 63
-  store volatile i64 %shift8, i64* @var64
+  store volatile i64 %shift8, ptr @var64
 ; CHECK: lsr {{x[0-9]+}}, {{x[0-9]+}}, #63
 
   %shift9 = lshr i32 %val32, 31
-  store volatile i32 %shift9, i32* @var32
+  store volatile i32 %shift9, ptr @var32
 ; CHECK: lsr {{w[0-9]+}}, {{w[0-9]+}}, #31
 
   %shift10 = shl i32 %val32, 31
-  store volatile i32 %shift10, i32* @var32
+  store volatile i32 %shift10, ptr @var32
 ; CHECK: lsl {{w[0-9]+}}, {{w[0-9]+}}, #31
 
   ret void
@@ -135,22 +135,22 @@ define dso_local void @test_sext_inreg_64(i64 %in) {
 ; the bitfield ops.
   %trunc_i1 = trunc i64 %in to i1
   %sext_i1 = sext i1 %trunc_i1 to i64
-  store volatile i64 %sext_i1, i64* @var64
+  store volatile i64 %sext_i1, ptr @var64
 ; CHECK: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #1
 
   %trunc_i8 = trunc i64 %in to i8
   %sext_i8 = sext i8 %trunc_i8 to i64
-  store volatile i64 %sext_i8, i64* @var64
+  store volatile i64 %sext_i8, ptr @var64
 ; CHECK: sxtb {{x[0-9]+}}, {{w[0-9]+}}
 
   %trunc_i16 = trunc i64 %in to i16
   %sext_i16 = sext i16 %trunc_i16 to i64
-  store volatile i64 %sext_i16, i64* @var64
+  store volatile i64 %sext_i16, ptr @var64
 ; CHECK: sxth {{x[0-9]+}}, {{w[0-9]+}}
 
   %trunc_i32 = trunc i64 %in to i32
   %sext_i32 = sext i32 %trunc_i32 to i64
-  store volatile i64 %sext_i32, i64* @var64
+  store volatile i64 %sext_i32, ptr @var64
 ; CHECK: sxtw {{x[0-9]+}}, {{w[0-9]+}}
   ret void
 }
@@ -162,17 +162,17 @@ define dso_local void @test_zext_inreg_64(i64 %in) {
 
   %trunc_i8 = trunc i64 %in to i8
   %zext_i8 = zext i8 %trunc_i8 to i64
-  store volatile i64 %zext_i8, i64* @var64
+  store volatile i64 %zext_i8, ptr @var64
 ; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xff
 
   %trunc_i16 = trunc i64 %in to i16
   %zext_i16 = zext i16 %trunc_i16 to i64
-  store volatile i64 %zext_i16, i64* @var64
+  store volatile i64 %zext_i16, ptr @var64
 ; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xffff
 
   %trunc_i32 = trunc i64 %in to i32
   %zext_i32 = zext i32 %trunc_i32 to i64
-  store volatile i64 %zext_i32, i64* @var64
+  store volatile i64 %zext_i32, ptr @var64
 ; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xffffffff
 
   ret void
@@ -192,40 +192,40 @@ define dso_local i64 @test_sext_inreg_from_32(i32 %in) {
 }
 
 
-define dso_local i32 @test_ubfx32(i32* %addr) {
+define dso_local i32 @test_ubfx32(ptr %addr) {
 ; CHECK-LABEL: test_ubfx32:
 ; CHECK: ubfx {{w[0-9]+}}, {{w[0-9]+}}, #23, #3
 
-   %fields = load i32, i32* %addr
+   %fields = load i32, ptr %addr
    %shifted = lshr i32 %fields, 23
    %masked = and i32 %shifted, 7
    ret i32 %masked
 }
 
-define dso_local i64 @test_ubfx64(i64* %addr) {
+define dso_local i64 @test_ubfx64(ptr %addr) {
 ; CHECK-LABEL: test_ubfx64:
 ; CHECK: ubfx {{x[0-9]+}}, {{x[0-9]+}}, #25, #10
-   %fields = load i64, i64* %addr
+   %fields = load i64, ptr %addr
    %shifted = lshr i64 %fields, 25
    %masked = and i64 %shifted, 1023
    ret i64 %masked
 }
 
-define dso_local i32 @test_sbfx32(i32* %addr) {
+define dso_local i32 @test_sbfx32(ptr %addr) {
 ; CHECK-LABEL: test_sbfx32:
 ; CHECK: sbfx {{w[0-9]+}}, {{w[0-9]+}}, #6, #3
 
-   %fields = load i32, i32* %addr
+   %fields = load i32, ptr %addr
    %shifted = shl i32 %fields, 23
    %extended = ashr i32 %shifted, 29
    ret i32 %extended
 }
 
-define dso_local i64 @test_sbfx64(i64* %addr) {
+define dso_local i64 @test_sbfx64(ptr %addr) {
 ; CHECK-LABEL: test_sbfx64:
 ; CHECK: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #63
 
-   %fields = load i64, i64* %addr
+   %fields = load i64, ptr %addr
    %shifted = shl i64 %fields, 1
    %extended = ashr i64 %shifted, 1
    ret i64 %extended
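
In bitfield.ll only the uses of @var32 and @var64 change; the access width was already explicit on every load and store, so the checked sxtb/sxth/ubfx/sbfx patterns are unaffected. The global definitions sit outside the hunk context, but they presumably keep their value types, roughly along these lines (a guess, shown only to make the ptr-typed uses concrete):

    @var32 = dso_local global i32 0
    @var64 = dso_local global i64 0

    store volatile i32 %sxt32, ptr @var32   ; width comes from the i32 operand, not the pointer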

diff  --git a/llvm/test/CodeGen/AArch64/blockaddress.ll b/llvm/test/CodeGen/AArch64/blockaddress.ll
index f254edb26e439..0f8d38dc1cb70 100644
--- a/llvm/test/CodeGen/AArch64/blockaddress.ll
+++ b/llvm/test/CodeGen/AArch64/blockaddress.ll
@@ -2,13 +2,13 @@
 ; RUN: llc -code-model=large -mtriple=aarch64-none-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-LARGE %s
 ; RUN: llc -code-model=tiny -mtriple=aarch64-none-none-eabi -aarch64-enable-atomic-cfg-tidy=0 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-TINY %s
 
- at addr = global i8* null
+ at addr = global ptr null
 
 define void @test_blockaddress() {
 ; CHECK-LABEL: test_blockaddress:
-  store volatile i8* blockaddress(@test_blockaddress, %block), i8** @addr
-  %val = load volatile i8*, i8** @addr
-  indirectbr i8* %val, [label %block]
+  store volatile ptr blockaddress(@test_blockaddress, %block), ptr @addr
+  %val = load volatile ptr, ptr @addr
+  indirectbr ptr %val, [label %block]
 ; CHECK: adrp [[DEST_HI:x[0-9]+]], [[DEST_LBL:.Ltmp[0-9]+]]
 ; CHECK: add [[DEST:x[0-9]+]], [[DEST_HI]], {{#?}}:lo12:[[DEST_LBL]]
 ; CHECK: str [[DEST]],

diff  --git a/llvm/test/CodeGen/AArch64/bool-loads.ll b/llvm/test/CodeGen/AArch64/bool-loads.ll
index d0bf05ecbe606..0d3b0c6054608 100644
--- a/llvm/test/CodeGen/AArch64/bool-loads.ll
+++ b/llvm/test/CodeGen/AArch64/bool-loads.ll
@@ -5,7 +5,7 @@
 define dso_local i32 @test_sextloadi32() {
 ; CHECK-LABEL: test_sextloadi32
 
-  %val = load i1, i1* @var
+  %val = load i1, ptr @var
   %ret = sext i1 %val to i32
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var]
 ; CHECK: {{sbfx x[0-9]+, x[0-9]+, #0, #1|sbfx w[0-9]+, w[0-9]+, #0, #1}}
@@ -17,7 +17,7 @@ define dso_local i32 @test_sextloadi32() {
 define dso_local i64 @test_sextloadi64() {
 ; CHECK-LABEL: test_sextloadi64
 
-  %val = load i1, i1* @var
+  %val = load i1, ptr @var
   %ret = sext i1 %val to i64
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var]
 ; CHECK: {{sbfx x[0-9]+, x[0-9]+, #0, #1}}
@@ -32,7 +32,7 @@ define dso_local i32 @test_zextloadi32() {
 ; It's not actually necessary that "ret" is next, but as far as LLVM
 ; is concerned only 0 or 1 should be loadable so no extension is
 ; necessary.
-  %val = load i1, i1* @var
+  %val = load i1, ptr @var
   %ret = zext i1 %val to i32
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var]
 
@@ -46,7 +46,7 @@ define dso_local i64 @test_zextloadi64() {
 ; It's not actually necessary that "ret" is next, but as far as LLVM
 ; is concerned only 0 or 1 should be loadable so no extension is
 ; necessary.
-  %val = load i1, i1* @var
+  %val = load i1, ptr @var
   %ret = zext i1 %val to i64
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var]
 

diff  --git a/llvm/test/CodeGen/AArch64/br-cond-not-merge.ll b/llvm/test/CodeGen/AArch64/br-cond-not-merge.ll
index 9edf9e6d82df7..e0ffd81717402 100644
--- a/llvm/test/CodeGen/AArch64/br-cond-not-merge.ll
+++ b/llvm/test/CodeGen/AArch64/br-cond-not-merge.ll
@@ -73,9 +73,9 @@ bb3:
 ; NOOPT: tbnz [[R2]], #0, [[L:\.LBB[0-9_]+]]
 ; NOOPT: ldr [[R3:w[0-9]+]], [sp, #[[SLOT2]]]
 ; NOOPT: tbz [[R3]], #0, [[L]]
-define void @test_cmp_other_block(i32* %p, i1 %c) {
+define void @test_cmp_other_block(ptr %p, i1 %c) {
 entry:
-  %l = load i32, i32* %p
+  %l = load i32, ptr %p
   %cmp = icmp sgt i32 %l, 0
   br label %bb1
 

diff  --git a/llvm/test/CodeGen/AArch64/br-to-eh-lpad.ll b/llvm/test/CodeGen/AArch64/br-to-eh-lpad.ll
index 2ac9e9043339c..3ca6bab31c955 100644
--- a/llvm/test/CodeGen/AArch64/br-to-eh-lpad.ll
+++ b/llvm/test/CodeGen/AArch64/br-to-eh-lpad.ll
@@ -7,15 +7,15 @@
 ; that case, the machine verifier, which relies on analyzing branches for this
 ; kind of verification, is unable to check anything, so accepts the CFG.
 
-define void @test_branch_to_landingpad() personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*) {
+define void @test_branch_to_landingpad() personality ptr @__objc_personality_v0 {
 entry:
   br i1 undef, label %if.end50.thread, label %if.then6
 
 lpad:
-  %0 = landingpad { i8*, i32 }
-          catch %struct._objc_typeinfo.12.129.194.285.350.493.519.532.571.597.623.765* @"OBJC_EHTYPE_$_NSString"
-          catch %struct._objc_typeinfo.12.129.194.285.350.493.519.532.571.597.623.765* @OBJC_EHTYPE_id
-          catch i8* null
+  %0 = landingpad { ptr, i32 }
+          catch ptr @"OBJC_EHTYPE_$_NSString"
+          catch ptr @OBJC_EHTYPE_id
+          catch ptr null
   br i1 undef, label %invoke.cont33, label %catch.fallthrough
 
 catch.fallthrough:
@@ -30,12 +30,12 @@ invoke.cont7:
   unreachable
 
 if.end50.thread:
-  tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @.str1, i64 0, i64 0), i32 125)
-  tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @.str1, i64 0, i64 0), i32 128)
+  tail call void (ptr, ...) @printf(ptr @.str1, i32 125)
+  tail call void (ptr, ...) @printf(ptr @.str1, i32 128)
   unreachable
 
 invoke.cont33:
-  tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @.str1, i64 0, i64 0), i32 119)
+  tail call void (ptr, ...) @printf(ptr @.str1, i32 119)
   unreachable
 
 invoke.cont41:
@@ -46,27 +46,27 @@ invoke.cont43:
   unreachable
 
 lpad40:
-  %1 = landingpad { i8*, i32 }
-          catch i8* null
+  %1 = landingpad { ptr, i32 }
+          catch ptr null
   br label %finally.catchall
 
 finally.catchall:
-  tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @.str1, i64 0, i64 0), i32 125)
+  tail call void (ptr, ...) @printf(ptr @.str1, i32 125)
   unreachable
 }
 
-%struct._objc_typeinfo.12.129.194.285.350.493.519.532.571.597.623.765 = type { i8**, i8*, %struct._class_t.10.127.192.283.348.491.517.530.569.595.621.764* }
-%struct._class_t.10.127.192.283.348.491.517.530.569.595.621.764 = type { %struct._class_t.10.127.192.283.348.491.517.530.569.595.621.764*, %struct._class_t.10.127.192.283.348.491.517.530.569.595.621.764*, %struct._objc_cache.0.117.182.273.338.481.507.520.559.585.611.754*, i8* (i8*, i8*)**, %struct._class_ro_t.9.126.191.282.347.490.516.529.568.594.620.763* }
+%struct._objc_typeinfo.12.129.194.285.350.493.519.532.571.597.623.765 = type { ptr, ptr, ptr }
+%struct._class_t.10.127.192.283.348.491.517.530.569.595.621.764 = type { ptr, ptr, ptr, ptr, ptr }
 %struct._objc_cache.0.117.182.273.338.481.507.520.559.585.611.754 = type opaque
-%struct._class_ro_t.9.126.191.282.347.490.516.529.568.594.620.763 = type { i32, i32, i32, i8*, i8*, %struct.__method_list_t.2.119.184.275.340.483.509.522.561.587.613.756*, %struct._objc_protocol_list.6.123.188.279.344.487.513.526.565.591.617.760*, %struct._ivar_list_t.8.125.190.281.346.489.515.528.567.593.619.762*, i8*, %struct._prop_list_t.4.121.186.277.342.485.511.524.563.589.615.758* }
+%struct._class_ro_t.9.126.191.282.347.490.516.529.568.594.620.763 = type { i32, i32, i32, ptr, ptr, ptr, ptr, ptr, ptr, ptr }
 %struct.__method_list_t.2.119.184.275.340.483.509.522.561.587.613.756 = type { i32, i32, [0 x %struct._objc_method.1.118.183.274.339.482.508.521.560.586.612.755] }
-%struct._objc_method.1.118.183.274.339.482.508.521.560.586.612.755 = type { i8*, i8*, i8* }
-%struct._objc_protocol_list.6.123.188.279.344.487.513.526.565.591.617.760 = type { i64, [0 x %struct._protocol_t.5.122.187.278.343.486.512.525.564.590.616.759*] }
-%struct._protocol_t.5.122.187.278.343.486.512.525.564.590.616.759 = type { i8*, i8*, %struct._objc_protocol_list.6.123.188.279.344.487.513.526.565.591.617.760*, %struct.__method_list_t.2.119.184.275.340.483.509.522.561.587.613.756*, %struct.__method_list_t.2.119.184.275.340.483.509.522.561.587.613.756*, %struct.__method_list_t.2.119.184.275.340.483.509.522.561.587.613.756*, %struct.__method_list_t.2.119.184.275.340.483.509.522.561.587.613.756*, %struct._prop_list_t.4.121.186.277.342.485.511.524.563.589.615.758*, i32, i32, i8** }
+%struct._objc_method.1.118.183.274.339.482.508.521.560.586.612.755 = type { ptr, ptr, ptr }
+%struct._objc_protocol_list.6.123.188.279.344.487.513.526.565.591.617.760 = type { i64, [0 x ptr] }
+%struct._protocol_t.5.122.187.278.343.486.512.525.564.590.616.759 = type { ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, i32, i32, ptr }
 %struct._ivar_list_t.8.125.190.281.346.489.515.528.567.593.619.762 = type { i32, i32, [0 x %struct._ivar_t.7.124.189.280.345.488.514.527.566.592.618.761] }
-%struct._ivar_t.7.124.189.280.345.488.514.527.566.592.618.761 = type { i32*, i8*, i8*, i32, i32 }
+%struct._ivar_t.7.124.189.280.345.488.514.527.566.592.618.761 = type { ptr, ptr, ptr, i32, i32 }
 %struct._prop_list_t.4.121.186.277.342.485.511.524.563.589.615.758 = type { i32, i32, [0 x %struct._prop_t.3.120.185.276.341.484.510.523.562.588.614.757] }
-%struct._prop_t.3.120.185.276.341.484.510.523.562.588.614.757 = type { i8*, i8* }
+%struct._prop_t.3.120.185.276.341.484.510.523.562.588.614.757 = type { ptr, ptr }
 
 @.str1 = external unnamed_addr constant [17 x i8], align 1
 @OBJC_EHTYPE_id = external global %struct._objc_typeinfo.12.129.194.285.350.493.519.532.571.597.623.765
@@ -75,4 +75,4 @@ finally.catchall:
 declare void @objc_exception_throw()
 declare void @objc_exception_rethrow()
 declare i32 @__objc_personality_v0(...)
-declare void @printf(i8* nocapture readonly, ...)
+declare void @printf(ptr nocapture readonly, ...)
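
Besides the plain type rewrites, this file shows the two constant-expression cleanups that fall out of the conversion: a pointer-to-pointer bitcast (the personality reference) and a getelementptr to element zero of an array constant (the @.str1 argument) both fold down to the bare global once every pointer is just ptr. Mirroring the hunks above:

    ; before
    personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*)
    tail call void (i8*, ...) @printf(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @.str1, i64 0, i64 0), i32 125)
    ; after
    personality ptr @__objc_personality_v0
    tail call void (ptr, ...) @printf(ptr @.str1, i32 125)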

diff  --git a/llvm/test/CodeGen/AArch64/br-undef-cond.ll b/llvm/test/CodeGen/AArch64/br-undef-cond.ll
index 12d0da2e4fcd2..785d1c883cdb9 100644
--- a/llvm/test/CodeGen/AArch64/br-undef-cond.ll
+++ b/llvm/test/CodeGen/AArch64/br-undef-cond.ll
@@ -5,9 +5,9 @@
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 target triple = "arm64-apple-ios"
 
-declare void @bar(i8*)
+declare void @bar(ptr)
 
-define void @foo(i8* %m, i32 %off0) {
+define void @foo(ptr %m, i32 %off0) {
 .thread1653:
   br i1 undef, label %0, label %.thread1880
 
@@ -20,7 +20,7 @@ define void @foo(i8* %m, i32 %off0) {
   ret void
 
 .thread1880:
-  %m1652.ph = phi i8* [ %m, %0 ], [ null, %.thread1653 ]
-  call void @bar(i8* %m1652.ph)
+  %m1652.ph = phi ptr [ %m, %0 ], [ null, %.thread1653 ]
+  call void @bar(ptr %m1652.ph)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/branch-folder-merge-mmos.ll b/llvm/test/CodeGen/AArch64/branch-folder-merge-mmos.ll
index d39260c68d79c..656e7503ba761 100644
--- a/llvm/test/CodeGen/AArch64/branch-folder-merge-mmos.ll
+++ b/llvm/test/CodeGen/AArch64/branch-folder-merge-mmos.ll
@@ -2,18 +2,18 @@
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
 
 ; Function Attrs: norecurse nounwind
-define void @foo(i32 %a, i32 %b, float* nocapture %foo_arr) #0 {
+define void @foo(i32 %a, i32 %b, ptr nocapture %foo_arr) #0 {
 ; CHECK: (load (s32) from %ir.arrayidx1.{{i[1-2]}})
 entry:
   %cmp = icmp sgt i32 %a, 0
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %0 = load float, float* %foo_arr, align 4
-  %arrayidx1.i1 = getelementptr inbounds float, float* %foo_arr, i64 1
-  %1 = load float, float* %arrayidx1.i1, align 4
+  %0 = load float, ptr %foo_arr, align 4
+  %arrayidx1.i1 = getelementptr inbounds float, ptr %foo_arr, i64 1
+  %1 = load float, ptr %arrayidx1.i1, align 4
   %sub.i = fsub float %0, %1
-  store float %sub.i, float* %foo_arr, align 4
+  store float %sub.i, ptr %foo_arr, align 4
   br label %if.end3
 
 if.end:                                           ; preds = %entry
@@ -21,11 +21,11 @@ if.end:                                           ; preds = %entry
   br i1 %cmp1, label %if.then2, label %if.end3
 
 if.then2:                                         ; preds = %if.end
-  %2 = load float, float* %foo_arr, align 4
-  %arrayidx1.i2 = getelementptr inbounds float, float* %foo_arr, i64 1
-  %3 = load float, float* %arrayidx1.i2, align 4
+  %2 = load float, ptr %foo_arr, align 4
+  %arrayidx1.i2 = getelementptr inbounds float, ptr %foo_arr, i64 1
+  %3 = load float, ptr %arrayidx1.i2, align 4
   %sub.i3 = fsub float %2, %3
-  store float %sub.i3, float* %foo_arr, align 4
+  store float %sub.i3, ptr %foo_arr, align 4
   br label %if.end3
 
 if.end3:                                          ; preds = %if.then2, %if.end, %if.then

diff  --git a/llvm/test/CodeGen/AArch64/branch-relax-alignment.ll b/llvm/test/CodeGen/AArch64/branch-relax-alignment.ll
index c1d824b9b79e5..d409a0b57a3d7 100644
--- a/llvm/test/CodeGen/AArch64/branch-relax-alignment.ll
+++ b/llvm/test/CodeGen/AArch64/branch-relax-alignment.ll
@@ -18,11 +18,11 @@ define i32 @invert_bcc_block_align_higher_func(i32 %x, i32 %y) align 4 #0 {
   br i1 %1, label %bb1, label %bb2
 
 bb2:
-  store volatile i32 9, i32* undef
+  store volatile i32 9, ptr undef
   ret i32 1
 
 bb1:
-  store volatile i32 42, i32* undef
+  store volatile i32 42, ptr undef
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll b/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
index 1e3e16188adf2..fed9734a2251e 100644
--- a/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
+++ b/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
@@ -31,11 +31,11 @@ bb2:
     "nop
      nop",
     ""() #0
-  store volatile i32 9, i32* undef
+  store volatile i32 9, ptr undef
   ret i32 1
 
 bb1:
-  store volatile i32 42, i32* undef
+  store volatile i32 42, ptr undef
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/branch-relax-cbz.ll b/llvm/test/CodeGen/AArch64/branch-relax-cbz.ll
index 22d9ffe2cc1ac..9dd865096803e 100644
--- a/llvm/test/CodeGen/AArch64/branch-relax-cbz.ll
+++ b/llvm/test/CodeGen/AArch64/branch-relax-cbz.ll
@@ -33,7 +33,7 @@ b2:
   br i1 %c1, label %b7, label %b8
 
 b3:
-  %v1 = load volatile i32, i32* undef, align 4
+  %v1 = load volatile i32, ptr undef, align 4
   %c2 = icmp eq i32 %v1, 0
   br i1 %c2, label %b7, label %b8
 

diff  --git a/llvm/test/CodeGen/AArch64/branch-target-enforcement-indirect-calls.ll b/llvm/test/CodeGen/AArch64/branch-target-enforcement-indirect-calls.ll
index 702d9dfc8fae7..46290beae594d 100644
--- a/llvm/test/CodeGen/AArch64/branch-target-enforcement-indirect-calls.ll
+++ b/llvm/test/CodeGen/AArch64/branch-target-enforcement-indirect-calls.ll
@@ -13,14 +13,14 @@ target triple = "aarch64-arm-none-eabi"
 ; which increases the number of potential ways they could be called, and
 ; weakens the security protections.
 
-define void @bti_disabled(void ()* %p) {
+define void @bti_disabled(ptr %p) {
 entry:
   tail call void %p()
 ; CHECK: br x0
   ret void
 }
 
-define void @bti_enabled(void ()* %p) "branch-target-enforcement"="true" {
+define void @bti_enabled(ptr %p) "branch-target-enforcement"="true" {
 entry:
   tail call void %p()
 ; CHECK: br {{x16|x17}}
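
For indirect calls through a function-pointer argument, the parameter type collapses to ptr and the call instruction alone supplies the callee type, so the property this test checks (br x0 without BTI, br x16/x17 with it) is unaffected. Restating the converted form for clarity:

    define void @bti_enabled(ptr %p) "branch-target-enforcement"="true" {
    entry:
      tail call void %p()   ; callee type void() is now implied by the call itself
      ret void
    }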

diff  --git a/llvm/test/CodeGen/AArch64/breg.ll b/llvm/test/CodeGen/AArch64/breg.ll
index 64adcae774cb5..44d757a913f14 100644
--- a/llvm/test/CodeGen/AArch64/breg.ll
+++ b/llvm/test/CodeGen/AArch64/breg.ll
@@ -1,11 +1,11 @@
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s
 
- at stored_label = dso_local global i8* null
+ at stored_label = dso_local global ptr null
 
 define dso_local void @foo() {
 ; CHECK-LABEL: foo:
-  %lab = load i8*, i8** @stored_label
-  indirectbr i8* %lab, [label  %otherlab, label %retlab]
+  %lab = load ptr, ptr @stored_label
+  indirectbr ptr %lab, [label  %otherlab, label %retlab]
 ; CHECK: adrp {{x[0-9]+}}, stored_label
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:stored_label]
 ; CHECK: br {{x[0-9]+}}

diff  --git a/llvm/test/CodeGen/AArch64/bswap-known-bits.ll b/llvm/test/CodeGen/AArch64/bswap-known-bits.ll
index 442caf7d9b8e8..23619e47367d0 100644
--- a/llvm/test/CodeGen/AArch64/bswap-known-bits.ll
+++ b/llvm/test/CodeGen/AArch64/bswap-known-bits.ll
@@ -95,20 +95,20 @@ define i64 @demand_one_byte3(i64 %x) {
   ret i64 %r
 }
 
-define void @demand_one_loaded_byte(i64* %xp, i32* %yp) {
+define void @demand_one_loaded_byte(ptr %xp, ptr %yp) {
 ; CHECK-LABEL: demand_one_loaded_byte:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x0, #4]
 ; CHECK-NEXT:    strb w8, [x1]
 ; CHECK-NEXT:    ret
-  %x = load i64, i64* %xp, align 8
+  %x = load i64, ptr %xp, align 8
   %x_zzzz7654 = lshr i64 %x, 32
   %x_z7654zzz = shl nuw nsw i64 %x_zzzz7654, 24
   %x_4zzz = trunc i64 %x_z7654zzz to i32
-  %y = load i32, i32* %yp, align 4
+  %y = load i32, ptr %yp, align 4
   %y_321z = and i32 %y, -256
   %x_zzz4 = call i32 @llvm.bswap.i32(i32 %x_4zzz)
   %r = or i32 %x_zzz4, %y_321z
-  store i32 %r, i32* %yp, align 4
+  store i32 %r, ptr %yp, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/bti-branch-relaxation.ll b/llvm/test/CodeGen/AArch64/bti-branch-relaxation.ll
index 20749f3efd47b..0feda1a05ef0a 100644
--- a/llvm/test/CodeGen/AArch64/bti-branch-relaxation.ll
+++ b/llvm/test/CodeGen/AArch64/bti-branch-relaxation.ll
@@ -5,7 +5,7 @@ target triple = "aarch64-unknown-unknown-eabi"
 ; Function Attrs: nounwind
 define dso_local void @f(i64 %v) local_unnamed_addr #0 {
 entry:
-  %call = tail call i32 bitcast (i32 (...)* @test to i32 ()*)() #0
+  %call = tail call i32 @test() #0
   %and = and i32 %call, 2
   %cmp = icmp eq i32 %and, 0
   br i1 %cmp, label %if.then, label %if.else
@@ -20,19 +20,19 @@ if.then:                                          ; preds = %entry
   ]
 
 sw.bb:                                            ; preds = %if.then
-  tail call void bitcast (void (...)* @g0 to void ()*)() #0
+  tail call void @g0() #0
   br label %sw.bb1
 
 sw.bb1:                                           ; preds = %if.then, %sw.bb
-  tail call void bitcast (void (...)* @g1 to void ()*)() #0
+  tail call void @g1() #0
   br label %sw.bb2
 
 sw.bb2:                                           ; preds = %if.then, %sw.bb1
-  tail call void bitcast (void (...)* @g2 to void ()*)() #0
+  tail call void @g2() #0
   br label %sw.bb3
 
 sw.bb3:                                           ; preds = %if.then, %sw.bb2
-  tail call void bitcast (void (...)* @g3 to void ()*)() #0
+  tail call void @g3() #0
   br label %sw.epilog
 
 sw.epilog:                                        ; preds = %sw.bb3, %if.then
@@ -40,7 +40,7 @@ sw.epilog:                                        ; preds = %sw.bb3, %if.then
   br label %if.end
 
 if.else:                                          ; preds = %entry
-  tail call void bitcast (void (...)* @e to void ()*)() #0
+  tail call void @e() #0
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %sw.epilog
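
The calls in this test show the other common simplification: a direct call that used to be written through a constant cast, e.g. bitcast (void (...)* @g0 to void ()*), becomes a plain call, because with a single pointer type the cast is a no-op and the call instruction already records the function type it is made with. Sketch, assuming @g0 is declared as a vararg function as the old cast implies:

    declare void @g0(...)
    ; typed pointers needed a cast at the call site:
    ;   tail call void bitcast (void (...)* @g0 to void ()*)() #0
    ; opaque pointers do not:
    tail call void @g0() #0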

diff  --git a/llvm/test/CodeGen/AArch64/build-one-lane.ll b/llvm/test/CodeGen/AArch64/build-one-lane.ll
index 46b53de71d1c5..8660c709e79ae 100644
--- a/llvm/test/CodeGen/AArch64/build-one-lane.ll
+++ b/llvm/test/CodeGen/AArch64/build-one-lane.ll
@@ -189,7 +189,7 @@ define <2 x i64> @v2i64m(i64 %t, i64 %s) nounwind {
 
 ; Check that building up a vector w/ some constants initializes efficiently.
 
-define void @v8i8st(<8 x i8>* %p, i8 %s) nounwind {
+define void @v8i8st(ptr %p, i8 %s) nounwind {
 ; CHECK-LABEL: v8i8st:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v0.8b, #1
@@ -197,11 +197,11 @@ define void @v8i8st(<8 x i8>* %p, i8 %s) nounwind {
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %v = insertelement <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 undef>, i8 %s, i32 7
-  store <8 x i8> %v, <8 x i8>* %p, align 8
+  store <8 x i8> %v, ptr %p, align 8
   ret void
 }
 
-define void @v16i8st(<16 x i8>* %p, i8 %s) nounwind {
+define void @v16i8st(ptr %p, i8 %s) nounwind {
 ; CHECK-LABEL: v16i8st:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v0.16b, #128
@@ -209,11 +209,11 @@ define void @v16i8st(<16 x i8>* %p, i8 %s) nounwind {
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %v = insertelement <16 x i8> <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 undef>, i8 %s, i32 15
-  store <16 x i8> %v, <16 x i8>* %p, align 16
+  store <16 x i8> %v, ptr %p, align 16
   ret void
 }
 
-define void @v4i16st(<4 x i16>* %p, i16 %s) nounwind {
+define void @v4i16st(ptr %p, i16 %s) nounwind {
 ; CHECK-LABEL: v4i16st:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v0.4h, #85, lsl #8
@@ -221,11 +221,11 @@ define void @v4i16st(<4 x i16>* %p, i16 %s) nounwind {
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %v = insertelement <4 x i16> <i16 21760, i16 21760, i16 21760, i16 undef>, i16 %s, i32 3
-  store <4 x i16> %v, <4 x i16>* %p, align 8
+  store <4 x i16> %v, ptr %p, align 8
   ret void
 }
 
-define void @v8i16st(<8 x i16>* %p, i16 %s) nounwind {
+define void @v8i16st(ptr %p, i16 %s) nounwind {
 ; CHECK-LABEL: v8i16st:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvni v0.8h, #85, lsl #8
@@ -233,11 +233,11 @@ define void @v8i16st(<8 x i16>* %p, i16 %s) nounwind {
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %v = insertelement <8 x i16> <i16 -21761, i16 -21761, i16 -21761, i16 -21761, i16 -21761, i16 -21761, i16 -21761, i16 undef>, i16 %s, i32 7
-  store <8 x i16> %v, <8 x i16>* %p, align 16
+  store <8 x i16> %v, ptr %p, align 16
   ret void
 }
 
-define void @v2i32st(<2 x i32>* %p, i32 %s) nounwind {
+define void @v2i32st(ptr %p, i32 %s) nounwind {
 ; CHECK-LABEL: v2i32st:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v0.2s, #15, lsl #16
@@ -245,11 +245,11 @@ define void @v2i32st(<2 x i32>* %p, i32 %s) nounwind {
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %v = insertelement <2 x i32> <i32 983040, i32 undef>, i32 %s, i32 1
-  store <2 x i32> %v, <2 x i32>* %p, align 8
+  store <2 x i32> %v, ptr %p, align 8
   ret void
 }
 
-define void @v4i32st(<4 x i32>* %p, i32 %s) nounwind {
+define void @v4i32st(ptr %p, i32 %s) nounwind {
 ; CHECK-LABEL: v4i32st:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v0.4s, #248, msl #16
@@ -257,11 +257,11 @@ define void @v4i32st(<4 x i32>* %p, i32 %s) nounwind {
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %v = insertelement <4 x i32> <i32 16318463, i32 16318463, i32 16318463, i32 undef>, i32 %s, i32 3
-  store <4 x i32> %v, <4 x i32>* %p, align 16
+  store <4 x i32> %v, ptr %p, align 16
   ret void
 }
 
-define void @v2i64st(<2 x i64>* %p, i64 %s) nounwind {
+define void @v2i64st(ptr %p, i64 %s) nounwind {
 ; CHECK-LABEL: v2i64st:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov v0.2d, #-2.00000000
@@ -269,11 +269,11 @@ define void @v2i64st(<2 x i64>* %p, i64 %s) nounwind {
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %v = insertelement <2 x i64> <i64 13835058055282163712, i64 undef>, i64 %s, i32 1
-  store <2 x i64> %v, <2 x i64>* %p, align 16
+  store <2 x i64> %v, ptr %p, align 16
   ret void
 }
 
-define void @v2f32st(<2 x float>* %p, float %s) nounwind {
+define void @v2f32st(ptr %p, float %s) nounwind {
 ; CHECK-LABEL: v2f32st:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v1.2s, #64, lsl #24
@@ -282,11 +282,11 @@ define void @v2f32st(<2 x float>* %p, float %s) nounwind {
 ; CHECK-NEXT:    str d1, [x0]
 ; CHECK-NEXT:    ret
   %v = insertelement <2 x float> <float 2.0, float undef>, float %s, i32 1
-  store <2 x float> %v, <2 x float>* %p, align 8
+  store <2 x float> %v, ptr %p, align 8
   ret void
 }
 
-define void @v4f32st(<4 x float>* %p, float %s) nounwind {
+define void @v4f32st(ptr %p, float %s) nounwind {
 ; CHECK-LABEL: v4f32st:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v1.4s, #192, lsl #24
@@ -295,11 +295,11 @@ define void @v4f32st(<4 x float>* %p, float %s) nounwind {
 ; CHECK-NEXT:    str q1, [x0]
 ; CHECK-NEXT:    ret
   %v = insertelement <4 x float> <float -2.0, float -2.0, float -2.0, float undef>, float %s, i32 3
-  store <4 x float> %v, <4 x float>* %p, align 16
+  store <4 x float> %v, ptr %p, align 16
   ret void
 }
 
-define void @v2f64st(<2 x double>* %p, double %s) nounwind {
+define void @v2f64st(ptr %p, double %s) nounwind {
 ; CHECK-LABEL: v2f64st:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov v1.2d, #2.00000000
@@ -308,7 +308,7 @@ define void @v2f64st(<2 x double>* %p, double %s) nounwind {
 ; CHECK-NEXT:    str q1, [x0]
 ; CHECK-NEXT:    ret
   %v = insertelement <2 x double> <double 2.0, double undef>, double %s, i32 1
-  store <2 x double> %v, <2 x double>* %p, align 16
+  store <2 x double> %v, ptr %p, align 16
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/build-pair-isel.ll b/llvm/test/CodeGen/AArch64/build-pair-isel.ll
index 970a2c69343f5..cd68bde0f8da7 100644
--- a/llvm/test/CodeGen/AArch64/build-pair-isel.ll
+++ b/llvm/test/CodeGen/AArch64/build-pair-isel.ll
@@ -19,6 +19,6 @@ define void @compare_and_swap128() {
 ; CHECK-NEXT:    str x8, [x9]
 ; CHECK-NEXT:    ret
   %1 = call i128 asm sideeffect "nop", "=r,~{memory}"()
-  store i128 %1, i128* undef, align 16
+  store i128 %1, ptr undef, align 16
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/byval-type.ll b/llvm/test/CodeGen/AArch64/byval-type.ll
index d49ac16f8e155..472f05ceeb0cd 100644
--- a/llvm/test/CodeGen/AArch64/byval-type.ll
+++ b/llvm/test/CodeGen/AArch64/byval-type.ll
@@ -1,37 +1,37 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu %s -o - | FileCheck %s
 
-define i8 @byval_match(i8* byval(i8) align 1, i8* byval(i8) %ptr) {
+define i8 @byval_match(ptr byval(i8) align 1, ptr byval(i8) %ptr) {
 ; CHECK-LABEL: byval_match:
 ; CHECK: ldrb w0, [sp, #8]
-  %res = load i8, i8* %ptr
+  %res = load i8, ptr %ptr
   ret i8 %res
 }
 
-define void @caller_match(i8* %p0, i8* %p1) {
+define void @caller_match(ptr %p0, ptr %p1) {
 ; CHECK-LABEL: caller_match:
 ; CHECK: ldrb [[P1:w[0-9]+]], [x1]
 ; CHECK: strb [[P1]], [sp, #8]
 ; CHECK: ldrb [[P0:w[0-9]+]], [x0]
 ; CHECK: strb [[P0]], [sp]
 ; CHECK: bl byval_match
-  call i8 @byval_match(i8* byval(i8) align 1 %p0, i8* byval(i8) %p1)
+  call i8 @byval_match(ptr byval(i8) align 1 %p0, ptr byval(i8) %p1)
   ret void
 }
 
-define i8 @byval_large([3 x i64]* byval([3 x i64]) align 8, i8* byval(i8) %ptr) {
+define i8 @byval_large(ptr byval([3 x i64]) align 8, ptr byval(i8) %ptr) {
 ; CHECK-LABEL: byval_large:
 ; CHECK: ldrb w0, [sp, #24]
-  %res = load i8, i8* %ptr
+  %res = load i8, ptr %ptr
   ret i8 %res
 }
 
-define void @caller_large([3 x i64]* %p0, i8* %p1) {
+define void @caller_large(ptr %p0, ptr %p1) {
 ; CHECK-LABEL: caller_large:
 ; CHECK: ldr [[P0HI:x[0-9]+]], [x0, #16]
 ; CHECK: ldr [[P0LO:q[0-9]+]], [x0]
 ; CHECK: str [[P0HI]], [sp, #16]
 ; CHECK: str [[P0LO]], [sp]
 ; CHECK: bl byval_large
-  call i8 @byval_large([3 x i64]* byval([3 x i64]) align 8 %p0, i8* byval(i8) %p1)
+  call i8 @byval_large(ptr byval([3 x i64]) align 8 %p0, ptr byval(i8) %p1)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/call-rv-marker.ll b/llvm/test/CodeGen/AArch64/call-rv-marker.ll
index 4c978392de169..fc06809ad09fb 100644
--- a/llvm/test/CodeGen/AArch64/call-rv-marker.ll
+++ b/llvm/test/CodeGen/AArch64/call-rv-marker.ll
@@ -4,46 +4,46 @@
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "arm64-apple-ios"
 
-declare i8* @foo0(i32)
-declare i8* @foo1()
+declare ptr @foo0(i32)
+declare ptr @foo1()
 
-declare void @llvm.objc.release(i8*)
-declare void @objc_object(i8*)
+declare void @llvm.objc.release(ptr)
+declare void @objc_object(ptr)
 
-declare void @foo2(i8*)
+declare void @foo2(ptr)
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 
-declare %struct.S* @_ZN1SD1Ev(%struct.S* nonnull dereferenceable(1))
+declare ptr @_ZN1SD1Ev(ptr nonnull dereferenceable(1))
 
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 
 %struct.S = type { i8 }
 
- at g = dso_local global i8* null, align 8
- at fptr = dso_local global i8* ()* null, align 8
+ at g = dso_local global ptr null, align 8
+ at fptr = dso_local global ptr null, align 8
 
-define dso_local i8* @rv_marker_1_retain() {
+define dso_local ptr @rv_marker_1_retain() {
 ; CHECK-LABEL: _rv_marker_1_retain:
 ; CHECK:         bl _foo1
 ; CHECK-NEXT:    mov x29, x29
 ; CHECK-NEXT:    bl _objc_retainAutoreleasedReturnValue
 ;
 entry:
-  %call = call i8* @foo1() [ "clang.arc.attachedcall"(i8* (i8*)* @objc_retainAutoreleasedReturnValue) ]
-  ret i8* %call
+  %call = call ptr @foo1() [ "clang.arc.attachedcall"(ptr @objc_retainAutoreleasedReturnValue) ]
+  ret ptr %call
 }
 
-define dso_local i8* @rv_marker_1_unsafeClaim() {
+define dso_local ptr @rv_marker_1_unsafeClaim() {
 ; CHECK-LABEL: _rv_marker_1_unsafeClaim:
 ; CHECK:         bl _foo1
 ; CHECK-NEXT:    mov x29, x29
 ; CHECK-NEXT:    bl _objc_unsafeClaimAutoreleasedReturnValue
 ;
 entry:
-  %call = call i8* @foo1() [ "clang.arc.attachedcall"(i8* (i8*)* @objc_unsafeClaimAutoreleasedReturnValue) ]
-  ret i8* %call
+  %call = call ptr @foo1() [ "clang.arc.attachedcall"(ptr @objc_unsafeClaimAutoreleasedReturnValue) ]
+  ret ptr %call
 }
 
 define dso_local void @rv_marker_2_select(i32 %c) {
@@ -59,34 +59,34 @@ define dso_local void @rv_marker_2_select(i32 %c) {
 entry:
   %tobool.not = icmp eq i32 %c, 0
   %.sink = select i1 %tobool.not, i32 2, i32 1
-  %call1 = call i8* @foo0(i32 %.sink) [ "clang.arc.attachedcall"(i8* (i8*)* @objc_retainAutoreleasedReturnValue) ]
-  tail call void @foo2(i8* %call1)
+  %call1 = call ptr @foo0(i32 %.sink) [ "clang.arc.attachedcall"(ptr @objc_retainAutoreleasedReturnValue) ]
+  tail call void @foo2(ptr %call1)
   ret void
 }
 
-define dso_local void @rv_marker_3() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define dso_local void @rv_marker_3() personality ptr @__gxx_personality_v0 {
 ; CHECK-LABEL: _rv_marker_3:
 ; CHECK:         bl _foo1
 ; CHECK-NEXT:    mov x29, x29
 ; CHECK-NEXT:    bl _objc_retainAutoreleasedReturnValue
 ;
 entry:
-  %call = call i8* @foo1() [ "clang.arc.attachedcall"(i8* (i8*)* @objc_retainAutoreleasedReturnValue) ]
-  invoke void @objc_object(i8* %call) #5
+  %call = call ptr @foo1() [ "clang.arc.attachedcall"(ptr @objc_retainAutoreleasedReturnValue) ]
+  invoke void @objc_object(ptr %call) #5
           to label %invoke.cont unwind label %lpad
 
 invoke.cont:                                      ; preds = %entry
-  tail call void @llvm.objc.release(i8* %call)
+  tail call void @llvm.objc.release(ptr %call)
   ret void
 
 lpad:                                             ; preds = %entry
-  %0 = landingpad { i8*, i32 }
+  %0 = landingpad { ptr, i32 }
           cleanup
-  tail call void @llvm.objc.release(i8* %call)
-  resume { i8*, i32 } %0
+  tail call void @llvm.objc.release(ptr %call)
+  resume { ptr, i32 } %0
 }
 
-define dso_local void @rv_marker_4() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define dso_local void @rv_marker_4() personality ptr @__gxx_personality_v0 {
 ; CHECK-LABEL: _rv_marker_4:
 ; CHECK:       Ltmp3:
 ; CHECK-NEXT:    bl _foo1
@@ -96,53 +96,52 @@ define dso_local void @rv_marker_4() personality i8* bitcast (i32 (...)* @__gxx_
 ;
 entry:
   %s = alloca %struct.S, align 1
-  %0 = getelementptr inbounds %struct.S, %struct.S* %s, i64 0, i32 0
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %0) #2
-  %call = invoke i8* @foo1() [ "clang.arc.attachedcall"(i8* (i8*)* @objc_retainAutoreleasedReturnValue) ]
+  call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %s) #2
+  %call = invoke ptr @foo1() [ "clang.arc.attachedcall"(ptr @objc_retainAutoreleasedReturnValue) ]
           to label %invoke.cont unwind label %lpad
 
 invoke.cont:                                      ; preds = %entry
-  invoke void @objc_object(i8* %call) #5
+  invoke void @objc_object(ptr %call) #5
           to label %invoke.cont2 unwind label %lpad1
 
 invoke.cont2:                                     ; preds = %invoke.cont
-  tail call void @llvm.objc.release(i8* %call)
-  %call3 = call %struct.S* @_ZN1SD1Ev(%struct.S* nonnull dereferenceable(1) %s)
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %0)
+  tail call void @llvm.objc.release(ptr %call)
+  %call3 = call ptr @_ZN1SD1Ev(ptr nonnull dereferenceable(1) %s)
+  call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %s)
   ret void
 
 lpad:                                             ; preds = %entry
-  %1 = landingpad { i8*, i32 }
+  %0 = landingpad { ptr, i32 }
           cleanup
   br label %ehcleanup
 
 lpad1:                                            ; preds = %invoke.cont
-  %2 = landingpad { i8*, i32 }
+  %1 = landingpad { ptr, i32 }
           cleanup
-  tail call void @llvm.objc.release(i8* %call)
+  tail call void @llvm.objc.release(ptr %call)
   br label %ehcleanup
 
 ehcleanup:                                        ; preds = %lpad1, %lpad
-  %.pn = phi { i8*, i32 } [ %2, %lpad1 ], [ %1, %lpad ]
-  %call4 = call %struct.S* @_ZN1SD1Ev(%struct.S* nonnull dereferenceable(1) %s)
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %0)
-  resume { i8*, i32 } %.pn
+  %.pn = phi { ptr, i32 } [ %1, %lpad1 ], [ %0, %lpad ]
+  %call4 = call ptr @_ZN1SD1Ev(ptr nonnull dereferenceable(1) %s)
+  call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %s)
+  resume { ptr, i32 } %.pn
 }
 
-define dso_local i8* @rv_marker_5_indirect_call() {
+define dso_local ptr @rv_marker_5_indirect_call() {
 ; CHECK-LABEL: _rv_marker_5_indirect_call:
 ; CHECK:         ldr [[ADDR:x[0-9]+]], [
 ; CHECK-NEXT:    blr [[ADDR]]
 ; CHECK-NEXT:    mov x29, x29
 ; CHECK-NEXT:    bl _objc_retainAutoreleasedReturnValue
 entry:
-  %0 = load i8* ()*, i8* ()** @fptr, align 8
-  %call = call i8* %0() [ "clang.arc.attachedcall"(i8* (i8*)* @objc_retainAutoreleasedReturnValue) ]
-  tail call void @foo2(i8* %call)
-  ret i8* %call
+  %0 = load ptr, ptr @fptr, align 8
+  %call = call ptr %0() [ "clang.arc.attachedcall"(ptr @objc_retainAutoreleasedReturnValue) ]
+  tail call void @foo2(ptr %call)
+  ret ptr %call
 }
 
-declare i8* @foo(i64, i64, i64)
+declare ptr @foo(i64, i64, i64)
 
 define dso_local void @rv_marker_multiarg(i64 %a, i64 %b, i64 %c) {
 ; CHECK-LABEL: _rv_marker_multiarg:
@@ -152,10 +151,10 @@ define dso_local void @rv_marker_multiarg(i64 %a, i64 %b, i64 %c) {
 ; CHECK-NEXT:    bl  _foo
 ; CHECK-NEXT:    mov x29, x29
 ; CHECK-NEXT:    bl _objc_retainAutoreleasedReturnValue
-  call i8* @foo(i64 %c, i64 %b, i64 %a) [ "clang.arc.attachedcall"(i8* (i8*)* @objc_retainAutoreleasedReturnValue) ]
+  call ptr @foo(i64 %c, i64 %b, i64 %a) [ "clang.arc.attachedcall"(ptr @objc_retainAutoreleasedReturnValue) ]
   ret void
 }
 
-declare i8* @objc_retainAutoreleasedReturnValue(i8*)
-declare i8* @objc_unsafeClaimAutoreleasedReturnValue(i8*)
+declare ptr @objc_retainAutoreleasedReturnValue(ptr)
+declare ptr @objc_unsafeClaimAutoreleasedReturnValue(ptr)
 declare i32 @__gxx_personality_v0(...)
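
Two secondary effects show up in call-rv-marker.ll: the lifetime intrinsics are remangled because their pointer parameter no longer encodes a pointee type, and the unnamed values are renumbered once the now-redundant getelementptr ... i64 0, i32 0 on the alloca is dropped (%1/%2 in the landing pads become %0/%1). The intrinsic change in isolation:

    ; typed pointers
    declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
    ; opaque pointers
    declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)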

diff  --git a/llvm/test/CodeGen/AArch64/callbr-asm-label.ll b/llvm/test/CodeGen/AArch64/callbr-asm-label.ll
index 7b066a02e0387..1818f94a831b9 100644
--- a/llvm/test/CodeGen/AArch64/callbr-asm-label.ll
+++ b/llvm/test/CodeGen/AArch64/callbr-asm-label.ll
@@ -23,7 +23,7 @@ cleanup:
 define void @test2() {
 ; CHECK-LABEL: test2:
 entry:
-  %0 = load i32, i32* @X, align 4
+  %0 = load i32, ptr @X, align 4
   %and = and i32 %0, 1
   %tobool = icmp eq i32 %and, 0
   br i1 %tobool, label %if.end10, label %if.then
@@ -36,11 +36,11 @@ if.then:
           to label %if.then4 [label %if.end6]
 
 if.then4:
-  %call5 = tail call i32 bitcast (i32 (...)* @g to i32 ()*)()
+  %call5 = tail call i32 @g()
   br label %if.end6
 
 if.end6:
-  %.pre = load i32, i32* @X, align 4
+  %.pre = load i32, ptr @X, align 4
   %.pre13 = and i32 %.pre, 1
   %phitmp = icmp eq i32 %.pre13, 0
   br i1 %phitmp, label %if.end10, label %if.then9

diff  --git a/llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll b/llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll
index 4bdcda7b5da37..073429ea78902 100644
--- a/llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll
+++ b/llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll
@@ -2,7 +2,7 @@
 ; RUN:  | llvm-objdump --no-print-imm-hex --triple=aarch64-unknown-linux-gnu -d - \
 ; RUN:  | FileCheck %s
 
-%struct.c = type { i1 (...)* }
+%struct.c = type { ptr }
 
 @l = common hidden local_unnamed_addr global i32 0, align 4
 
@@ -15,19 +15,19 @@
 ; CHECK-NEXT:    ldr x30, [sp], #16
 ; CHECK-NEXT:    ret
 define hidden i32 @test1() {
-  %1 = tail call i32 bitcast (i32 (...)* @g to i32 ()*)()
+  %1 = tail call i32 @g()
   %2 = icmp eq i32 %1, 0
   br i1 %2, label %3, label %5
 
 3:                                                ; preds = %0
-  callbr void asm sideeffect "1: nop\0A\09.quad a\0A\09b ${1:l}\0A\09.quad ${0:c}", "i,!i"(i32* null)
+  callbr void asm sideeffect "1: nop\0A\09.quad a\0A\09b ${1:l}\0A\09.quad ${0:c}", "i,!i"(ptr null)
           to label %4 [label %7]
 
 4:                                                ; preds = %3
   br label %7
 
 5:                                                ; preds = %0
-  %6 = tail call i32 bitcast (i32 (...)* @i to i32 ()*)()
+  %6 = tail call i32 @i()
   br label %7
 
 7:                                                ; preds = %3, %4, %5
@@ -45,21 +45,21 @@ declare dso_local i32 @i(...) local_unnamed_addr
 ; CHECK-LABEL: <$x.6>:
 ; CHECK-NEXT:    b {{.*}} <test2+0x18>
 define hidden i32 @test2() local_unnamed_addr {
-  %1 = load i32, i32* @l, align 4
+  %1 = load i32, ptr @l, align 4
   %2 = icmp eq i32 %1, 0
   br i1 %2, label %10, label %3
 
 3:                                                ; preds = %0
-  %4 = tail call i32 bitcast (i32 (...)* @g to i32 ()*)()
+  %4 = tail call i32 @g()
   %5 = icmp eq i32 %4, 0
   br i1 %5, label %6, label %7
 
 6:                                                ; preds = %3
-  callbr void asm sideeffect "1: nop\0A\09.quad b\0A\09b ${1:l}\0A\09.quad ${0:c}", "i,!i"(i32* null)
+  callbr void asm sideeffect "1: nop\0A\09.quad b\0A\09b ${1:l}\0A\09.quad ${0:c}", "i,!i"(ptr null)
           to label %10 [label %7]
 
 7:                                                ; preds = %3
-  %8 = tail call i32 bitcast (i32 (...)* @i to i32 ()*)()
+  %8 = tail call i32 @i()
   br label %10
 
 9:                                                ; preds = %6
@@ -77,19 +77,19 @@ define hidden i32 @test2() local_unnamed_addr {
 ; CHECK-NEXT:    ldr x30, [sp], #16
 ; CHECK-NEXT:    ret
 define internal i1 @test3() {
-  %1 = tail call i32 bitcast (i32 (...)* @g to i32 ()*)()
+  %1 = tail call i32 @g()
   %2 = icmp eq i32 %1, 0
   br i1 %2, label %3, label %5
 
 3:                                                ; preds = %0
-  callbr void asm sideeffect "1: nop\0A\09.quad c\0A\09b ${1:l}\0A\09.quad ${0:c}", "i,!i"(i32* null)
+  callbr void asm sideeffect "1: nop\0A\09.quad c\0A\09b ${1:l}\0A\09.quad ${0:c}", "i,!i"(ptr null)
           to label %4 [label %8]
 
 4:                                                ; preds = %3
   br label %8
 
 5:                                                ; preds = %0
-  %6 = tail call i32 bitcast (i32 (...)* @i to i32 ()*)()
+  %6 = tail call i32 @i()
   %7 = icmp ne i32 %6, 0
   br label %8
 

diff --git a/llvm/test/CodeGen/AArch64/callee-save.ll b/llvm/test/CodeGen/AArch64/callee-save.ll
index 123403988d44a..c466c07f8f574 100644
--- a/llvm/test/CodeGen/AArch64/callee-save.ll
+++ b/llvm/test/CodeGen/AArch64/callee-save.ll
@@ -12,71 +12,71 @@ define void @foo() {
 
   ; Create lots of live variables to exhaust the supply of
   ; caller-saved registers
-  %val1 = load volatile float, float* @var
-  %val2 = load volatile float, float* @var
-  %val3 = load volatile float, float* @var
-  %val4 = load volatile float, float* @var
-  %val5 = load volatile float, float* @var
-  %val6 = load volatile float, float* @var
-  %val7 = load volatile float, float* @var
-  %val8 = load volatile float, float* @var
-  %val9 = load volatile float, float* @var
-  %val10 = load volatile float, float* @var
-  %val11 = load volatile float, float* @var
-  %val12 = load volatile float, float* @var
-  %val13 = load volatile float, float* @var
-  %val14 = load volatile float, float* @var
-  %val15 = load volatile float, float* @var
-  %val16 = load volatile float, float* @var
-  %val17 = load volatile float, float* @var
-  %val18 = load volatile float, float* @var
-  %val19 = load volatile float, float* @var
-  %val20 = load volatile float, float* @var
-  %val21 = load volatile float, float* @var
-  %val22 = load volatile float, float* @var
-  %val23 = load volatile float, float* @var
-  %val24 = load volatile float, float* @var
-  %val25 = load volatile float, float* @var
-  %val26 = load volatile float, float* @var
-  %val27 = load volatile float, float* @var
-  %val28 = load volatile float, float* @var
-  %val29 = load volatile float, float* @var
-  %val30 = load volatile float, float* @var
-  %val31 = load volatile float, float* @var
-  %val32 = load volatile float, float* @var
+  %val1 = load volatile float, ptr @var
+  %val2 = load volatile float, ptr @var
+  %val3 = load volatile float, ptr @var
+  %val4 = load volatile float, ptr @var
+  %val5 = load volatile float, ptr @var
+  %val6 = load volatile float, ptr @var
+  %val7 = load volatile float, ptr @var
+  %val8 = load volatile float, ptr @var
+  %val9 = load volatile float, ptr @var
+  %val10 = load volatile float, ptr @var
+  %val11 = load volatile float, ptr @var
+  %val12 = load volatile float, ptr @var
+  %val13 = load volatile float, ptr @var
+  %val14 = load volatile float, ptr @var
+  %val15 = load volatile float, ptr @var
+  %val16 = load volatile float, ptr @var
+  %val17 = load volatile float, ptr @var
+  %val18 = load volatile float, ptr @var
+  %val19 = load volatile float, ptr @var
+  %val20 = load volatile float, ptr @var
+  %val21 = load volatile float, ptr @var
+  %val22 = load volatile float, ptr @var
+  %val23 = load volatile float, ptr @var
+  %val24 = load volatile float, ptr @var
+  %val25 = load volatile float, ptr @var
+  %val26 = load volatile float, ptr @var
+  %val27 = load volatile float, ptr @var
+  %val28 = load volatile float, ptr @var
+  %val29 = load volatile float, ptr @var
+  %val30 = load volatile float, ptr @var
+  %val31 = load volatile float, ptr @var
+  %val32 = load volatile float, ptr @var
 
-  store volatile float %val1, float* @var
-  store volatile float %val2, float* @var
-  store volatile float %val3, float* @var
-  store volatile float %val4, float* @var
-  store volatile float %val5, float* @var
-  store volatile float %val6, float* @var
-  store volatile float %val7, float* @var
-  store volatile float %val8, float* @var
-  store volatile float %val9, float* @var
-  store volatile float %val10, float* @var
-  store volatile float %val11, float* @var
-  store volatile float %val12, float* @var
-  store volatile float %val13, float* @var
-  store volatile float %val14, float* @var
-  store volatile float %val15, float* @var
-  store volatile float %val16, float* @var
-  store volatile float %val17, float* @var
-  store volatile float %val18, float* @var
-  store volatile float %val19, float* @var
-  store volatile float %val20, float* @var
-  store volatile float %val21, float* @var
-  store volatile float %val22, float* @var
-  store volatile float %val23, float* @var
-  store volatile float %val24, float* @var
-  store volatile float %val25, float* @var
-  store volatile float %val26, float* @var
-  store volatile float %val27, float* @var
-  store volatile float %val28, float* @var
-  store volatile float %val29, float* @var
-  store volatile float %val30, float* @var
-  store volatile float %val31, float* @var
-  store volatile float %val32, float* @var
+  store volatile float %val1, ptr @var
+  store volatile float %val2, ptr @var
+  store volatile float %val3, ptr @var
+  store volatile float %val4, ptr @var
+  store volatile float %val5, ptr @var
+  store volatile float %val6, ptr @var
+  store volatile float %val7, ptr @var
+  store volatile float %val8, ptr @var
+  store volatile float %val9, ptr @var
+  store volatile float %val10, ptr @var
+  store volatile float %val11, ptr @var
+  store volatile float %val12, ptr @var
+  store volatile float %val13, ptr @var
+  store volatile float %val14, ptr @var
+  store volatile float %val15, ptr @var
+  store volatile float %val16, ptr @var
+  store volatile float %val17, ptr @var
+  store volatile float %val18, ptr @var
+  store volatile float %val19, ptr @var
+  store volatile float %val20, ptr @var
+  store volatile float %val21, ptr @var
+  store volatile float %val22, ptr @var
+  store volatile float %val23, ptr @var
+  store volatile float %val24, ptr @var
+  store volatile float %val25, ptr @var
+  store volatile float %val26, ptr @var
+  store volatile float %val27, ptr @var
+  store volatile float %val28, ptr @var
+  store volatile float %val29, ptr @var
+  store volatile float %val30, ptr @var
+  store volatile float %val31, ptr @var
+  store volatile float %val32, ptr @var
 
 ; CHECK: ldp     d9, d8, [sp
 ; CHECK: ldp     d11, d10, [sp

diff --git a/llvm/test/CodeGen/AArch64/cfguard-checks.ll b/llvm/test/CodeGen/AArch64/cfguard-checks.ll
index 53757fd75b691..1249163280820 100644
--- a/llvm/test/CodeGen/AArch64/cfguard-checks.ll
+++ b/llvm/test/CodeGen/AArch64/cfguard-checks.ll
@@ -11,9 +11,9 @@ declare i32 @target_func()
 ; Test that Control Flow Guard checks are not added on calls with the "guard_nocf" attribute.
 define i32 @func_guard_nocf() {
 entry:
-  %func_ptr = alloca i32 ()*, align 8
-  store i32 ()* @target_func, i32 ()** %func_ptr, align 8
-  %0 = load i32 ()*, i32 ()** %func_ptr, align 8
+  %func_ptr = alloca ptr, align 8
+  store ptr @target_func, ptr %func_ptr, align 8
+  %0 = load ptr, ptr %func_ptr, align 8
   %1 = call i32 %0() #0
   ret i32 %1
 
@@ -29,9 +29,9 @@ attributes #0 = { "guard_nocf" }
 ; Test that Control Flow Guard checks are added even at -O0.
 define i32 @func_optnone_cf() #1 {
 entry:
-  %func_ptr = alloca i32 ()*, align 8
-  store i32 ()* @target_func, i32 ()** %func_ptr, align 8
-  %0 = load i32 ()*, i32 ()** %func_ptr, align 8
+  %func_ptr = alloca ptr, align 8
+  store ptr @target_func, ptr %func_ptr, align 8
+  %0 = load ptr, ptr %func_ptr, align 8
   %1 = call i32 %0()
   ret i32 %1
 
@@ -52,9 +52,9 @@ attributes #1 = { noinline optnone }
 ; Test that Control Flow Guard checks are correctly added in optimized code (common case).
 define i32 @func_cf() {
 entry:
-  %func_ptr = alloca i32 ()*, align 8
-  store i32 ()* @target_func, i32 ()** %func_ptr, align 8
-  %0 = load i32 ()*, i32 ()** %func_ptr, align 8
+  %func_ptr = alloca ptr, align 8
+  store ptr @target_func, ptr %func_ptr, align 8
+  %0 = load ptr, ptr %func_ptr, align 8
   %1 = call i32 %0()
   ret i32 %1
 
@@ -71,20 +71,20 @@ entry:
 
 
 ; Test that Control Flow Guard checks are correctly added on invoke instructions.
-define i32 @func_cf_invoke() personality i8* bitcast (void ()* @h to i8*) {
+define i32 @func_cf_invoke() personality ptr @h {
 entry:
   %0 = alloca i32, align 4
-  %func_ptr = alloca i32 ()*, align 8
-  store i32 ()* @target_func, i32 ()** %func_ptr, align 8
-  %1 = load i32 ()*, i32 ()** %func_ptr, align 8
+  %func_ptr = alloca ptr, align 8
+  store ptr @target_func, ptr %func_ptr, align 8
+  %1 = load ptr, ptr %func_ptr, align 8
   %2 = invoke i32 %1()
           to label %invoke.cont unwind label %lpad
 invoke.cont:                                      ; preds = %entry
   ret i32 %2
 
 lpad:                                             ; preds = %entry
-  %tmp = landingpad { i8*, i32 }
-          catch i8* null
+  %tmp = landingpad { ptr, i32 }
+          catch ptr null
   ret i32 -1
 
   ; The call to __guard_check_icall_fptr should come immediately before the call to the target function.
@@ -111,23 +111,23 @@ declare void @h()
 define i32 @func_cf_setjmp() {
   %1 = alloca i32, align 4
   %2 = alloca i32, align 4
-  store i32 0, i32* %1, align 4
-  store i32 -1, i32* %2, align 4
-  %3 = call i8* @llvm.frameaddress(i32 0)
-  %4 = call i32 @_setjmp(i8* bitcast ([16 x %struct._SETJMP_FLOAT128]* @buf1 to i8*), i8* %3) #2
+  store i32 0, ptr %1, align 4
+  store i32 -1, ptr %2, align 4
+  %3 = call ptr @llvm.frameaddress(i32 0)
+  %4 = call i32 @_setjmp(ptr @buf1, ptr %3) #2
 
   ; CHECK-LABEL: func_cf_setjmp
   ; CHECK:       bl _setjmp
   ; CHECK-NEXT:  $cfgsj_func_cf_setjmp0:
 
-  %5 = call i8* @llvm.frameaddress(i32 0)
-  %6 = call i32 @_setjmp(i8* bitcast ([16 x %struct._SETJMP_FLOAT128]* @buf1 to i8*), i8* %5) #3
+  %5 = call ptr @llvm.frameaddress(i32 0)
+  %6 = call i32 @_setjmp(ptr @buf1, ptr %5) #3
 
   ; CHECK:       bl _setjmp
   ; CHECK-NEXT:  $cfgsj_func_cf_setjmp1:
 
-  store i32 1, i32* %2, align 4
-  %7 = load i32, i32* %2, align 4
+  store i32 1, ptr %2, align 4
+  %7 = load i32, ptr %2, align 4
   ret i32 %7
 
   ; CHECK:       .section .gljmp$y,"dr"
@@ -135,10 +135,10 @@ define i32 @func_cf_setjmp() {
   ; CHECK-NEXT:  .symidx $cfgsj_func_cf_setjmp1
 }
 
-declare i8* @llvm.frameaddress(i32)
+declare ptr @llvm.frameaddress(i32)
 
 ; Function Attrs: returns_twice
-declare dso_local i32 @_setjmp(i8*, i8*) #2
+declare dso_local i32 @_setjmp(ptr, ptr) #2
 
 attributes #2 = { returns_twice }
 attributes #3 = { returns_twice }

diff --git a/llvm/test/CodeGen/AArch64/cfguard-module-flag.ll b/llvm/test/CodeGen/AArch64/cfguard-module-flag.ll
index 735d5c2dcc992..317ad2c42a3e1 100644
--- a/llvm/test/CodeGen/AArch64/cfguard-module-flag.ll
+++ b/llvm/test/CodeGen/AArch64/cfguard-module-flag.ll
@@ -11,9 +11,9 @@ declare void @target_func()
 
 define void @func_in_module_without_cfguard() #0 {
 entry:
-  %func_ptr = alloca void ()*, align 8
-  store void ()* @target_func, void ()** %func_ptr, align 8
-  %0 = load void ()*, void ()** %func_ptr, align 8
+  %func_ptr = alloca ptr, align 8
+  store ptr @target_func, ptr %func_ptr, align 8
+  %0 = load ptr, ptr %func_ptr, align 8
 
   call void %0()
   ret void

diff --git a/llvm/test/CodeGen/AArch64/cgp-trivial-phi-node.ll b/llvm/test/CodeGen/AArch64/cgp-trivial-phi-node.ll
index 6e2d4a251abc2..98b820709e829 100644
--- a/llvm/test/CodeGen/AArch64/cgp-trivial-phi-node.ll
+++ b/llvm/test/CodeGen/AArch64/cgp-trivial-phi-node.ll
@@ -1,32 +1,32 @@
 ; Checks that case when GEP is bound to trivial PHI node is correctly handled.
 ; RUN: opt %s -mtriple=aarch64-linux-gnu -codegenprepare -S -o - | FileCheck %s
 
-; CHECK:      define void @crash([65536 x i32]** %s, i32 %n) {
+; CHECK:      define void @crash(ptr %s, i32 %n) {
 ; CHECK-NEXT: entry:
-; CHECK-NEXT:   %struct = load [65536 x i32]*, [65536 x i32]** %s
-; CHECK-NEXT:   %gep0 = getelementptr [65536 x i32], [65536 x i32]* %struct, i64 0, i32 20000
-; CHECK-NEXT:   store i32 %n, i32* %gep0
+; CHECK-NEXT:   %struct = load ptr, ptr %s
+; CHECK-NEXT:   %gep0 = getelementptr [65536 x i32], ptr %struct, i64 0, i32 20000
+; CHECK-NEXT:   store i32 %n, ptr %gep0
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: }
 
-define void @crash([65536 x i32]** %s, i32 %n) {
+define void @crash(ptr %s, i32 %n) {
 entry:
-  %struct = load [65536 x i32]*, [65536 x i32]** %s
+  %struct = load ptr, ptr %s
   %cmp = icmp slt i32 0, %n
   br i1 %cmp, label %baz, label %bar
 baz:
   br label %bar
 
 foo:
-  %gep0 = getelementptr [65536 x i32], [65536 x i32]* %phi2, i64 0, i32 20000
+  %gep0 = getelementptr [65536 x i32], ptr %phi2, i64 0, i32 20000
   br label %st
 
 st:
-  store i32 %n, i32* %gep0
+  store i32 %n, ptr %gep0
   br label %out
 
 bar:
-  %phi2 = phi [65536 x i32]* [ %struct, %baz ], [ %struct, %entry ]
+  %phi2 = phi ptr [ %struct, %baz ], [ %struct, %entry ]
   br label %foo
 out:
   ret void

diff --git a/llvm/test/CodeGen/AArch64/cgp-usubo.ll b/llvm/test/CodeGen/AArch64/cgp-usubo.ll
index 8a7e301efa826..6b820b635f80e 100644
--- a/llvm/test/CodeGen/AArch64/cgp-usubo.ll
+++ b/llvm/test/CodeGen/AArch64/cgp-usubo.ll
@@ -3,7 +3,7 @@
 
 ; CodeGenPrepare is expected to form overflow intrinsics to improve DAG/isel.
 
-define i1 @usubo_ult_i64(i64 %x, i64 %y, i64* %p) nounwind {
+define i1 @usubo_ult_i64(i64 %x, i64 %y, ptr %p) nounwind {
 ; CHECK-LABEL: usubo_ult_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs x8, x0, x1
@@ -11,14 +11,14 @@ define i1 @usubo_ult_i64(i64 %x, i64 %y, i64* %p) nounwind {
 ; CHECK-NEXT:    str x8, [x2]
 ; CHECK-NEXT:    ret
   %s = sub i64 %x, %y
-  store i64 %s, i64* %p
+  store i64 %s, ptr %p
   %ov = icmp ult i64 %x, %y
   ret i1 %ov
 }
 
 ; Verify insertion point for single-BB. Toggle predicate.
 
-define i1 @usubo_ugt_i32(i32 %x, i32 %y, i32* %p) nounwind {
+define i1 @usubo_ugt_i32(i32 %x, i32 %y, ptr %p) nounwind {
 ; CHECK-LABEL: usubo_ugt_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs w8, w0, w1
@@ -27,13 +27,13 @@ define i1 @usubo_ugt_i32(i32 %x, i32 %y, i32* %p) nounwind {
 ; CHECK-NEXT:    ret
   %ov = icmp ugt i32 %y, %x
   %s = sub i32 %x, %y
-  store i32 %s, i32* %p
+  store i32 %s, ptr %p
   ret i1 %ov
 }
 
 ; Constant operand should match.
 
-define i1 @usubo_ugt_constant_op0_i8(i8 %x, i8* %p) nounwind {
+define i1 @usubo_ugt_constant_op0_i8(i8 %x, ptr %p) nounwind {
 ; CHECK-LABEL: usubo_ugt_constant_op0_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xff
@@ -46,13 +46,13 @@ define i1 @usubo_ugt_constant_op0_i8(i8 %x, i8* %p) nounwind {
 ; CHECK-NEXT:    ret
   %s = sub i8 42, %x
   %ov = icmp ugt i8 %x, 42
-  store i8 %s, i8* %p
+  store i8 %s, ptr %p
   ret i1 %ov
 }
 
 ; Compare with constant operand 0 is canonicalized by commuting, but verify match for non-canonical form.
 
-define i1 @usubo_ult_constant_op0_i16(i16 %x, i16* %p) nounwind {
+define i1 @usubo_ult_constant_op0_i16(i16 %x, ptr %p) nounwind {
 ; CHECK-LABEL: usubo_ult_constant_op0_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xffff
@@ -65,13 +65,13 @@ define i1 @usubo_ult_constant_op0_i16(i16 %x, i16* %p) nounwind {
 ; CHECK-NEXT:    ret
   %s = sub i16 43, %x
   %ov = icmp ult i16 43, %x
-  store i16 %s, i16* %p
+  store i16 %s, ptr %p
   ret i1 %ov
 }
 
 ; Subtract with constant operand 1 is canonicalized to add.
 
-define i1 @usubo_ult_constant_op1_i16(i16 %x, i16* %p) nounwind {
+define i1 @usubo_ult_constant_op1_i16(i16 %x, ptr %p) nounwind {
 ; CHECK-LABEL: usubo_ult_constant_op1_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xffff
@@ -83,11 +83,11 @@ define i1 @usubo_ult_constant_op1_i16(i16 %x, i16* %p) nounwind {
 ; CHECK-NEXT:    ret
   %s = add i16 %x, -44
   %ov = icmp ult i16 %x, 44
-  store i16 %s, i16* %p
+  store i16 %s, ptr %p
   ret i1 %ov
 }
 
-define i1 @usubo_ugt_constant_op1_i8(i8 %x, i8* %p) nounwind {
+define i1 @usubo_ugt_constant_op1_i8(i8 %x, ptr %p) nounwind {
 ; CHECK-LABEL: usubo_ugt_constant_op1_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xff
@@ -99,13 +99,13 @@ define i1 @usubo_ugt_constant_op1_i8(i8 %x, i8* %p) nounwind {
 ; CHECK-NEXT:    ret
   %ov = icmp ugt i8 45, %x
   %s = add i8 %x, -45
-  store i8 %s, i8* %p
+  store i8 %s, ptr %p
   ret i1 %ov
 }
 
 ; Special-case: subtract 1 changes the compare predicate and constant.
 
-define i1 @usubo_eq_constant1_op1_i32(i32 %x, i32* %p) nounwind {
+define i1 @usubo_eq_constant1_op1_i32(i32 %x, ptr %p) nounwind {
 ; CHECK-LABEL: usubo_eq_constant1_op1_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmp w0, #0
@@ -116,7 +116,7 @@ define i1 @usubo_eq_constant1_op1_i32(i32 %x, i32* %p) nounwind {
 ; CHECK-NEXT:    ret
   %s = add i32 %x, -1
   %ov = icmp eq i32 %x, 0
-  store i32 %s, i32* %p
+  store i32 %s, ptr %p
   ret i1 %ov
 }
 
@@ -124,7 +124,7 @@ define i1 @usubo_eq_constant1_op1_i32(i32 %x, i32* %p) nounwind {
 
 declare void @call(i1)
 
-define i1 @usubo_ult_sub_dominates_i64(i64 %x, i64 %y, i64* %p, i1 %cond) nounwind {
+define i1 @usubo_ult_sub_dominates_i64(i64 %x, i64 %y, ptr %p, i1 %cond) nounwind {
 ; CHECK-LABEL: usubo_ult_sub_dominates_i64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    tbz w3, #0, .LBB7_2
@@ -140,7 +140,7 @@ entry:
 
 t:
   %s = sub i64 %x, %y
-  store i64 %s, i64* %p
+  store i64 %s, ptr %p
   br i1 %cond, label %end, label %f
 
 f:
@@ -151,7 +151,7 @@ end:
   ret i1 %ov
 }
 
-define i1 @usubo_ult_cmp_dominates_i64(i64 %x, i64 %y, i64* %p, i1 %cond) nounwind {
+define i1 @usubo_ult_cmp_dominates_i64(i64 %x, i64 %y, ptr %p, i1 %cond) nounwind {
 ; CHECK-LABEL: usubo_ult_cmp_dominates_i64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stp x30, x23, [sp, #-48]! // 16-byte Folded Spill
@@ -191,7 +191,7 @@ f:
 
 end:
   %s = sub i64 %x, %y
-  store i64 %s, i64* %p
+  store i64 %s, ptr %p
   ret i1 %ov
 }
 

diff --git a/llvm/test/CodeGen/AArch64/cmp-bool.ll b/llvm/test/CodeGen/AArch64/cmp-bool.ll
index 907d982a7efd1..cddff9799f12b 100644
--- a/llvm/test/CodeGen/AArch64/cmp-bool.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-bool.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64 < %s | FileCheck %s
 
-define void @bool_eq(i1 zeroext %a, i1 zeroext %b, void ()* nocapture %c) nounwind {
+define void @bool_eq(i1 zeroext %a, i1 zeroext %b, ptr nocapture %c) nounwind {
 ; CHECK-LABEL: bool_eq:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cmp w0, w1
@@ -22,7 +22,7 @@ if.end:
   ret void
 }
 
-define void @bool_ne(i1 zeroext %a, i1 zeroext %b, void ()* nocapture %c) nounwind {
+define void @bool_ne(i1 zeroext %a, i1 zeroext %b, ptr nocapture %c) nounwind {
 ; CHECK-LABEL: bool_ne:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cmp w0, w1

diff --git a/llvm/test/CodeGen/AArch64/cmp-frameindex.ll b/llvm/test/CodeGen/AArch64/cmp-frameindex.ll
index 03420fe2fc8f2..186b81ad8b7c3 100644
--- a/llvm/test/CodeGen/AArch64/cmp-frameindex.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-frameindex.ll
@@ -15,7 +15,7 @@ define void @test_frameindex_cmp() {
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %stack = alloca i8
-  %stack.int = ptrtoint i8* %stack to i64
+  %stack.int = ptrtoint ptr %stack to i64
   %cmp = icmp ne i64 %stack.int, 0
   br i1 %cmp, label %bb1, label %bb2
 

diff --git a/llvm/test/CodeGen/AArch64/cmpwithshort.ll b/llvm/test/CodeGen/AArch64/cmpwithshort.ll
index a0475c4efcce3..8dbfdae5df903 100644
--- a/llvm/test/CodeGen/AArch64/cmpwithshort.ll
+++ b/llvm/test/CodeGen/AArch64/cmpwithshort.ll
@@ -1,12 +1,11 @@
 ; RUN: llc < %s -O3 -mtriple=aarch64-eabi | FileCheck %s 
 
-define i16 @test_1cmp_signed_1(i16* %ptr1) {
+define i16 @test_1cmp_signed_1(ptr %ptr1) {
 ; CHECK-LABEL: @test_1cmp_signed_1
 ; CHECK: ldrsh
 ; CHECK-NEXT: cmn
 entry:
-  %addr = getelementptr inbounds i16, i16* %ptr1, i16 0
-  %val = load i16, i16* %addr, align 2
+  %val = load i16, ptr %ptr1, align 2
   %cmp = icmp eq i16 %val, -1
   br i1 %cmp, label %if, label %if.then
 if:
@@ -15,13 +14,12 @@ if.then:
   ret i16 0
 }
 
-define i16 @test_1cmp_signed_2(i16* %ptr1) {
+define i16 @test_1cmp_signed_2(ptr %ptr1) {
 ; CHECK-LABEL: @test_1cmp_signed_2
 ; CHECK: ldrsh
 ; CHECK-NEXT: cmn
 entry:
-  %addr = getelementptr inbounds i16, i16* %ptr1, i16 0
-  %val = load i16, i16* %addr, align 2
+  %val = load i16, ptr %ptr1, align 2
   %cmp = icmp sge i16 %val, -1
   br i1 %cmp, label %if, label %if.then
 if:
@@ -30,13 +28,12 @@ if.then:
   ret i16 0
 }
 
-define i16 @test_1cmp_unsigned_1(i16* %ptr1) {
+define i16 @test_1cmp_unsigned_1(ptr %ptr1) {
 ; CHECK-LABEL: @test_1cmp_unsigned_1
 ; CHECK: ldrsh
 ; CHECK-NEXT: cmn
 entry:
-  %addr = getelementptr inbounds i16, i16* %ptr1, i16 0
-  %val = load i16, i16* %addr, align 2
+  %val = load i16, ptr %ptr1, align 2
   %cmp = icmp uge i16 %val, -1
   br i1 %cmp, label %if, label %if.then
 if:

diff --git a/llvm/test/CodeGen/AArch64/cmpxchg-O0.ll b/llvm/test/CodeGen/AArch64/cmpxchg-O0.ll
index fc498fb07079e..1bf1477b79ced 100644
--- a/llvm/test/CodeGen/AArch64/cmpxchg-O0.ll
+++ b/llvm/test/CodeGen/AArch64/cmpxchg-O0.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -O0 -fast-isel=0 -global-isel=false %s -o - | FileCheck -enable-var-scope %s
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -O0 -fast-isel=0 -global-isel=false -mattr=+outline-atomics %s -o - | FileCheck -enable-var-scope %s --check-prefix=OUTLINE-ATOMICS
 
-define { i8, i1 } @test_cmpxchg_8(i8* %addr, i8 %desired, i8 %new) nounwind {
+define { i8, i1 } @test_cmpxchg_8(ptr %addr, i8 %desired, i8 %new) nounwind {
 ; OUTLINE-ATOMICS: bl __aarch64_cas1_acq_rel
 ; CHECK-LABEL: test_cmpxchg_8:
 ; CHECK:     mov [[ADDR:x[0-9]+]], x0
@@ -14,11 +14,11 @@ define { i8, i1 } @test_cmpxchg_8(i8* %addr, i8 %desired, i8 %new) nounwind {
 ; CHECK: [[DONE]]:
 ; CHECK:     subs {{w[0-9]+}}, [[OLD]], w1, uxtb
 ; CHECK:     cset {{w[0-9]+}}, eq
-  %res = cmpxchg i8* %addr, i8 %desired, i8 %new seq_cst monotonic
+  %res = cmpxchg ptr %addr, i8 %desired, i8 %new seq_cst monotonic
   ret { i8, i1 } %res
 }
 
-define { i16, i1 } @test_cmpxchg_16(i16* %addr, i16 %desired, i16 %new) nounwind {
+define { i16, i1 } @test_cmpxchg_16(ptr %addr, i16 %desired, i16 %new) nounwind {
 ; OUTLINE-ATOMICS: bl __aarch64_cas2_acq_rel
 ; CHECK-LABEL: test_cmpxchg_16:
 ; CHECK:     mov [[ADDR:x[0-9]+]], x0
@@ -31,11 +31,11 @@ define { i16, i1 } @test_cmpxchg_16(i16* %addr, i16 %desired, i16 %new) nounwind
 ; CHECK: [[DONE]]:
 ; CHECK:     subs {{w[0-9]+}}, [[OLD]], w1
 ; CHECK:     cset {{w[0-9]+}}, eq
-  %res = cmpxchg i16* %addr, i16 %desired, i16 %new seq_cst monotonic
+  %res = cmpxchg ptr %addr, i16 %desired, i16 %new seq_cst monotonic
   ret { i16, i1 } %res
 }
 
-define { i32, i1 } @test_cmpxchg_32(i32* %addr, i32 %desired, i32 %new) nounwind {
+define { i32, i1 } @test_cmpxchg_32(ptr %addr, i32 %desired, i32 %new) nounwind {
 ; OUTLINE-ATOMICS: bl __aarch64_cas4_acq_rel
 ; CHECK-LABEL: test_cmpxchg_32:
 ; CHECK:     mov [[ADDR:x[0-9]+]], x0
@@ -48,11 +48,11 @@ define { i32, i1 } @test_cmpxchg_32(i32* %addr, i32 %desired, i32 %new) nounwind
 ; CHECK: [[DONE]]:
 ; CHECK:     subs {{w[0-9]+}}, [[OLD]], w1
 ; CHECK:     cset {{w[0-9]+}}, eq
-  %res = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic
+  %res = cmpxchg ptr %addr, i32 %desired, i32 %new seq_cst monotonic
   ret { i32, i1 } %res
 }
 
-define { i64, i1 } @test_cmpxchg_64(i64* %addr, i64 %desired, i64 %new) nounwind {
+define { i64, i1 } @test_cmpxchg_64(ptr %addr, i64 %desired, i64 %new) nounwind {
 ; OUTLINE-ATOMICS: bl __aarch64_cas8_acq_rel
 ; CHECK-LABEL: test_cmpxchg_64:
 ; CHECK:     mov [[ADDR:x[0-9]+]], x0
@@ -65,11 +65,11 @@ define { i64, i1 } @test_cmpxchg_64(i64* %addr, i64 %desired, i64 %new) nounwind
 ; CHECK: [[DONE]]:
 ; CHECK:     subs {{x[0-9]+}}, [[OLD]], x1
 ; CHECK:     cset {{w[0-9]+}}, eq
-  %res = cmpxchg i64* %addr, i64 %desired, i64 %new seq_cst monotonic
+  %res = cmpxchg ptr %addr, i64 %desired, i64 %new seq_cst monotonic
   ret { i64, i1 } %res
 }
 
-define { i128, i1 } @test_cmpxchg_128(i128* %addr, i128 %desired, i128 %new) nounwind {
+define { i128, i1 } @test_cmpxchg_128(ptr %addr, i128 %desired, i128 %new) nounwind {
 ; OUTLINE-ATOMICS: bl __aarch64_cas16_acq_rel
 ; CHECK-LABEL: test_cmpxchg_128:
 ; CHECK:     mov [[ADDR:x[0-9]+]], x0
@@ -83,7 +83,7 @@ define { i128, i1 } @test_cmpxchg_128(i128* %addr, i128 %desired, i128 %new) nou
 ; CHECK:     stlxp [[STATUS:w[0-9]+]], x4, x5, [[[ADDR]]]
 ; CHECK:     cbnz [[STATUS]], [[RETRY]]
 ; CHECK: [[DONE]]:
-  %res = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst monotonic
+  %res = cmpxchg ptr %addr, i128 %desired, i128 %new seq_cst monotonic
   ret { i128, i1 } %res
 }
 
@@ -91,7 +91,7 @@ define { i128, i1 } @test_cmpxchg_128(i128* %addr, i128 %desired, i128 %new) nou
 ; type-legalized into some kind of BUILD_PAIR operation and crashed when this
 ; was false.
 @var128 = dso_local global i128 0
-define {i128, i1} @test_cmpxchg_128_unsplit(i128* %addr) {
+define {i128, i1} @test_cmpxchg_128_unsplit(ptr %addr) {
 ; OUTLINE-ATOMICS: bl __aarch64_cas16_acq_rel
 ; CHECK-LABEL: test_cmpxchg_128_unsplit:
 ; CHECK:     mov [[ADDR:x[0-9]+]], x0
@@ -109,8 +109,8 @@ define {i128, i1} @test_cmpxchg_128_unsplit(i128* %addr) {
 ; CHECK:     cbnz [[STATUS]], [[RETRY]]
 ; CHECK: [[DONE]]:
 
-  %desired = load volatile i128, i128* @var128
-  %new = load volatile i128, i128* @var128
-  %val = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
+  %desired = load volatile i128, ptr @var128
+  %new = load volatile i128, ptr @var128
+  %val = cmpxchg ptr %addr, i128 %desired, i128 %new seq_cst seq_cst
   ret { i128, i1 } %val
 }

diff --git a/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll b/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll
index 8fbac25d4f483..ec1020b392cce 100644
--- a/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll
+++ b/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=aarch64-apple-ios7.0 -o - %s | FileCheck %s
 ; RUN: llc -mtriple=aarch64-apple-ios7.0 -mattr=+outline-atomics -o - %s | FileCheck %s --check-prefix=OUTLINE-ATOMICS
 
-define i32 @test_return(i32* %p, i32 %oldval, i32 %newval) {
+define i32 @test_return(ptr %p, i32 %oldval, i32 %newval) {
 ; CHECK-LABEL: test_return:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:  LBB0_1: ; %cmpxchg.start
@@ -42,14 +42,14 @@ define i32 @test_return(i32* %p, i32 %oldval, i32 %newval) {
 ; OUTLINE-ATOMICS-NEXT:    cset w0, eq
 ; OUTLINE-ATOMICS-NEXT:    ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %pair = cmpxchg i32* %p, i32 %oldval, i32 %newval seq_cst seq_cst
+  %pair = cmpxchg ptr %p, i32 %oldval, i32 %newval seq_cst seq_cst
   %success = extractvalue { i32, i1 } %pair, 1
   %conv = zext i1 %success to i32
   ret i32 %conv
 }
 
 ; FIXME: DAG combine should be able to deal with this EOR better.
-define i1 @test_return_bool(i8* %value, i8 %oldValue, i8 %newValue) {
+define i1 @test_return_bool(ptr %value, i8 %oldValue, i8 %newValue) {
 ; CHECK-LABEL: test_return_bool:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    and w8, w1, #0xff
@@ -93,13 +93,13 @@ define i1 @test_return_bool(i8* %value, i8 %oldValue, i8 %newValue) {
 ; OUTLINE-ATOMICS-NEXT:    eor w0, w8, #0x1
 ; OUTLINE-ATOMICS-NEXT:    ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
-  %pair = cmpxchg i8* %value, i8 %oldValue, i8 %newValue acq_rel monotonic
+  %pair = cmpxchg ptr %value, i8 %oldValue, i8 %newValue acq_rel monotonic
   %success = extractvalue { i8, i1 } %pair, 1
   %failure = xor i1 %success, 1
   ret i1 %failure
 }
 
-define void @test_conditional(i32* %p, i32 %oldval, i32 %newval) {
+define void @test_conditional(ptr %p, i32 %oldval, i32 %newval) {
 ; CHECK-LABEL: test_conditional:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:  LBB2_1: ; %cmpxchg.start
@@ -142,7 +142,7 @@ define void @test_conditional(i32* %p, i32 %oldval, i32 %newval) {
 ; OUTLINE-ATOMICS-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    b _baz
-  %pair = cmpxchg i32* %p, i32 %oldval, i32 %newval seq_cst seq_cst
+  %pair = cmpxchg ptr %p, i32 %oldval, i32 %newval seq_cst seq_cst
   %success = extractvalue { i32, i1 } %pair, 1
   br i1 %success, label %true, label %false
 
@@ -162,7 +162,7 @@ declare void @bar()
 declare void @baz()
 
 ; verify the preheader is simplified by simplifycfg.
-define i1 @test_conditional2(i32 %a, i32 %b, i32* %c) {
+define i1 @test_conditional2(i32 %a, i32 %b, ptr %c) {
 ; CHECK-LABEL: test_conditional2:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
@@ -262,7 +262,7 @@ define i1 @test_conditional2(i32 %a, i32 %b, i32* %c) {
 ; OUTLINE-ATOMICS-NEXT:    ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
 ; OUTLINE-ATOMICS-NEXT:    ret
 entry:
-  %pair = cmpxchg i32* %c, i32 %a, i32 %b seq_cst seq_cst
+  %pair = cmpxchg ptr %c, i32 %a, i32 %b seq_cst seq_cst
   %success = extractvalue { i32, i1 } %pair, 1
   br label %for.cond
 
@@ -280,13 +280,13 @@ for.cond.cleanup:                                 ; preds = %for.cond
 for.body:                                         ; preds = %for.cond
   %or = or i32 %a, %b
   %idxprom = sext i32 %dec to i64
-  %arrayidx = getelementptr inbounds i32, i32* %c, i64 %idxprom
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %c, i64 %idxprom
+  %0 = load i32, ptr %arrayidx, align 4
   %cmp = icmp eq i32 %or, %0
   br i1 %cmp, label %if.end, label %if.then
 
 if.then:                                          ; preds = %for.body
-  store i32 %or, i32* %arrayidx, align 4
+  store i32 %or, ptr %arrayidx, align 4
   tail call void @foo()
   br label %if.end
 

diff --git a/llvm/test/CodeGen/AArch64/cmpxchg-lse-even-regs.ll b/llvm/test/CodeGen/AArch64/cmpxchg-lse-even-regs.ll
index ab083b3c91d90..506a8272ab5bc 100644
--- a/llvm/test/CodeGen/AArch64/cmpxchg-lse-even-regs.ll
+++ b/llvm/test/CodeGen/AArch64/cmpxchg-lse-even-regs.ll
@@ -5,7 +5,7 @@
 ; doesn't allocate odd ones and that it can copy them around properly. N.b. we
 ; don't actually check that they're sequential because FileCheck can't; odd/even
 ; will have to be good enough.
-define void @test_atomic_cmpxchg_i128_register_shuffling(i128* %addr, i128 %desired, i128 %new) nounwind {
+define void @test_atomic_cmpxchg_i128_register_shuffling(ptr %addr, i128 %desired, i128 %new) nounwind {
 ; CHECK-LABEL: test_atomic_cmpxchg_i128_register_shuffling:
 ; CHECK-DAG: mov [[DESIRED_LO:x[0-9]*[02468]]], x1
 ; CHECK-DAG: mov [[DESIRED_HI:x[0-9]*[13579]]], x2
@@ -13,6 +13,6 @@ define void @test_atomic_cmpxchg_i128_register_shuffling(i128* %addr, i128 %desi
 ; CHECK-DAG: mov [[NEW_HI:x[0-9]*[13579]]], x4
 ; CHECK: caspal [[DESIRED_LO]], [[DESIRED_HI]], [[NEW_LO]], [[NEW_HI]], [x0]
 
-  %res = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
+  %res = cmpxchg ptr %addr, i128 %desired, i128 %new seq_cst seq_cst
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/code-model-large-abs.ll b/llvm/test/CodeGen/AArch64/code-model-large-abs.ll
index 2a293322190be..e6f1f28778df0 100644
--- a/llvm/test/CodeGen/AArch64/code-model-large-abs.ll
+++ b/llvm/test/CodeGen/AArch64/code-model-large-abs.ll
@@ -5,9 +5,9 @@
 @var32 = dso_local global i32 0
 @var64 = dso_local global i64 0
 
-define dso_local i8* @global_addr() {
+define dso_local ptr @global_addr() {
 ; CHECK-LABEL: global_addr:
-  ret i8* @var8
+  ret ptr @var8
   ; The movz/movk calculation should end up returned directly in x0.
 ; CHECK: movz x0, #:abs_g0_nc:var8
 ; CHECK: movk x0, #:abs_g1_nc:var8
@@ -18,7 +18,7 @@ define dso_local i8* @global_addr() {
 
 define dso_local i8 @global_i8() {
 ; CHECK-LABEL: global_i8:
-  %val = load i8, i8* @var8
+  %val = load i8, ptr @var8
   ret i8 %val
 ; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:var8
 ; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var8
@@ -29,7 +29,7 @@ define dso_local i8 @global_i8() {
 
 define dso_local i16 @global_i16() {
 ; CHECK-LABEL: global_i16:
-  %val = load i16, i16* @var16
+  %val = load i16, ptr @var16
   ret i16 %val
 ; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:var16
 ; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var16
@@ -40,7 +40,7 @@ define dso_local i16 @global_i16() {
 
 define dso_local i32 @global_i32() {
 ; CHECK-LABEL: global_i32:
-  %val = load i32, i32* @var32
+  %val = load i32, ptr @var32
   ret i32 %val
 ; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:var32
 ; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var32
@@ -51,7 +51,7 @@ define dso_local i32 @global_i32() {
 
 define dso_local i64 @global_i64() {
 ; CHECK-LABEL: global_i64:
-  %val = load i64, i64* @var64
+  %val = load i64, ptr @var64
   ret i64 %val
 ; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g0_nc:var64
 ; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var64

diff --git a/llvm/test/CodeGen/AArch64/code-model-tiny-abs.ll b/llvm/test/CodeGen/AArch64/code-model-tiny-abs.ll
index 7b71c8e0547f6..9a2f4840f79e2 100644
--- a/llvm/test/CodeGen/AArch64/code-model-tiny-abs.ll
+++ b/llvm/test/CodeGen/AArch64/code-model-tiny-abs.ll
@@ -5,9 +5,9 @@
 @var32 = dso_local global i32 0
 @var64 = dso_local global i64 0
 
-define dso_local i8* @global_addr() {
+define dso_local ptr @global_addr() {
 ; CHECK-LABEL: global_addr:
-  ret i8* @var8
+  ret ptr @var8
   ; The adr calculation should end up returned directly in x0.
 ; CHECK: adr x0, var8
 ; CHECK-NEXT: ret
@@ -15,7 +15,7 @@ define dso_local i8* @global_addr() {
 
 define dso_local i8 @global_i8() {
 ; CHECK-LABEL: global_i8:
-  %val = load i8, i8* @var8
+  %val = load i8, ptr @var8
   ret i8 %val
 ; CHECK: adr x[[ADDR_REG:[0-9]+]], var8
 ; CHECK: ldrb w0, [x[[ADDR_REG]]]
@@ -23,7 +23,7 @@ define dso_local i8 @global_i8() {
 
 define dso_local i16 @global_i16() {
 ; CHECK-LABEL: global_i16:
-  %val = load i16, i16* @var16
+  %val = load i16, ptr @var16
   ret i16 %val
 ; CHECK: adr x[[ADDR_REG:[0-9]+]], var16
 ; CHECK: ldrh w0, [x[[ADDR_REG]]]
@@ -31,14 +31,14 @@ define dso_local i16 @global_i16() {
 
 define dso_local i32 @global_i32() {
 ; CHECK-LABEL: global_i32:
-  %val = load i32, i32* @var32
+  %val = load i32, ptr @var32
   ret i32 %val
 ; CHECK: ldr w0, var32
 }
 
 define dso_local i64 @global_i64() {
 ; CHECK-LABEL: global_i64:
-  %val = load i64, i64* @var64
+  %val = load i64, ptr @var64
   ret i64 %val
 ; CHECK: ldr x0, var64
 }

diff --git a/llvm/test/CodeGen/AArch64/combine-andintoload.ll b/llvm/test/CodeGen/AArch64/combine-andintoload.ll
index 693d318b272a2..82e6ae936253f 100644
--- a/llvm/test/CodeGen/AArch64/combine-andintoload.ll
+++ b/llvm/test/CodeGen/AArch64/combine-andintoload.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=aarch64-none-eabi -o - | FileCheck %s
 ; RUN: llc < %s -mtriple=aarch64_be-none-eabi -o - | FileCheck %s --check-prefix=CHECKBE
 
-define i64 @load32_and16_and(i32* %p, i64 %y) {
+define i64 @load32_and16_and(ptr %p, i64 %y) {
 ; CHECK-LABEL: load32_and16_and:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -16,14 +16,14 @@ define i64 @load32_and16_and(i32* %p, i64 %y) {
 ; CHECKBE-NEXT:    and w8, w1, w8
 ; CHECKBE-NEXT:    and x0, x8, #0xffff
 ; CHECKBE-NEXT:    ret
-  %x = load i32, i32* %p, align 4
+  %x = load i32, ptr %p, align 4
   %xz = zext i32 %x to i64
   %ym = and i64 %y, 65535
   %r = and i64 %ym, %xz
   ret i64 %r
 }
 
-define i64 @load32_and16_andr(i32* %p, i64 %y) {
+define i64 @load32_and16_andr(ptr %p, i64 %y) {
 ; CHECK-LABEL: load32_and16_andr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -37,14 +37,14 @@ define i64 @load32_and16_andr(i32* %p, i64 %y) {
 ; CHECKBE-NEXT:    and w8, w1, w8
 ; CHECKBE-NEXT:    and x0, x8, #0xffff
 ; CHECKBE-NEXT:    ret
-  %x = load i32, i32* %p, align 4
+  %x = load i32, ptr %p, align 4
   %xz = zext i32 %x to i64
   %a = and i64 %y, %xz
   %r = and i64 %a, 65535
   ret i64 %r
 }
 
-define i64 @load32_and16_and_sext(i32* %p, i64 %y) {
+define i64 @load32_and16_and_sext(ptr %p, i64 %y) {
 ; CHECK-LABEL: load32_and16_and_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -58,14 +58,14 @@ define i64 @load32_and16_and_sext(i32* %p, i64 %y) {
 ; CHECKBE-NEXT:    and w8, w1, w8
 ; CHECKBE-NEXT:    and x0, x8, #0xffff
 ; CHECKBE-NEXT:    ret
-  %x = load i32, i32* %p, align 4
+  %x = load i32, ptr %p, align 4
   %xz = sext i32 %x to i64
   %a = and i64 %y, %xz
   %r = and i64 %a, 65535
   ret i64 %r
 }
 
-define i64 @load32_and16_or(i32* %p, i64 %y) {
+define i64 @load32_and16_or(ptr %p, i64 %y) {
 ; CHECK-LABEL: load32_and16_or:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -79,14 +79,14 @@ define i64 @load32_and16_or(i32* %p, i64 %y) {
 ; CHECKBE-NEXT:    orr w8, w1, w8
 ; CHECKBE-NEXT:    and x0, x8, #0xffff
 ; CHECKBE-NEXT:    ret
-  %x = load i32, i32* %p, align 4
+  %x = load i32, ptr %p, align 4
   %xz = zext i32 %x to i64
   %a = or i64 %y, %xz
   %r = and i64 %a, 65535
   ret i64 %r
 }
 
-define i64 @load32_and16_orr(i32* %p, i64 %y) {
+define i64 @load32_and16_orr(ptr %p, i64 %y) {
 ; CHECK-LABEL: load32_and16_orr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -100,14 +100,14 @@ define i64 @load32_and16_orr(i32* %p, i64 %y) {
 ; CHECKBE-NEXT:    and x9, x1, #0xffff
 ; CHECKBE-NEXT:    orr x0, x9, x8
 ; CHECKBE-NEXT:    ret
-  %x = load i32, i32* %p, align 4
+  %x = load i32, ptr %p, align 4
   %xz = zext i32 %x to i64
   %ym = and i64 %y, 65535
   %r = or i64 %ym, %xz
   ret i64 %r
 }
 
-define i64 @load32_and16_xorm1(i32* %p) {
+define i64 @load32_and16_xorm1(ptr %p) {
 ; CHECK-LABEL: load32_and16_xorm1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -121,14 +121,14 @@ define i64 @load32_and16_xorm1(i32* %p) {
 ; CHECKBE-NEXT:    mvn w8, w8
 ; CHECKBE-NEXT:    and x0, x8, #0xffff
 ; CHECKBE-NEXT:    ret
-  %x = load i32, i32* %p, align 4
+  %x = load i32, ptr %p, align 4
   %xz = zext i32 %x to i64
   %a = xor i64 %xz, -1
   %r = and i64 %a, 65535
   ret i64 %r
 }
 
-define i64 @load64_and16(i64* %p, i128 %y) {
+define i64 @load64_and16(ptr %p, i128 %y) {
 ; CHECK-LABEL: load64_and16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
@@ -140,7 +140,7 @@ define i64 @load64_and16(i64* %p, i128 %y) {
 ; CHECKBE-NEXT:    ldrh w8, [x0, #6]
 ; CHECKBE-NEXT:    and x0, x3, x8
 ; CHECKBE-NEXT:    ret
-  %x = load i64, i64* %p, align 4
+  %x = load i64, ptr %p, align 4
   %xz = zext i64 %x to i128
   %a = and i128 %y, %xz
   %t = trunc i128 %a to i64
@@ -148,7 +148,7 @@ define i64 @load64_and16(i64* %p, i128 %y) {
   ret i64 %r
 }
 
-define i64 @load16_and16(i16* %p, i64 %y) {
+define i64 @load16_and16(ptr %p, i64 %y) {
 ; CHECK-LABEL: load16_and16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
@@ -160,14 +160,14 @@ define i64 @load16_and16(i16* %p, i64 %y) {
 ; CHECKBE-NEXT:    ldrh w8, [x0]
 ; CHECKBE-NEXT:    and x0, x1, x8
 ; CHECKBE-NEXT:    ret
-  %x = load i16, i16* %p, align 4
+  %x = load i16, ptr %p, align 4
   %xz = zext i16 %x to i64
   %a = and i64 %y, %xz
   %r = and i64 %a, 65535
   ret i64 %r
 }
 
-define i64 @load16_and8(i16* %p, i64 %y) {
+define i64 @load16_and8(ptr %p, i64 %y) {
 ; CHECK-LABEL: load16_and8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
@@ -181,14 +181,14 @@ define i64 @load16_and8(i16* %p, i64 %y) {
 ; CHECKBE-NEXT:    and w8, w1, w8
 ; CHECKBE-NEXT:    and x0, x8, #0xff
 ; CHECKBE-NEXT:    ret
-  %x = load i16, i16* %p, align 4
+  %x = load i16, ptr %p, align 4
   %xz = zext i16 %x to i64
   %a = and i64 %y, %xz
   %r = and i64 %a, 255
   ret i64 %r
 }
 
-define i64 @load16_and7(i16* %p, i64 %y) {
+define i64 @load16_and7(ptr %p, i64 %y) {
 ; CHECK-LABEL: load16_and7:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
@@ -202,14 +202,14 @@ define i64 @load16_and7(i16* %p, i64 %y) {
 ; CHECKBE-NEXT:    and w8, w1, w8
 ; CHECKBE-NEXT:    and x0, x8, #0x7f
 ; CHECKBE-NEXT:    ret
-  %x = load i16, i16* %p, align 4
+  %x = load i16, ptr %p, align 4
   %xz = zext i16 %x to i64
   %a = and i64 %y, %xz
   %r = and i64 %a, 127
   ret i64 %r
 }
 
-define i64 @load8_and16(i8* %p, i64 %y) {
+define i64 @load8_and16(ptr %p, i64 %y) {
 ; CHECK-LABEL: load8_and16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -221,14 +221,14 @@ define i64 @load8_and16(i8* %p, i64 %y) {
 ; CHECKBE-NEXT:    ldrb w8, [x0]
 ; CHECKBE-NEXT:    and x0, x1, x8
 ; CHECKBE-NEXT:    ret
-  %x = load i8, i8* %p, align 4
+  %x = load i8, ptr %p, align 4
   %xz = zext i8 %x to i64
   %a = and i64 %y, %xz
   %r = and i64 %a, 65535
   ret i64 %r
 }
 
-define i64 @load8_and16_zext(i8* %p, i8 %y) {
+define i64 @load8_and16_zext(ptr %p, i8 %y) {
 ; CHECK-LABEL: load8_and16_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -242,7 +242,7 @@ define i64 @load8_and16_zext(i8* %p, i8 %y) {
 ; CHECKBE-NEXT:    and w8, w1, w8
 ; CHECKBE-NEXT:    and x0, x8, #0xff
 ; CHECKBE-NEXT:    ret
-  %x = load i8, i8* %p, align 4
+  %x = load i8, ptr %p, align 4
   %xz = zext i8 %x to i64
   %yz = zext i8 %y to i64
   %a = and i64 %yz, %xz
@@ -250,7 +250,7 @@ define i64 @load8_and16_zext(i8* %p, i8 %y) {
   ret i64 %r
 }
 
-define i64 @load8_and16_sext(i8* %p, i8 %y) {
+define i64 @load8_and16_sext(ptr %p, i8 %y) {
 ; CHECK-LABEL: load8_and16_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -264,7 +264,7 @@ define i64 @load8_and16_sext(i8* %p, i8 %y) {
 ; CHECKBE-NEXT:    // kill: def $w1 killed $w1 def $x1
 ; CHECKBE-NEXT:    and x0, x1, x8
 ; CHECKBE-NEXT:    ret
-  %x = load i8, i8* %p, align 4
+  %x = load i8, ptr %p, align 4
   %xz = zext i8 %x to i64
   %yz = sext i8 %y to i64
   %a = and i64 %yz, %xz
@@ -272,7 +272,7 @@ define i64 @load8_and16_sext(i8* %p, i8 %y) {
   ret i64 %r
 }
 
-define i64 @load8_and16_or(i8* %p, i64 %y) {
+define i64 @load8_and16_or(ptr %p, i64 %y) {
 ; CHECK-LABEL: load8_and16_or:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -286,14 +286,14 @@ define i64 @load8_and16_or(i8* %p, i64 %y) {
 ; CHECKBE-NEXT:    orr w8, w1, w8
 ; CHECKBE-NEXT:    and x0, x8, #0xffff
 ; CHECKBE-NEXT:    ret
-  %x = load i8, i8* %p, align 4
+  %x = load i8, ptr %p, align 4
   %xz = zext i8 %x to i64
   %a = or i64 %y, %xz
   %r = and i64 %a, 65535
   ret i64 %r
 }
 
-define i64 @load16_and8_manyext(i16* %p, i32 %y) {
+define i64 @load16_and8_manyext(ptr %p, i32 %y) {
 ; CHECK-LABEL: load16_and8_manyext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
@@ -307,7 +307,7 @@ define i64 @load16_and8_manyext(i16* %p, i32 %y) {
 ; CHECKBE-NEXT:    and w8, w1, w8
 ; CHECKBE-NEXT:    and x0, x8, #0xff
 ; CHECKBE-NEXT:    ret
-  %x = load i16, i16* %p, align 4
+  %x = load i16, ptr %p, align 4
   %xz = zext i16 %x to i32
   %a = and i32 %y, %xz
   %az = zext i32 %a to i64
@@ -315,7 +315,7 @@ define i64 @load16_and8_manyext(i16* %p, i32 %y) {
   ret i64 %r
 }
 
-define i64 @multiple_load(i16* %p, i32* %q) {
+define i64 @multiple_load(ptr %p, ptr %q) {
 ; CHECK-LABEL: multiple_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
@@ -331,16 +331,16 @@ define i64 @multiple_load(i16* %p, i32* %q) {
 ; CHECKBE-NEXT:    and w8, w9, w8
 ; CHECKBE-NEXT:    and x0, x8, #0xff
 ; CHECKBE-NEXT:    ret
-  %x = load i16, i16* %p, align 4
+  %x = load i16, ptr %p, align 4
   %xz = zext i16 %x to i64
-  %y = load i32, i32* %q, align 4
+  %y = load i32, ptr %q, align 4
   %yz = zext i32 %y to i64
   %a = and i64 %yz, %xz
   %r = and i64 %a, 255
   ret i64 %r
 }
 
-define i64 @multiple_load_or(i16* %p, i32* %q) {
+define i64 @multiple_load_or(ptr %p, ptr %q) {
 ; CHECK-LABEL: multiple_load_or:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
@@ -356,16 +356,16 @@ define i64 @multiple_load_or(i16* %p, i32* %q) {
 ; CHECKBE-NEXT:    orr w8, w9, w8
 ; CHECKBE-NEXT:    and x0, x8, #0xff
 ; CHECKBE-NEXT:    ret
-  %x = load i16, i16* %p, align 4
+  %x = load i16, ptr %p, align 4
   %xz = zext i16 %x to i64
-  %y = load i32, i32* %q, align 4
+  %y = load i32, ptr %q, align 4
   %yz = zext i32 %y to i64
   %a = or i64 %yz, %xz
   %r = and i64 %a, 255
   ret i64 %r
 }
 
-define i64 @load32_and16_zexty(i32* %p, i32 %y) {
+define i64 @load32_and16_zexty(ptr %p, i32 %y) {
 ; CHECK-LABEL: load32_and16_zexty:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -379,7 +379,7 @@ define i64 @load32_and16_zexty(i32* %p, i32 %y) {
 ; CHECKBE-NEXT:    orr w8, w1, w8
 ; CHECKBE-NEXT:    and x0, x8, #0xffff
 ; CHECKBE-NEXT:    ret
-  %x = load i32, i32* %p, align 4
+  %x = load i32, ptr %p, align 4
   %xz = zext i32 %x to i64
   %yz = zext i32 %y to i64
   %a = or i64 %yz, %xz
@@ -387,7 +387,7 @@ define i64 @load32_and16_zexty(i32* %p, i32 %y) {
   ret i64 %r
 }
 
-define i64 @load32_and16_sexty(i32* %p, i32 %y) {
+define i64 @load32_and16_sexty(ptr %p, i32 %y) {
 ; CHECK-LABEL: load32_and16_sexty:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -401,7 +401,7 @@ define i64 @load32_and16_sexty(i32* %p, i32 %y) {
 ; CHECKBE-NEXT:    orr w8, w1, w8
 ; CHECKBE-NEXT:    and x0, x8, #0xffff
 ; CHECKBE-NEXT:    ret
-  %x = load i32, i32* %p, align 4
+  %x = load i32, ptr %p, align 4
   %xz = zext i32 %x to i64
   %yz = sext i32 %y to i64
   %a = or i64 %yz, %xz
@@ -409,7 +409,7 @@ define i64 @load32_and16_sexty(i32* %p, i32 %y) {
   ret i64 %r
 }
 
-define zeroext i1 @bigger(i8* nocapture readonly %c, i8* nocapture readonly %e, i64 %d, i64 %p1) {
+define zeroext i1 @bigger(ptr nocapture readonly %c, ptr nocapture readonly %e, i64 %d, i64 %p1) {
 ; CHECK-LABEL: bigger:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrb w8, [x0, x2]
@@ -442,12 +442,12 @@ entry:
   %1 = and i16 %0, 7
   %sh_prom = sub nuw nsw i16 8, %1
   %shl = shl nuw nsw i16 5, %sh_prom
-  %arrayidx = getelementptr inbounds i8, i8* %c, i64 %d
-  %2 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %c, i64 %d
+  %2 = load i8, ptr %arrayidx, align 1
   %3 = and i16 %shl, 255
   %conv2 = zext i16 %3 to i32
-  %arrayidx3 = getelementptr inbounds i8, i8* %e, i64 %d
-  %4 = load i8, i8* %arrayidx3, align 1
+  %arrayidx3 = getelementptr inbounds i8, ptr %e, i64 %d
+  %4 = load i8, ptr %arrayidx3, align 1
   %5 = xor i8 %4, %2
   %6 = zext i8 %5 to i32
   %7 = and i32 %6, %conv2

diff --git a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
index 437185efd3430..4a4f0571fb4fc 100644
--- a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
+++ b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
@@ -44,13 +44,13 @@ define i32 @combine_gt_ge_10() #0 {
 ; CHECK-NEXT:    mov w0, wzr
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp sgt i32 %0, 10
   br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
 
 land.lhs.true:                                    ; preds = %entry
-  %1 = load i32, i32* @b, align 4
-  %2 = load i32, i32* @c, align 4
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
   %cmp1 = icmp eq i32 %1, %2
   br i1 %cmp1, label %return, label %land.lhs.true3
 
@@ -59,8 +59,8 @@ lor.lhs.false:                                    ; preds = %entry
   br i1 %cmp2, label %land.lhs.true3, label %if.end
 
 land.lhs.true3:                                   ; preds = %lor.lhs.false, %land.lhs.true
-  %3 = load i32, i32* @b, align 4
-  %4 = load i32, i32* @d, align 4
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
   %cmp4 = icmp eq i32 %3, %4
   br i1 %cmp4, label %return, label %if.end
 
@@ -111,13 +111,13 @@ define i32 @combine_gt_lt_5() #0 {
 ; CHECK-NEXT:    mov w0, wzr
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp sgt i32 %0, 5
   br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
 
 land.lhs.true:                                    ; preds = %entry
-  %1 = load i32, i32* @b, align 4
-  %2 = load i32, i32* @c, align 4
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
   %cmp1 = icmp eq i32 %1, %2
   br i1 %cmp1, label %return, label %if.end
 
@@ -126,8 +126,8 @@ lor.lhs.false:                                    ; preds = %entry
   br i1 %cmp2, label %land.lhs.true3, label %if.end
 
 land.lhs.true3:                                   ; preds = %lor.lhs.false
-  %3 = load i32, i32* @b, align 4
-  %4 = load i32, i32* @d, align 4
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
   %cmp4 = icmp eq i32 %3, %4
   br i1 %cmp4, label %return, label %if.end
 
@@ -176,13 +176,13 @@ define i32 @combine_lt_ge_5() #0 {
 ; CHECK-NEXT:    mov w0, wzr
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp slt i32 %0, 5
   br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
 
 land.lhs.true:                                    ; preds = %entry
-  %1 = load i32, i32* @b, align 4
-  %2 = load i32, i32* @c, align 4
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
   %cmp1 = icmp eq i32 %1, %2
   br i1 %cmp1, label %return, label %land.lhs.true3
 
@@ -191,8 +191,8 @@ lor.lhs.false:                                    ; preds = %entry
   br i1 %cmp2, label %land.lhs.true3, label %if.end
 
 land.lhs.true3:                                   ; preds = %lor.lhs.false, %land.lhs.true
-  %3 = load i32, i32* @b, align 4
-  %4 = load i32, i32* @d, align 4
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
   %cmp4 = icmp eq i32 %3, %4
   br i1 %cmp4, label %return, label %if.end
 
@@ -243,13 +243,13 @@ define i32 @combine_lt_gt_5() #0 {
 ; CHECK-NEXT:    mov w0, wzr
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp slt i32 %0, 5
   br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
 
 land.lhs.true:                                    ; preds = %entry
-  %1 = load i32, i32* @b, align 4
-  %2 = load i32, i32* @c, align 4
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
   %cmp1 = icmp eq i32 %1, %2
   br i1 %cmp1, label %return, label %if.end
 
@@ -258,8 +258,8 @@ lor.lhs.false:                                    ; preds = %entry
   br i1 %cmp2, label %land.lhs.true3, label %if.end
 
 land.lhs.true3:                                   ; preds = %lor.lhs.false
-  %3 = load i32, i32* @b, align 4
-  %4 = load i32, i32* @d, align 4
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
   %cmp4 = icmp eq i32 %3, %4
   br i1 %cmp4, label %return, label %if.end
 
@@ -310,13 +310,13 @@ define i32 @combine_gt_lt_n5() #0 {
 ; CHECK-NEXT:    mov w0, wzr
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp sgt i32 %0, -5
   br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
 
 land.lhs.true:                                    ; preds = %entry
-  %1 = load i32, i32* @b, align 4
-  %2 = load i32, i32* @c, align 4
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
   %cmp1 = icmp eq i32 %1, %2
   br i1 %cmp1, label %return, label %if.end
 
@@ -325,8 +325,8 @@ lor.lhs.false:                                    ; preds = %entry
   br i1 %cmp2, label %land.lhs.true3, label %if.end
 
 land.lhs.true3:                                   ; preds = %lor.lhs.false
-  %3 = load i32, i32* @b, align 4
-  %4 = load i32, i32* @d, align 4
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
   %cmp4 = icmp eq i32 %3, %4
   br i1 %cmp4, label %return, label %if.end
 
@@ -377,13 +377,13 @@ define i32 @combine_lt_gt_n5() #0 {
 ; CHECK-NEXT:    mov w0, wzr
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp slt i32 %0, -5
   br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
 
 land.lhs.true:                                    ; preds = %entry
-  %1 = load i32, i32* @b, align 4
-  %2 = load i32, i32* @c, align 4
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
   %cmp1 = icmp eq i32 %1, %2
   br i1 %cmp1, label %return, label %if.end
 
@@ -392,8 +392,8 @@ lor.lhs.false:                                    ; preds = %entry
   br i1 %cmp2, label %land.lhs.true3, label %if.end
 
 land.lhs.true3:                                   ; preds = %lor.lhs.false
-  %3 = load i32, i32* @b, align 4
-  %4 = load i32, i32* @d, align 4
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
   %cmp4 = icmp eq i32 %3, %4
   br i1 %cmp4, label %return, label %if.end
 
@@ -407,12 +407,12 @@ return:                                           ; preds = %if.end, %land.lhs.t
 
 %struct.Struct = type { i64, i64 }
 
-@glob = internal unnamed_addr global %struct.Struct* null, align 8
+@glob = internal unnamed_addr global ptr null, align 8
 
-declare %struct.Struct* @Update(%struct.Struct*) #1
+declare ptr @Update(ptr) #1
 
 ; no checks for this case, it just should be processed without errors
-define void @combine_non_adjacent_cmp_br(%struct.Struct* nocapture readonly %hdCall) #0 {
+define void @combine_non_adjacent_cmp_br(ptr nocapture readonly %hdCall) #0 {
 ; CHECK-LABEL: combine_non_adjacent_cmp_br:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x30, [sp, #-48]! // 8-byte Folded Spill
@@ -452,19 +452,18 @@ define void @combine_non_adjacent_cmp_br(%struct.Struct* nocapture readonly %hdC
 ; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
 entry:
-  %size = getelementptr inbounds %struct.Struct, %struct.Struct* %hdCall, i64 0, i32 0
-  %0 = load i64, i64* %size, align 8
+  %0 = load i64, ptr %hdCall, align 8
   br label %land.rhs
 
 land.rhs:
   %rp.06 = phi i64 [ %0, %entry ], [ %sub, %while.body ]
-  %1 = load i64, i64* inttoptr (i64 24 to i64*), align 8
+  %1 = load i64, ptr inttoptr (i64 24 to ptr), align 8
   %cmp2 = icmp sgt i64 %1, 0
   br i1 %cmp2, label %while.body, label %while.end
 
 while.body:
-  %2 = load %struct.Struct*, %struct.Struct** @glob, align 8
-  %call = tail call %struct.Struct* @Update(%struct.Struct* %2) #2
+  %2 = load ptr, ptr @glob, align 8
+  %call = tail call ptr @Update(ptr %2) #2
   %sub = add nsw i64 %rp.06, -2
   %cmp = icmp slt i64 %0, %rp.06
   br i1 %cmp, label %land.rhs, label %while.end
@@ -525,7 +524,7 @@ define i32 @do_nothing_if_resultant_opcodes_would_differ() #0 {
 ; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp4 = icmp slt i32 %0, -1
   br i1 %cmp4, label %while.body.preheader, label %while.end
 
@@ -540,7 +539,7 @@ while.body:                                       ; preds = %while.body, %while.
   br i1 %cmp, label %while.body, label %while.cond.while.end_crit_edge
 
 while.cond.while.end_crit_edge:                   ; preds = %while.body
-  %.pre = load i32, i32* @a, align 4
+  %.pre = load i32, ptr @a, align 4
   br label %while.end
 
 while.end:                                        ; preds = %while.cond.while.end_crit_edge, %entry
@@ -549,8 +548,8 @@ while.end:                                        ; preds = %while.cond.while.en
   br i1 %cmp1, label %land.lhs.true, label %if.end
 
 land.lhs.true:                                    ; preds = %while.end
-  %2 = load i32, i32* @b, align 4
-  %3 = load i32, i32* @d, align 4
+  %2 = load i32, ptr @b, align 4
+  %3 = load i32, ptr @d, align 4
   %cmp2 = icmp eq i32 %2, %3
   br i1 %cmp2, label %return, label %if.end
 
@@ -613,7 +612,7 @@ define i32 @do_nothing_if_compares_can_not_be_adjusted_to_each_other() #0 {
 ; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp4 = icmp slt i32 %0, 1
   br i1 %cmp4, label %while.body.preheader, label %while.end
 
@@ -631,13 +630,13 @@ while.end.loopexit:                               ; preds = %while.body
   br label %while.end
 
 while.end:                                        ; preds = %while.end.loopexit, %entry
-  %1 = load i32, i32* @c, align 4
+  %1 = load i32, ptr @c, align 4
   %cmp1 = icmp sgt i32 %1, -3
   br i1 %cmp1, label %land.lhs.true, label %if.end
 
 land.lhs.true:                                    ; preds = %while.end
-  %2 = load i32, i32* @b, align 4
-  %3 = load i32, i32* @d, align 4
+  %2 = load i32, ptr @b, align 4
+  %3 = load i32, ptr @d, align 4
   %cmp2 = icmp eq i32 %2, %3
   br i1 %cmp2, label %return, label %if.end
 
@@ -656,7 +655,7 @@ return:                                           ; preds = %if.end, %land.lhs.t
 ; fcmp d8, #0.0
 ; b.gt .LBB0_5
 
-define i32 @fcmpri(i32 %argc, i8** nocapture readonly %argv) #0 {
+define i32 @fcmpri(i32 %argc, ptr nocapture readonly %argv) #0 {
 ; CHECK-LABEL: fcmpri:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str d8, [sp, #-32]! // 8-byte Folded Spill
@@ -707,9 +706,9 @@ entry:
   br i1 %cmp, label %land.lhs.true, label %if.end
 
 land.lhs.true:                                    ; preds = %entry
-  %arrayidx = getelementptr inbounds i8*, i8** %argv, i64 1
-  %0 = load i8*, i8** %arrayidx, align 8
-  %cmp1 = icmp eq i8* %0, null
+  %arrayidx = getelementptr inbounds ptr, ptr %argv, i64 1
+  %0 = load ptr, ptr %arrayidx, align 8
+  %cmp1 = icmp eq ptr %0, null
   br i1 %cmp1, label %if.end, label %return
 
 if.end:                                           ; preds = %land.lhs.true, %entry
@@ -775,7 +774,7 @@ falser:
   ret void
 }
 
-define i32 @combine_gt_ge_sel(i64 %v, i64* %p) #0 {
+define i32 @combine_gt_ge_sel(i64 %v, ptr %p) #0 {
 ; CHECK-LABEL: combine_gt_ge_sel:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x8, :got:a
@@ -817,15 +816,15 @@ define i32 @combine_gt_ge_sel(i64 %v, i64* %p) #0 {
 ; CHECK-NEXT:    mov w0, wzr
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp sgt i32 %0, 0
   %m = select i1 %cmp, i64 %v, i64 0
-  store i64 %m, i64* %p
+  store i64 %m, ptr %p
   br i1 %cmp, label %lor.lhs.false, label %land.lhs.true
 
 land.lhs.true:                                    ; preds = %entry
-  %1 = load i32, i32* @b, align 4
-  %2 = load i32, i32* @c, align 4
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
   %cmp1 = icmp eq i32 %1, %2
   br i1 %cmp1, label %return, label %land.lhs.true3
 
@@ -834,8 +833,8 @@ lor.lhs.false:                                    ; preds = %entry
   br i1 %cmp2, label %land.lhs.true3, label %if.end
 
 land.lhs.true3:                                   ; preds = %lor.lhs.false, %land.lhs.true
-  %3 = load i32, i32* @b, align 4
-  %4 = load i32, i32* @d, align 4
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
   %cmp4 = icmp eq i32 %3, %4
   br i1 %cmp4, label %return, label %if.end
 

diff --git a/llvm/test/CodeGen/AArch64/compare-branch.ll b/llvm/test/CodeGen/AArch64/compare-branch.ll
index 506314451224f..8fe24a1320f37 100644
--- a/llvm/test/CodeGen/AArch64/compare-branch.ll
+++ b/llvm/test/CodeGen/AArch64/compare-branch.ll
@@ -6,31 +6,31 @@
 define void @foo() {
 ; CHECK-LABEL: foo:
 
-  %val1 = load volatile i32, i32* @var32
+  %val1 = load volatile i32, ptr @var32
   %tst1 = icmp eq i32 %val1, 0
   br i1 %tst1, label %end, label %test2, !prof !1
 ; CHECK: cbz {{w[0-9]+}}, .LBB
 
 test2:
-  %val2 = load volatile i32, i32* @var32
+  %val2 = load volatile i32, ptr @var32
   %tst2 = icmp ne i32 %val2, 0
   br i1 %tst2, label %end, label %test3, !prof !1
 ; CHECK: cbnz {{w[0-9]+}}, .LBB
 
 test3:
-  %val3 = load volatile i64, i64* @var64
+  %val3 = load volatile i64, ptr @var64
   %tst3 = icmp eq i64 %val3, 0
   br i1 %tst3, label %end, label %test4, !prof !1
 ; CHECK: cbz {{x[0-9]+}}, .LBB
 
 test4:
-  %val4 = load volatile i64, i64* @var64
+  %val4 = load volatile i64, ptr @var64
   %tst4 = icmp ne i64 %val4, 0
   br i1 %tst4, label %end, label %test5, !prof !1
 ; CHECK: cbnz {{x[0-9]+}}, .LBB
 
 test5:
-  store volatile i64 %val4, i64* @var64
+  store volatile i64 %val4, ptr @var64
   ret void
 
 end:

diff --git a/llvm/test/CodeGen/AArch64/complex-copy-noneon.ll b/llvm/test/CodeGen/AArch64/complex-copy-noneon.ll
index b7c7043360234..7e559edac8fb4 100644
--- a/llvm/test/CodeGen/AArch64/complex-copy-noneon.ll
+++ b/llvm/test/CodeGen/AArch64/complex-copy-noneon.ll
@@ -8,14 +8,14 @@ define void @store_combine() nounwind {
   %src = alloca { double, double }, align 8
   %dst = alloca { double, double }, align 8
 
-  %src.realp = getelementptr inbounds { double, double }, { double, double }* %src, i32 0, i32 0
-  %src.real = load double, double* %src.realp
-  %src.imagp = getelementptr inbounds { double, double }, { double, double }* %src, i32 0, i32 1
-  %src.imag = load double, double* %src.imagp
+  %src.realp = getelementptr inbounds { double, double }, ptr %src, i32 0, i32 0
+  %src.real = load double, ptr %src.realp
+  %src.imagp = getelementptr inbounds { double, double }, ptr %src, i32 0, i32 1
+  %src.imag = load double, ptr %src.imagp
 
-  %dst.realp = getelementptr inbounds { double, double }, { double, double }* %dst, i32 0, i32 0
-  %dst.imagp = getelementptr inbounds { double, double }, { double, double }* %dst, i32 0, i32 1
-  store double %src.real, double* %dst.realp
-  store double %src.imag, double* %dst.imagp
+  %dst.realp = getelementptr inbounds { double, double }, ptr %dst, i32 0, i32 0
+  %dst.imagp = getelementptr inbounds { double, double }, ptr %dst, i32 0, i32 1
+  store double %src.real, ptr %dst.realp
+  store double %src.imag, ptr %dst.imagp
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/complex-int-to-fp.ll b/llvm/test/CodeGen/AArch64/complex-int-to-fp.ll
index e37e508ca2bf0..506e5e59a3529 100644
--- a/llvm/test/CodeGen/AArch64/complex-int-to-fp.ll
+++ b/llvm/test/CodeGen/AArch64/complex-int-to-fp.ll
@@ -3,10 +3,10 @@
 ; CHECK: autogen_SD19655
 ; CHECK: scvtf
 ; CHECK: ret
-define void @autogen_SD19655(<2 x i64>* %addr, <2 x float>* %addrfloat) {
-  %T = load <2 x i64>, <2 x i64>* %addr
+define void @autogen_SD19655(ptr %addr, ptr %addrfloat) {
+  %T = load <2 x i64>, ptr %addr
   %F = sitofp <2 x i64> %T to <2 x float>
-  store <2 x float> %F, <2 x float>* %addrfloat
+  store <2 x float> %F, ptr %addrfloat
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/concat-vector.ll b/llvm/test/CodeGen/AArch64/concat-vector.ll
index 1e5d2660a79eb..bd48c32566fc9 100644
--- a/llvm/test/CodeGen/AArch64/concat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/concat-vector.ll
@@ -50,14 +50,14 @@ define <8 x i16> @concat5(<4 x i16> %A, <4 x i16> %B) {
    ret <8 x i16> %v8i16
 }
 
-define <16 x i16> @concat6(<8 x i16>* %A, <8 x i16>* %B) {
+define <16 x i16> @concat6(ptr %A, ptr %B) {
 ; CHECK-LABEL: concat6:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    ret
-   %tmp1 = load <8 x i16>, <8 x i16>* %A
-   %tmp2 = load <8 x i16>, <8 x i16>* %B
+   %tmp1 = load <8 x i16>, ptr %A
+   %tmp2 = load <8 x i16>, ptr %B
    %v16i16 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
    ret <16 x i16> %v16i16
 }
@@ -73,14 +73,14 @@ define <4 x i32> @concat7(<2 x i32> %A, <2 x i32> %B) {
    ret <4 x i32> %v4i32
 }
 
-define <8 x i32> @concat8(<4 x i32>* %A, <4 x i32>* %B) {
+define <8 x i32> @concat8(ptr %A, ptr %B) {
 ; CHECK-LABEL: concat8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    ret
-   %tmp1 = load <4 x i32>, <4 x i32>* %A
-   %tmp2 = load <4 x i32>, <4 x i32>* %B
+   %tmp1 = load <4 x i32>, ptr %A
+   %tmp2 = load <4 x i32>, ptr %B
    %v8i32 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
    ret <8 x i32> %v8i32
 }

diff --git a/llvm/test/CodeGen/AArch64/cond-br-tuning.ll b/llvm/test/CodeGen/AArch64/cond-br-tuning.ll
index 2652df4046f7d..458a376b50203 100644
--- a/llvm/test/CodeGen/AArch64/cond-br-tuning.ll
+++ b/llvm/test/CodeGen/AArch64/cond-br-tuning.ll
@@ -6,7 +6,7 @@ target triple = "aarch64-linaro-linux-gnueabi"
 
 ; CMN is an alias of ADDS.
 
-define void @test_add_cbz(i32 %a, i32 %b, i32* %ptr) {
+define void @test_add_cbz(i32 %a, i32 %b, ptr %ptr) {
 ; CHECK-LABEL: test_add_cbz:
 ; CHECK:       // %bb.0: // %common.ret
 ; CHECK-NEXT:    cmn w0, w1
@@ -17,14 +17,14 @@ define void @test_add_cbz(i32 %a, i32 %b, i32* %ptr) {
   %d = icmp ne i32 %c, 0
   br i1 %d, label %L1, label %L2
 L1:
-  store i32 0, i32* %ptr, align 4
+  store i32 0, ptr %ptr, align 4
   ret void
 L2:
-  store i32 1, i32* %ptr, align 4
+  store i32 1, ptr %ptr, align 4
   ret void
 }
 
-define void @test_add_cbz_multiple_use(i32 %a, i32 %b, i32* %ptr) {
+define void @test_add_cbz_multiple_use(i32 %a, i32 %b, ptr %ptr) {
 ; CHECK-LABEL: test_add_cbz_multiple_use:
 ; CHECK:       // %bb.0: // %common.ret
 ; CHECK-NEXT:    mov w8, #10
@@ -36,14 +36,14 @@ define void @test_add_cbz_multiple_use(i32 %a, i32 %b, i32* %ptr) {
   %d = icmp ne i32 %c, 0
   br i1 %d, label %L1, label %L2
 L1:
-  store i32 10, i32* %ptr, align 4
+  store i32 10, ptr %ptr, align 4
   ret void
 L2:
-  store i32 %c, i32* %ptr, align 4
+  store i32 %c, ptr %ptr, align 4
   ret void
 }
 
-define void @test_add_cbz_64(i64 %a, i64 %b, i64* %ptr) {
+define void @test_add_cbz_64(i64 %a, i64 %b, ptr %ptr) {
 ; CHECK-LABEL: test_add_cbz_64:
 ; CHECK:       // %bb.0: // %common.ret
 ; CHECK-NEXT:    cmn x0, x1
@@ -54,14 +54,14 @@ define void @test_add_cbz_64(i64 %a, i64 %b, i64* %ptr) {
   %d = icmp ne i64 %c, 0
   br i1 %d, label %L1, label %L2
 L1:
-  store i64 0, i64* %ptr, align 4
+  store i64 0, ptr %ptr, align 4
   ret void
 L2:
-  store i64 1, i64* %ptr, align 4
+  store i64 1, ptr %ptr, align 4
   ret void
 }
 
-define void @test_and_cbz(i32 %a, i32* %ptr) {
+define void @test_and_cbz(i32 %a, ptr %ptr) {
 ; CHECK-LABEL: test_and_cbz:
 ; CHECK:       // %bb.0: // %common.ret
 ; CHECK-NEXT:    tst w0, #0x6
@@ -72,14 +72,14 @@ define void @test_and_cbz(i32 %a, i32* %ptr) {
   %d = icmp ne i32 %c, 0
   br i1 %d, label %L1, label %L2
 L1:
-  store i32 0, i32* %ptr, align 4
+  store i32 0, ptr %ptr, align 4
   ret void
 L2:
-  store i32 1, i32* %ptr, align 4
+  store i32 1, ptr %ptr, align 4
   ret void
 }
 
-define void @test_bic_cbnz(i32 %a, i32 %b, i32* %ptr) {
+define void @test_bic_cbnz(i32 %a, i32 %b, ptr %ptr) {
 ; CHECK-LABEL: test_bic_cbnz:
 ; CHECK:       // %bb.0: // %common.ret
 ; CHECK-NEXT:    bics wzr, w1, w0
@@ -90,14 +90,14 @@ define void @test_bic_cbnz(i32 %a, i32 %b, i32* %ptr) {
   %d = icmp eq i32 %c, %b
   br i1 %d, label %L1, label %L2
 L1:
-  store i32 0, i32* %ptr, align 4
+  store i32 0, ptr %ptr, align 4
   ret void
 L2:
-  store i32 1, i32* %ptr, align 4
+  store i32 1, ptr %ptr, align 4
   ret void
 }
 
-define void @test_add_tbz(i32 %a, i32 %b, i32* %ptr) {
+define void @test_add_tbz(i32 %a, i32 %b, ptr %ptr) {
 ; CHECK-LABEL: test_add_tbz:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adds w8, w0, w1
@@ -111,13 +111,13 @@ entry:
   %cmp36 = icmp sge i32 %add, 0
   br i1 %cmp36, label %L2, label %L1
 L1:
-  store i32 %add, i32* %ptr, align 8
+  store i32 %add, ptr %ptr, align 8
   br label %L2
 L2:
   ret void
 }
 
-define void @test_subs_tbz(i32 %a, i32 %b, i32* %ptr) {
+define void @test_subs_tbz(i32 %a, i32 %b, ptr %ptr) {
 ; CHECK-LABEL: test_subs_tbz:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs w8, w0, w1
@@ -131,13 +131,13 @@ entry:
   %cmp36 = icmp sge i32 %sub, 0
   br i1 %cmp36, label %L2, label %L1
 L1:
-  store i32 %sub, i32* %ptr, align 8
+  store i32 %sub, ptr %ptr, align 8
   br label %L2
 L2:
   ret void
 }
 
-define void @test_add_tbnz(i32 %a, i32 %b, i32* %ptr) {
+define void @test_add_tbnz(i32 %a, i32 %b, ptr %ptr) {
 ; CHECK-LABEL: test_add_tbnz:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adds w8, w0, w1
@@ -151,13 +151,13 @@ entry:
   %cmp36 = icmp slt i32 %add, 0
   br i1 %cmp36, label %L2, label %L1
 L1:
-  store i32 %add, i32* %ptr, align 8
+  store i32 %add, ptr %ptr, align 8
   br label %L2
 L2:
   ret void
 }
 
-define void @test_subs_tbnz(i32 %a, i32 %b, i32* %ptr) {
+define void @test_subs_tbnz(i32 %a, i32 %b, ptr %ptr) {
 ; CHECK-LABEL: test_subs_tbnz:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs w8, w0, w1
@@ -171,7 +171,7 @@ entry:
   %cmp36 = icmp slt i32 %sub, 0
   br i1 %cmp36, label %L2, label %L1
 L1:
-  store i32 %sub, i32* %ptr, align 8
+  store i32 %sub, ptr %ptr, align 8
   br label %L2
 L2:
   ret void

diff --git a/llvm/test/CodeGen/AArch64/cond-sel.ll b/llvm/test/CodeGen/AArch64/cond-sel.ll
index fc4b42d6091ce..34578f43c6649 100644
--- a/llvm/test/CodeGen/AArch64/cond-sel.ll
+++ b/llvm/test/CodeGen/AArch64/cond-sel.ll
@@ -9,7 +9,7 @@ define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
 
   %tst1 = icmp ugt i32 %lhs32, %rhs32
   %val1 = select i1 %tst1, i32 42, i32 52
-  store i32 %val1, i32* @var32
+  store i32 %val1, ptr @var32
 ; CHECK-DAG: mov [[W52:w[0-9]+]], #{{52|0x34}}
 ; CHECK-DAG: mov [[W42:w[0-9]+]], #{{42|0x2a}}
 ; CHECK: csel {{w[0-9]+}}, [[W42]], [[W52]], hi
@@ -17,7 +17,7 @@ define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
   %rhs64 = sext i32 %rhs32 to i64
   %tst2 = icmp sle i64 %lhs64, %rhs64
   %val2 = select i1 %tst2, i64 %lhs64, i64 %rhs64
-  store i64 %val2, i64* @var64
+  store i64 %val2, ptr @var64
 ; CHECK: sxtw [[EXT_RHS:x[0-9]+]], {{[wx]}}[[RHS:[0-9]+]]
 ; CHECK: cmp [[LHS:x[0-9]+]], w[[RHS]], sxtw
 ; CHECK: csel {{x[0-9]+}}, [[LHS]], [[EXT_RHS]], le
@@ -33,7 +33,7 @@ define void @test_floatcsel(float %lhs32, float %rhs32, double %lhs64, double %r
 ; CHECK: fcmp {{s[0-9]+}}, {{s[0-9]+}}
 ; CHECK-NOFP-NOT: fcmp
   %val1 = select i1 %tst1, i32 42, i32 52
-  store i32 %val1, i32* @var32
+  store i32 %val1, ptr @var32
 ; CHECK: mov [[W52:w[0-9]+]], #{{52|0x34}}
 ; CHECK: mov [[W42:w[0-9]+]], #{{42|0x2a}}
 ; CHECK: csel [[MAYBETRUE:w[0-9]+]], [[W42]], [[W52]], mi
@@ -44,7 +44,7 @@ define void @test_floatcsel(float %lhs32, float %rhs32, double %lhs64, double %r
 ; CHECK: fcmp {{d[0-9]+}}, {{d[0-9]+}}
 ; CHECK-NOFP-NOT: fcmp
   %val2 = select i1 %tst2, i64 9, i64 15
-  store i64 %val2, i64* @var64
+  store i64 %val2, ptr @var64
 ; CHECK: mov w[[CONST15:[0-9]+]], #15
 ; CHECK: mov {{[wx]}}[[CONST9:[0-9]+]], #{{9|0x9}}
 ; CHECK: csel [[MAYBETRUE:x[0-9]+]], x[[CONST9]], x[[CONST15]], eq
@@ -62,7 +62,7 @@ define void @test_csinc(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
   %tst1 = icmp ugt i32 %lhs32, %rhs32
   %inc1 = add i32 %rhs32, 1
   %val1 = select i1 %tst1, i32 %inc1, i32 %lhs32
-  store volatile i32 %val1, i32* @var32
+  store volatile i32 %val1, ptr @var32
 ; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
 ; CHECK: csinc {{w[0-9]+}}, [[LHS]], [[RHS]], ls
 
@@ -70,7 +70,7 @@ define void @test_csinc(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
   %tst2 = icmp sle i32 %lhs32, %rhs2
   %inc2 = add i32 %rhs32, 1
   %val2 = select i1 %tst2, i32 %lhs32, i32 %inc2
-  store volatile i32 %val2, i32* @var32
+  store volatile i32 %val2, ptr @var32
 ; CHECK: cmp [[LHS:w[0-9]+]], {{w[0-9]+}}
 ; CHECK: csinc {{w[0-9]+}}, [[LHS]], {{w[0-9]+}}, le
 
@@ -79,7 +79,7 @@ define void @test_csinc(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
   %tst3 = icmp ugt i64 %lhs64, %rhs3
   %inc3 = add i64 %rhs3, 1
   %val3 = select i1 %tst3, i64 %inc3, i64 %lhs64
-  store volatile i64 %val3, i64* @var64
+  store volatile i64 %val3, ptr @var64
 ; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
 ; CHECK: csinc {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, ls
 
@@ -87,7 +87,7 @@ define void @test_csinc(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
   %tst4 = icmp sle i64 %lhs64, %rhs4
   %inc4 = add i64 %rhs4, 1
   %val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
-  store volatile i64 %val4, i64* @var64
+  store volatile i64 %val4, ptr @var64
 ; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
 ; CHECK: csinc {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, le
 
@@ -102,7 +102,7 @@ define void @test_csinv(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
   %tst1 = icmp ugt i32 %lhs32, %rhs32
   %inc1 = xor i32 -1, %rhs32
   %val1 = select i1 %tst1, i32 %inc1, i32 %lhs32
-  store volatile i32 %val1, i32* @var32
+  store volatile i32 %val1, ptr @var32
 ; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
 ; CHECK: csinv {{w[0-9]+}}, [[LHS]], [[RHS]], ls
 
@@ -110,7 +110,7 @@ define void @test_csinv(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
   %tst2 = icmp sle i32 %lhs32, %rhs2
   %inc2 = xor i32 -1, %rhs32
   %val2 = select i1 %tst2, i32 %lhs32, i32 %inc2
-  store volatile i32 %val2, i32* @var32
+  store volatile i32 %val2, ptr @var32
 ; CHECK: cmp [[LHS:w[0-9]+]], {{w[0-9]+}}
 ; CHECK: csinv {{w[0-9]+}}, [[LHS]], {{w[0-9]+}}, le
 
@@ -119,7 +119,7 @@ define void @test_csinv(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
   %tst3 = icmp ugt i64 %lhs64, %rhs3
   %inc3 = xor i64 -1, %rhs3
   %val3 = select i1 %tst3, i64 %inc3, i64 %lhs64
-  store volatile i64 %val3, i64* @var64
+  store volatile i64 %val3, ptr @var64
 ; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
 ; CHECK: csinv {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, ls
 
@@ -127,7 +127,7 @@ define void @test_csinv(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
   %tst4 = icmp sle i64 %lhs64, %rhs4
   %inc4 = xor i64 -1, %rhs4
   %val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
-  store volatile i64 %val4, i64* @var64
+  store volatile i64 %val4, ptr @var64
 ; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
 ; CHECK: csinv {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, le
 
@@ -140,14 +140,14 @@ define void @test_csinv0(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) minsize
 
   %tst1 = icmp ugt i32 %lhs32, %rhs32
   %val1 = select i1 %tst1, i32 0, i32 -1
-  store volatile i32 %val1, i32* @var32
+  store volatile i32 %val1, ptr @var32
 ; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
 ; CHECK: csetm {{w[0-9]+}}, ls
 
   %rhs2 = add i32 %rhs32, 42
   %tst2 = icmp sle i32 %lhs32, %rhs2
   %val2 = select i1 %tst2, i32 -1, i32 %rhs2
-  store volatile i32 %val2, i32* @var32
+  store volatile i32 %val2, ptr @var32
 ; CHECK: cmp [[LHS2:w[0-9]+]], [[RHS2:w[0-9]+]]
 ; CHECK: csinv {{w[0-9]+}}, [[RHS2]], wzr, gt
 
@@ -155,7 +155,7 @@ define void @test_csinv0(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) minsize
   %rhs3 = mul i64 %rhs64, 19
   %tst3 = icmp ugt i64 %lhs64, %rhs3
   %val3 = select i1 %tst3, i64 %rhs3, i64 -1
-  store volatile i64 %val3, i64* @var64
+  store volatile i64 %val3, ptr @var64
 ; CHECK: cmp [[LHS3:x[0-9]+]], [[RHS3:x[0-9]+]]
 ; CHECK: csinv {{x[0-9]+}}, [[RHS3]], xzr, hi
 
@@ -170,7 +170,7 @@ define void @test_csneg(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
   %tst1 = icmp ugt i32 %lhs32, %rhs32
   %inc1 = sub i32 0, %rhs32
   %val1 = select i1 %tst1, i32 %inc1, i32 %lhs32
-  store volatile i32 %val1, i32* @var32
+  store volatile i32 %val1, ptr @var32
 ; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
 ; CHECK: csneg {{w[0-9]+}}, [[LHS]], [[RHS]], ls
 
@@ -178,7 +178,7 @@ define void @test_csneg(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
   %tst2 = icmp sle i32 %lhs32, %rhs2
   %inc2 = sub i32 0, %rhs32
   %val2 = select i1 %tst2, i32 %lhs32, i32 %inc2
-  store volatile i32 %val2, i32* @var32
+  store volatile i32 %val2, ptr @var32
 ; CHECK: cmp [[LHS:w[0-9]+]], {{w[0-9]+}}
 ; CHECK: csneg {{w[0-9]+}}, [[LHS]], {{w[0-9]+}}, le
 
@@ -187,7 +187,7 @@ define void @test_csneg(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
   %tst3 = icmp ugt i64 %lhs64, %rhs3
   %inc3 = sub i64 0, %rhs3
   %val3 = select i1 %tst3, i64 %inc3, i64 %lhs64
-  store volatile i64 %val3, i64* @var64
+  store volatile i64 %val3, ptr @var64
 ; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
 ; CHECK: csneg {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, ls
 
@@ -195,7 +195,7 @@ define void @test_csneg(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
   %tst4 = icmp sle i64 %lhs64, %rhs4
   %inc4 = sub i64 0, %rhs4
   %val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
-  store volatile i64 %val4, i64* @var64
+  store volatile i64 %val4, ptr @var64
 ; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
 ; CHECK: csneg {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, le
 
@@ -210,14 +210,14 @@ define void @test_cset(i32 %lhs, i32 %rhs, i64 %lhs64) {
 ; incoming DAG is too complex
   %tst1 = icmp eq i32 %lhs, %rhs
   %val1 = zext i1 %tst1 to i32
-  store i32 %val1, i32* @var32
+  store i32 %val1, ptr @var32
 ; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}
 ; CHECK: cset {{w[0-9]+}}, eq
 
   %rhs64 = sext i32 %rhs to i64
   %tst2 = icmp ule i64 %lhs64, %rhs64
   %val2 = zext i1 %tst2 to i64
-  store i64 %val2, i64* @var64
+  store i64 %val2, ptr @var64
 ; CHECK: cset {{w[0-9]+}}, ls
 
   ret void
@@ -229,14 +229,14 @@ define void @test_csetm(i32 %lhs, i32 %rhs, i64 %lhs64) {
 
   %tst1 = icmp eq i32 %lhs, %rhs
   %val1 = sext i1 %tst1 to i32
-  store i32 %val1, i32* @var32
+  store i32 %val1, ptr @var32
 ; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}
 ; CHECK: csetm {{w[0-9]+}}, eq
 
   %rhs64 = sext i32 %rhs to i64
   %tst2 = icmp ule i64 %lhs64, %rhs64
   %val2 = sext i1 %tst2 to i64
-  store i64 %val2, i64* @var64
+  store i64 %val2, ptr @var64
 ; CHECK: csetm {{x[0-9]+}}, ls
 
   ret void

diff --git a/llvm/test/CodeGen/AArch64/consthoist-gep.ll b/llvm/test/CodeGen/AArch64/consthoist-gep.ll
index 6592cebea38a6..bd74d5f8b8d54 100644
--- a/llvm/test/CodeGen/AArch64/consthoist-gep.ll
+++ b/llvm/test/CodeGen/AArch64/consthoist-gep.ll
@@ -74,35 +74,35 @@ define dso_local void @blam() local_unnamed_addr #0 {
 ; CHECK-NEXT:  .LBB0_2: // %bb19
 ; CHECK-NEXT:    ret
 bb:
-  %tmp = load i8, i8* getelementptr inbounds (%struct.blam, %struct.blam* @global, i32 0, i32 7, i32 9), align 2, !tbaa !3
+  %tmp = load i8, ptr getelementptr inbounds (%struct.blam, ptr @global, i32 0, i32 7, i32 9), align 2, !tbaa !3
   %tmp1 = and i8 %tmp, 1
   %tmp2 = icmp eq i8 %tmp1, 0
   br i1 %tmp2, label %bb3, label %bb19
 
 bb3:                                              ; preds = %bb
-  %tmp4 = load volatile i32, i32* inttoptr (i32 805874688 to i32*), align 1024, !tbaa !23
-  store i32 %tmp4, i32* getelementptr inbounds (%struct.blam, %struct.blam* @global, i32 0, i32 13, i32 0, i32 0), align 4, !tbaa !23
-  %tmp5 = load volatile i32, i32* inttoptr (i32 805874692 to i32*), align 4, !tbaa !23
+  %tmp4 = load volatile i32, ptr inttoptr (i32 805874688 to ptr), align 1024, !tbaa !23
+  store i32 %tmp4, ptr getelementptr inbounds (%struct.blam, ptr @global, i32 0, i32 13, i32 0, i32 0), align 4, !tbaa !23
+  %tmp5 = load volatile i32, ptr inttoptr (i32 805874692 to ptr), align 4, !tbaa !23
   %tmp6 = and i32 %tmp5, 65535
-  store i32 %tmp6, i32* getelementptr inbounds (%struct.blam, %struct.blam* @global, i32 0, i32 13, i32 0, i32 1), align 4, !tbaa !23
-  %tmp7 = load volatile i32, i32* inttoptr (i32 805874696 to i32*), align 8, !tbaa !23
+  store i32 %tmp6, ptr getelementptr inbounds (%struct.blam, ptr @global, i32 0, i32 13, i32 0, i32 1), align 4, !tbaa !23
+  %tmp7 = load volatile i32, ptr inttoptr (i32 805874696 to ptr), align 8, !tbaa !23
   %tmp8 = and i32 %tmp7, 522133279
-  store i32 %tmp8, i32* getelementptr inbounds (%struct.blam, %struct.blam* @global, i32 0, i32 13, i32 0, i32 2), align 4, !tbaa !23
-  %tmp9 = load volatile i32, i32* inttoptr (i32 805874700 to i32*), align 4, !tbaa !23
+  store i32 %tmp8, ptr getelementptr inbounds (%struct.blam, ptr @global, i32 0, i32 13, i32 0, i32 2), align 4, !tbaa !23
+  %tmp9 = load volatile i32, ptr inttoptr (i32 805874700 to ptr), align 4, !tbaa !23
   %tmp10 = and i32 %tmp9, 522133279
-  store i32 %tmp10, i32* getelementptr inbounds (%struct.blam, %struct.blam* @global, i32 0, i32 13, i32 0, i32 3), align 4, !tbaa !23
-  %tmp11 = load volatile i32, i32* inttoptr (i32 805874860 to i32*), align 4, !tbaa !23
+  store i32 %tmp10, ptr getelementptr inbounds (%struct.blam, ptr @global, i32 0, i32 13, i32 0, i32 3), align 4, !tbaa !23
+  %tmp11 = load volatile i32, ptr inttoptr (i32 805874860 to ptr), align 4, !tbaa !23
   %tmp12 = and i32 %tmp11, 16777215
-  store i32 %tmp12, i32* getelementptr inbounds (%struct.blam, %struct.blam* @global, i32 0, i32 13, i32 15), align 4, !tbaa !24
-  %tmp13 = load volatile i32, i32* inttoptr (i32 805874864 to i32*), align 16, !tbaa !23
+  store i32 %tmp12, ptr getelementptr inbounds (%struct.blam, ptr @global, i32 0, i32 13, i32 15), align 4, !tbaa !24
+  %tmp13 = load volatile i32, ptr inttoptr (i32 805874864 to ptr), align 16, !tbaa !23
   %tmp14 = and i32 %tmp13, 16777215
-  store i32 %tmp14, i32* getelementptr inbounds (%struct.blam, %struct.blam* @global, i32 0, i32 13, i32 16), align 4, !tbaa !25
-  %tmp15 = load volatile i32, i32* inttoptr (i32 805874868 to i32*), align 4, !tbaa !23
+  store i32 %tmp14, ptr getelementptr inbounds (%struct.blam, ptr @global, i32 0, i32 13, i32 16), align 4, !tbaa !25
+  %tmp15 = load volatile i32, ptr inttoptr (i32 805874868 to ptr), align 4, !tbaa !23
   %tmp16 = and i32 %tmp15, 16777215
-  store i32 %tmp16, i32* getelementptr inbounds (%struct.blam, %struct.blam* @global, i32 0, i32 13, i32 17), align 4, !tbaa !26
-  %tmp17 = load volatile i32, i32* inttoptr (i32 805874872 to i32*), align 8, !tbaa !23
+  store i32 %tmp16, ptr getelementptr inbounds (%struct.blam, ptr @global, i32 0, i32 13, i32 17), align 4, !tbaa !26
+  %tmp17 = load volatile i32, ptr inttoptr (i32 805874872 to ptr), align 8, !tbaa !23
   %tmp18 = and i32 %tmp17, 16777215
-  store i32 %tmp18, i32* getelementptr inbounds (%struct.blam, %struct.blam* @global, i32 0, i32 13, i32 18), align 4, !tbaa !27
+  store i32 %tmp18, ptr getelementptr inbounds (%struct.blam, ptr @global, i32 0, i32 13, i32 18), align 4, !tbaa !27
   br label %bb19
 
 bb19:                                             ; preds = %bb3, %bb

diff --git a/llvm/test/CodeGen/AArch64/convertphitype.ll b/llvm/test/CodeGen/AArch64/convertphitype.ll
index 22d130d4000a1..a5fc46d2abcaa 100644
--- a/llvm/test/CodeGen/AArch64/convertphitype.ll
+++ b/llvm/test/CodeGen/AArch64/convertphitype.ll
@@ -4,17 +4,17 @@
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--linux-gnu"
 
-define float @convphi1(i32 *%s, i32 *%d, i32 %n) {
+define float @convphi1(ptr %s, ptr %d, i32 %n) {
 ; CHECK-LABEL: @convphi1(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LS_BC:%.*]] = bitcast i32 [[LS]] to float
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[LD:%.*]] = load i32, i32* [[D:%.*]], align 4
+; CHECK-NEXT:    [[LD:%.*]] = load i32, ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    [[LD_BC:%.*]] = bitcast i32 [[LD]] to float
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
@@ -26,11 +26,11 @@ entry:
   br i1 %cmp15, label %then, label %else
 
 then:
-  %ls = load i32, i32* %s, align 4
+  %ls = load i32, ptr %s, align 4
   br label %end
 
 else:
-  %ld = load i32, i32* %d, align 4
+  %ld = load i32, ptr %d, align 4
   br label %end
 
 end:
@@ -39,13 +39,13 @@ end:
   ret float %b
 }
 
-define float @convphi2(i32 *%s, i32 *%d, i32 %n) {
+define float @convphi2(ptr %s, ptr %d, i32 %n) {
 ; CHECK-LABEL: @convphi2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[END:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LS_BC:%.*]] = bitcast i32 [[LS]] to float
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
@@ -57,7 +57,7 @@ entry:
   br i1 %cmp15, label %then, label %end
 
 then:
-  %ls = load i32, i32* %s, align 4
+  %ls = load i32, ptr %s, align 4
   br label %end
 
 end:
@@ -66,13 +66,13 @@ end:
   ret float %b
 }
 
-define float @convphi3(i32 *%s, i32 *%d, i32 %n, float %f) {
+define float @convphi3(ptr %s, ptr %d, i32 %n, float %f) {
 ; CHECK-LABEL: @convphi3(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[END:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LS_BC:%.*]] = bitcast i32 [[LS]] to float
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
@@ -85,7 +85,7 @@ entry:
   br i1 %cmp15, label %then, label %end
 
 then:
-  %ls = load i32, i32* %s, align 4
+  %ls = load i32, ptr %s, align 4
   br label %end
 
 end:
@@ -94,19 +94,19 @@ end:
   ret float %b
 }
 
-define void @convphi4(i32 *%s, i32 *%d, i32 %n, float %f) {
+define void @convphi4(ptr %s, ptr %d, i32 %n, float %f) {
 ; CHECK-LABEL: @convphi4(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[END:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LS_BC:%.*]] = bitcast i32 [[LS]] to float
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[PHI_TC:%.*]] = phi float [ [[LS_BC]], [[THEN]] ], [ [[F:%.*]], [[ENTRY:%.*]] ]
 ; CHECK-NEXT:    [[BC:%.*]] = bitcast float [[PHI_TC]] to i32
-; CHECK-NEXT:    store i32 [[BC]], i32* [[D:%.*]], align 4
+; CHECK-NEXT:    store i32 [[BC]], ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -115,26 +115,26 @@ entry:
   br i1 %cmp15, label %then, label %end
 
 then:
-  %ls = load i32, i32* %s, align 4
+  %ls = load i32, ptr %s, align 4
   br label %end
 
 end:
   %phi = phi i32 [ %ls, %then ], [ %fb, %entry ]
-  store i32 %phi, i32 *%d
+  store i32 %phi, ptr %d
   ret void
 }
 
-define i64 @convphi_d2i(double *%s, double *%d, i32 %n) {
+define i64 @convphi_d2i(ptr %s, ptr %d, i32 %n) {
 ; CHECK-LABEL: @convphi_d2i(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load double, double* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load double, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LS_BC:%.*]] = bitcast double [[LS]] to i64
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[LD:%.*]] = load double, double* [[D:%.*]], align 4
+; CHECK-NEXT:    [[LD:%.*]] = load double, ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    [[LD_BC:%.*]] = bitcast double [[LD]] to i64
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
@@ -146,11 +146,11 @@ entry:
   br i1 %cmp15, label %then, label %else
 
 then:
-  %ls = load double, double* %s, align 4
+  %ls = load double, ptr %s, align 4
   br label %end
 
 else:
-  %ld = load double, double* %d, align 4
+  %ld = load double, ptr %d, align 4
   br label %end
 
 end:
@@ -159,17 +159,17 @@ end:
   ret i64 %b
 }
 
-define i32 @convphi_f2i(float *%s, float *%d, i32 %n) {
+define i32 @convphi_f2i(ptr %s, ptr %d, i32 %n) {
 ; CHECK-LABEL: @convphi_f2i(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load float, float* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load float, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LS_BC:%.*]] = bitcast float [[LS]] to i32
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[LD:%.*]] = load float, float* [[D:%.*]], align 4
+; CHECK-NEXT:    [[LD:%.*]] = load float, ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    [[LD_BC:%.*]] = bitcast float [[LD]] to i32
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
@@ -181,11 +181,11 @@ entry:
   br i1 %cmp15, label %then, label %else
 
 then:
-  %ls = load float, float* %s, align 4
+  %ls = load float, ptr %s, align 4
   br label %end
 
 else:
-  %ld = load float, float* %d, align 4
+  %ld = load float, ptr %d, align 4
   br label %end
 
 end:
@@ -194,17 +194,17 @@ end:
   ret i32 %b
 }
 
-define i16 @convphi_h2i(half *%s, half *%d, i32 %n) {
+define i16 @convphi_h2i(ptr %s, ptr %d, i32 %n) {
 ; CHECK-LABEL: @convphi_h2i(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load half, half* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load half, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LS_BC:%.*]] = bitcast half [[LS]] to i16
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[LD:%.*]] = load half, half* [[D:%.*]], align 4
+; CHECK-NEXT:    [[LD:%.*]] = load half, ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    [[LD_BC:%.*]] = bitcast half [[LD]] to i16
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
@@ -216,11 +216,11 @@ entry:
   br i1 %cmp15, label %then, label %else
 
 then:
-  %ls = load half, half* %s, align 4
+  %ls = load half, ptr %s, align 4
   br label %end
 
 else:
-  %ld = load half, half* %d, align 4
+  %ld = load half, ptr %d, align 4
   br label %end
 
 end:
@@ -229,17 +229,17 @@ end:
   ret i16 %b
 }
 
-define i128 @convphi_ld2i(fp128 *%s, fp128 *%d, i32 %n) {
+define i128 @convphi_ld2i(ptr %s, ptr %d, i32 %n) {
 ; CHECK-LABEL: @convphi_ld2i(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load fp128, fp128* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load fp128, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LS_BC:%.*]] = bitcast fp128 [[LS]] to i128
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[LD:%.*]] = load fp128, fp128* [[D:%.*]], align 4
+; CHECK-NEXT:    [[LD:%.*]] = load fp128, ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    [[LD_BC:%.*]] = bitcast fp128 [[LD]] to i128
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
@@ -251,11 +251,11 @@ entry:
   br i1 %cmp15, label %then, label %else
 
 then:
-  %ls = load fp128, fp128* %s, align 4
+  %ls = load fp128, ptr %s, align 4
   br label %end
 
 else:
-  %ld = load fp128, fp128* %d, align 4
+  %ld = load fp128, ptr %d, align 4
   br label %end
 
 end:
@@ -264,16 +264,16 @@ end:
   ret i128 %b
 }
 
-define <4 x i32> @convphi_4xf2i(<4 x float> *%s, <4 x float> *%d, i32 %n) {
+define <4 x i32> @convphi_4xf2i(ptr %s, ptr %d, i32 %n) {
 ; CHECK-LABEL: @convphi_4xf2i(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load <4 x float>, <4 x float>* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load <4 x float>, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[LD:%.*]] = load <4 x float>, <4 x float>* [[D:%.*]], align 4
+; CHECK-NEXT:    [[LD:%.*]] = load <4 x float>, ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[PHI:%.*]] = phi <4 x float> [ [[LS]], [[THEN]] ], [ [[LD]], [[ELSE]] ]
@@ -285,11 +285,11 @@ entry:
   br i1 %cmp15, label %then, label %else
 
 then:
-  %ls = load <4 x float>, <4 x float>* %s, align 4
+  %ls = load <4 x float>, ptr %s, align 4
   br label %end
 
 else:
-  %ld = load <4 x float>, <4 x float>* %d, align 4
+  %ld = load <4 x float>, ptr %d, align 4
   br label %end
 
 end:
@@ -298,17 +298,17 @@ end:
   ret <4 x i32> %b
 }
 
-define float @convphi_loop(i32 *%s, i32 *%d, i64 %n) {
+define float @convphi_loop(ptr %s, ptr %d, i64 %n) {
 ; CHECK-LABEL: @convphi_loop(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i64 [[N:%.*]], 0
-; CHECK-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LS_BC:%.*]] = bitcast i32 [[LS]] to float
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[LOOP:%.*]], label [[END:%.*]]
 ; CHECK:       loop:
 ; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
 ; CHECK-NEXT:    [[LPHI_TC:%.*]] = phi float [ [[LS_BC]], [[ENTRY]] ], [ [[LD_BC:%.*]], [[LOOP]] ]
-; CHECK-NEXT:    [[LD:%.*]] = load i32, i32* [[D:%.*]], align 4
+; CHECK-NEXT:    [[LD:%.*]] = load i32, ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    [[LD_BC]] = bitcast i32 [[LD]] to float
 ; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
@@ -319,13 +319,13 @@ define float @convphi_loop(i32 *%s, i32 *%d, i64 %n) {
 ;
 entry:
   %cmp15 = icmp sgt i64 %n, 0
-  %ls = load i32, i32* %s, align 4
+  %ls = load i32, ptr %s, align 4
   br i1 %cmp15, label %loop, label %end
 
 loop:
   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
   %lphi = phi i32 [ %ls, %entry ], [ %ld, %loop ]
-  %ld = load i32, i32* %d, align 4
+  %ld = load i32, ptr %d, align 4
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond = icmp eq i64 %iv.next, %n
   br i1 %exitcond, label %end, label %loop
@@ -336,15 +336,15 @@ end:
   ret float %b
 }
 
-define float @convphi_loopdelayed(i32 *%s, i32 *%d, i64 %n) {
+define float @convphi_loopdelayed(ptr %s, ptr %d, i64 %n) {
 ; CHECK-LABEL: @convphi_loopdelayed(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i64 [[N:%.*]], 0
-; CHECK-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[LOOP:%.*]], label [[END:%.*]]
 ; CHECK:       loop:
 ; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT:    [[LD:%.*]] = load i32, i32* [[D:%.*]], align 4
+; CHECK-NEXT:    [[LD:%.*]] = load i32, ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
 ; CHECK-NEXT:    br i1 [[EXITCOND]], label [[END]], label [[LOOP]]
@@ -354,13 +354,13 @@ define float @convphi_loopdelayed(i32 *%s, i32 *%d, i64 %n) {
 ;
 entry:
   %cmp15 = icmp sgt i64 %n, 0
-  %ls = load i32, i32* %s, align 4
+  %ls = load i32, ptr %s, align 4
   br i1 %cmp15, label %loop, label %end
 
 loop:
   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
   %lphi = phi i32 [ %ls, %entry ], [ %lphi, %loop ]
-  %ld = load i32, i32* %d, align 4
+  %ld = load i32, ptr %d, align 4
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond = icmp eq i64 %iv.next, %n
   br i1 %exitcond, label %end, label %loop
@@ -371,18 +371,18 @@ end:
   ret float %b
 }
 
-define float @convphi_loopdelayed2(i32 *%s, i32 *%d, i64 %n) {
+define float @convphi_loopdelayed2(ptr %s, ptr %d, i64 %n) {
 ; CHECK-LABEL: @convphi_loopdelayed2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i64 [[N:%.*]], 0
-; CHECK-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LS_BC:%.*]] = bitcast i32 [[LS]] to float
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[LOOP:%.*]], label [[END:%.*]]
 ; CHECK:       loop:
 ; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
 ; CHECK-NEXT:    [[LPHI_TC:%.*]] = phi float [ [[LS_BC]], [[ENTRY]] ], [ [[LD_BC:%.*]], [[LOOP]] ]
 ; CHECK-NEXT:    [[LPHI2_TC:%.*]] = phi float [ undef, [[ENTRY]] ], [ [[LPHI_TC]], [[LOOP]] ]
-; CHECK-NEXT:    [[LD:%.*]] = load i32, i32* [[D:%.*]], align 4
+; CHECK-NEXT:    [[LD:%.*]] = load i32, ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    [[LD_BC]] = bitcast i32 [[LD]] to float
 ; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
@@ -393,14 +393,14 @@ define float @convphi_loopdelayed2(i32 *%s, i32 *%d, i64 %n) {
 ;
 entry:
   %cmp15 = icmp sgt i64 %n, 0
-  %ls = load i32, i32* %s, align 4
+  %ls = load i32, ptr %s, align 4
   br i1 %cmp15, label %loop, label %end
 
 loop:
   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
   %lphi = phi i32 [ %ls, %entry ], [ %ld, %loop ]
   %lphi2 = phi i32 [ undef, %entry ], [ %lphi, %loop ]
-  %ld = load i32, i32* %d, align 4
+  %ld = load i32, ptr %d, align 4
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond = icmp eq i64 %iv.next, %n
   br i1 %exitcond, label %end, label %loop
@@ -411,15 +411,15 @@ end:
   ret float %b
 }
 
-define float @convphi_loopmore(i32 *%s, i32 *%d, i64 %n) {
+define float @convphi_loopmore(ptr %s, ptr %d, i64 %n) {
 ; CHECK-LABEL: @convphi_loopmore(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 [[N:%.*]], 1
-; CHECK-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LS_BC:%.*]] = bitcast i32 [[LS]] to float
 ; CHECK-NEXT:    br i1 [[CMP]], label [[THEN:%.*]], label [[IFEND:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LD:%.*]] = load i32, i32* [[D:%.*]], align 4
+; CHECK-NEXT:    [[LD:%.*]] = load i32, ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    [[LD_BC:%.*]] = bitcast i32 [[LD]] to float
 ; CHECK-NEXT:    br label [[IFEND]]
 ; CHECK:       ifend:
@@ -432,7 +432,7 @@ define float @convphi_loopmore(i32 *%s, i32 *%d, i64 %n) {
 ; CHECK-NEXT:    [[TMP0:%.*]] = icmp eq i64 [[N]], 1
 ; CHECK-NEXT:    br i1 [[TMP0]], label [[LOOPTHEN:%.*]], label [[LOOPEND]]
 ; CHECK:       loopthen:
-; CHECK-NEXT:    [[LL:%.*]] = load i32, i32* [[D]], align 4
+; CHECK-NEXT:    [[LL:%.*]] = load i32, ptr [[D]], align 4
 ; CHECK-NEXT:    [[LL_BC:%.*]] = bitcast i32 [[LL]] to float
 ; CHECK-NEXT:    br label [[LOOPEND]]
 ; CHECK:       loopend:
@@ -446,11 +446,11 @@ define float @convphi_loopmore(i32 *%s, i32 *%d, i64 %n) {
 ;
 entry:
   %cmp = icmp eq i64 %n, 1
-  %ls = load i32, i32* %s, align 4
+  %ls = load i32, ptr %s, align 4
   br i1 %cmp, label %then, label %ifend
 
 then:
-  %ld = load i32, i32* %d, align 4
+  %ld = load i32, ptr %d, align 4
   br label %ifend
 
 ifend:
@@ -464,7 +464,7 @@ loop:
   br i1 %cmp, label %loopthen, label %loopend
 
 loopthen:
-  %ll = load i32, i32* %d, align 4
+  %ll = load i32, ptr %d, align 4
   br label %loopend
 
 loopend:
@@ -479,21 +479,21 @@ end:
   ret float %b
 }
 
-define void @convphi_stop(i32 *%s, i32 *%d, float *%e, i32 %n) {
+define void @convphi_stop(ptr %s, ptr %d, ptr %e, i32 %n) {
 ; CHECK-LABEL: @convphi_stop(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[LD:%.*]] = load i32, i32* [[D:%.*]], align 4
+; CHECK-NEXT:    [[LD:%.*]] = load i32, ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[PHI:%.*]] = phi i32 [ [[LS]], [[THEN]] ], [ [[LD]], [[ELSE]] ]
 ; CHECK-NEXT:    [[B:%.*]] = bitcast i32 [[PHI]] to float
-; CHECK-NEXT:    store float [[B]], float* [[E:%.*]], align 4
+; CHECK-NEXT:    store float [[B]], ptr [[E:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -501,36 +501,36 @@ entry:
   br i1 %cmp15, label %then, label %else
 
 then:
-  %ls = load i32, i32* %s, align 4
+  %ls = load i32, ptr %s, align 4
   br label %end
 
 else:
-  %ld = load i32, i32* %d, align 4
+  %ld = load i32, ptr %d, align 4
   br label %end
 
 end:
   %phi = phi i32 [ %ls, %then ], [ %ld, %else ]
   %b = bitcast i32 %phi to float
-  store float %b, float* %e, align 4
+  store float %b, ptr %e, align 4
   ret void
 }
 
-define void @convphi_stop2(i32 *%s, i32 *%d, float *%e, i32 %n) {
+define void @convphi_stop2(ptr %s, ptr %d, ptr %e, i32 %n) {
 ; CHECK-LABEL: @convphi_stop2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LSB:%.*]] = bitcast i32 [[LS]] to float
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[LD:%.*]] = load i32, i32* [[D:%.*]], align 4
+; CHECK-NEXT:    [[LD:%.*]] = load i32, ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    [[LDB:%.*]] = bitcast i32 [[LD]] to float
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[PHI:%.*]] = phi float [ [[LSB]], [[THEN]] ], [ [[LDB]], [[ELSE]] ]
-; CHECK-NEXT:    store float [[PHI]], float* [[E:%.*]], align 4
+; CHECK-NEXT:    store float [[PHI]], ptr [[E:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -538,37 +538,37 @@ entry:
   br i1 %cmp15, label %then, label %else
 
 then:
-  %ls = load i32, i32* %s, align 4
+  %ls = load i32, ptr %s, align 4
   %lsb = bitcast i32 %ls to float
   br label %end
 
 else:
-  %ld = load i32, i32* %d, align 4
+  %ld = load i32, ptr %d, align 4
   %ldb = bitcast i32 %ld to float
   br label %end
 
 end:
   %phi = phi float [ %lsb, %then ], [ %ldb, %else ]
-  store float %phi, float* %e, align 4
+  store float %phi, ptr %e, align 4
   ret void
 }
 
-define float @convphi_stop3(i32 *%s, i32 *%d, float *%e, i32 %n) {
+define float @convphi_stop3(ptr %s, ptr %d, ptr %e, i32 %n) {
 ; CHECK-LABEL: @convphi_stop3(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LS_BC:%.*]] = bitcast i32 [[LS]] to float
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[LD:%.*]] = load i32, i32* [[D:%.*]], align 4
+; CHECK-NEXT:    [[LD:%.*]] = load i32, ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    [[LD_BC:%.*]] = bitcast i32 [[LD]] to float
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[PHI_TC:%.*]] = phi float [ [[LS_BC]], [[THEN]] ], [ [[LD_BC]], [[ELSE]] ]
-; CHECK-NEXT:    store float [[PHI_TC]], float* [[E:%.*]], align 4
+; CHECK-NEXT:    store float [[PHI_TC]], ptr [[E:%.*]], align 4
 ; CHECK-NEXT:    ret float [[PHI_TC]]
 ;
 entry:
@@ -576,82 +576,82 @@ entry:
   br i1 %cmp15, label %then, label %else
 
 then:
-  %ls = load i32, i32* %s, align 4
+  %ls = load i32, ptr %s, align 4
   br label %end
 
 else:
-  %ld = load i32, i32* %d, align 4
+  %ld = load i32, ptr %d, align 4
   br label %end
 
 end:
   %phi = phi i32 [ %ls, %then ], [ %ld, %else ]
   %b = bitcast i32 %phi to float
-  store float %b, float* %e, align 4
+  store float %b, ptr %e, align 4
   ret float %b
 }
 
-define void @convphi_stop4(i32 *%s, i32 *%d, float *%e, i32 %n) {
+define void @convphi_stop4(ptr %s, ptr %d, ptr %e, i32 %n) {
 ; CHECK-LABEL: @convphi_stop4(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
-; CHECK-NEXT:    [[LD:%.*]] = load i32, i32* [[D:%.*]], align 4
+; CHECK-NEXT:    [[LD:%.*]] = load i32, ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    [[LD_BC:%.*]] = bitcast i32 [[LD]] to float
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[END:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LS_BC:%.*]] = bitcast i32 [[LS]] to float
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[PHI_TC:%.*]] = phi float [ [[LS_BC]], [[THEN]] ], [ [[LD_BC]], [[ENTRY:%.*]] ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = icmp sgt i32 [[N]], 0
 ; CHECK-NEXT:    [[BC:%.*]] = bitcast float [[PHI_TC]] to i32
-; CHECK-NEXT:    store i32 [[BC]], i32* [[S]], align 4
+; CHECK-NEXT:    store i32 [[BC]], ptr [[S]], align 4
 ; CHECK-NEXT:    br i1 [[TMP0]], label [[THEN2:%.*]], label [[END2:%.*]]
 ; CHECK:       then2:
-; CHECK-NEXT:    [[LF:%.*]] = load float, float* [[E:%.*]], align 4
+; CHECK-NEXT:    [[LF:%.*]] = load float, ptr [[E:%.*]], align 4
 ; CHECK-NEXT:    br label [[END2]]
 ; CHECK:       end2:
 ; CHECK-NEXT:    [[PHI2:%.*]] = phi float [ [[PHI_TC]], [[END]] ], [ [[LF]], [[THEN2]] ]
-; CHECK-NEXT:    store float [[PHI2]], float* [[E]], align 4
+; CHECK-NEXT:    store float [[PHI2]], ptr [[E]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %cmp15 = icmp sgt i32 %n, 0
-  %ld = load i32, i32* %d, align 4
+  %ld = load i32, ptr %d, align 4
   br i1 %cmp15, label %then, label %end
 
 then:
-  %ls = load i32, i32* %s, align 4
+  %ls = load i32, ptr %s, align 4
   br label %end
 
 end:
   %phi = phi i32 [ %ls, %then ], [ %ld, %entry ]
   %phib = bitcast i32 %phi to float
-  store i32 %phi, i32* %s, align 4
+  store i32 %phi, ptr %s, align 4
   br i1 %cmp15, label %then2, label %end2
 
 then2:
-  %lf = load float, float* %e, align 4
+  %lf = load float, ptr %e, align 4
   br label %end2
 
 end2:
   %phi2 = phi float [ %phib, %end ], [ %lf, %then2 ]
-  store float %phi2, float* %e, align 4
+  store float %phi2, ptr %e, align 4
   ret void
 }
 
-define float @multiuse(i32 *%s, i32 *%d, i32 %n) {
+define float @multiuse(ptr %s, ptr %d, i32 %n) {
 ; CHECK-LABEL: @multiuse(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[A:%.*]] = add i32 [[LS]], 2
-; CHECK-NEXT:    store i32 [[A]], i32* [[D:%.*]], align 4
+; CHECK-NEXT:    store i32 [[A]], ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[LD:%.*]] = load i32, i32* [[D]], align 4
+; CHECK-NEXT:    [[LD:%.*]] = load i32, ptr [[D]], align 4
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[PHI:%.*]] = phi i32 [ [[LS]], [[THEN]] ], [ [[LD]], [[ELSE]] ]
@@ -663,13 +663,13 @@ entry:
   br i1 %cmp15, label %then, label %else
 
 then:
-  %ls = load i32, i32* %s, align 4
+  %ls = load i32, ptr %s, align 4
   %a = add i32 %ls, 2
-  store i32 %a, i32* %d, align 4
+  store i32 %a, ptr %d, align 4
   br label %end
 
 else:
-  %ld = load i32, i32* %d, align 4
+  %ld = load i32, ptr %d, align 4
   br label %end
 
 end:
@@ -678,16 +678,16 @@ end:
   ret float %b
 }
 
-define float @convphi_volatile(i32 *%s, i32 *%d, i32 %n) {
+define float @convphi_volatile(ptr %s, ptr %d, i32 %n) {
 ; CHECK-LABEL: @convphi_volatile(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load volatile i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load volatile i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[LD:%.*]] = load i32, i32* [[D:%.*]], align 4
+; CHECK-NEXT:    [[LD:%.*]] = load i32, ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[PHI:%.*]] = phi i32 [ [[LS]], [[THEN]] ], [ [[LD]], [[ELSE]] ]
@@ -700,11 +700,11 @@ define float @convphi_volatile(i32 *%s, i32 *%d, i32 %n) {
 ; DEBUG-NEXT:    call void @llvm.dbg.value(metadata i1 [[CMP15]], metadata !353, metadata !DIExpression()), !dbg !358
 ; DEBUG-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[ELSE:%.*]], !dbg !359
 ; DEBUG:       then:
-; DEBUG-NEXT:    [[LS:%.*]] = load volatile i32, i32* [[S:%.*]], align 4, !dbg !360
+; DEBUG-NEXT:    [[LS:%.*]] = load volatile i32, ptr [[S:%.*]], align 4, !dbg !360
 ; DEBUG-NEXT:    call void @llvm.dbg.value(metadata i32 [[LS]], metadata !354, metadata !DIExpression()), !dbg !360
 ; DEBUG-NEXT:    br label [[END:%.*]], !dbg !361
 ; DEBUG:       else:
-; DEBUG-NEXT:    [[LD:%.*]] = load i32, i32* [[D:%.*]], align 4, !dbg !362
+; DEBUG-NEXT:    [[LD:%.*]] = load i32, ptr [[D:%.*]], align 4, !dbg !362
 ; DEBUG-NEXT:    call void @llvm.dbg.value(metadata i32 [[LD]], metadata !355, metadata !DIExpression()), !dbg !362
 ; DEBUG-NEXT:    br label [[END]], !dbg !363
 ; DEBUG:       end:
@@ -718,11 +718,11 @@ entry:
   br i1 %cmp15, label %then, label %else
 
 then:
-  %ls = load volatile i32, i32* %s, align 4
+  %ls = load volatile i32, ptr %s, align 4
   br label %end
 
 else:
-  %ld = load i32, i32* %d, align 4
+  %ld = load i32, ptr %d, align 4
   br label %end
 
 end:
@@ -731,18 +731,18 @@ end:
   ret float %b
 }
 
-define void @convphi_volatile2(i32 *%s, i32 *%d, i32 %n, float %f) {
+define void @convphi_volatile2(ptr %s, ptr %d, i32 %n, float %f) {
 ; CHECK-LABEL: @convphi_volatile2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    [[FB:%.*]] = bitcast float [[F:%.*]] to i32
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[END:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[PHI:%.*]] = phi i32 [ [[LS]], [[THEN]] ], [ [[FB]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    store volatile i32 [[PHI]], i32* [[D:%.*]], align 4
+; CHECK-NEXT:    store volatile i32 [[PHI]], ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
 ; DEBUG-LABEL: @convphi_volatile2(
@@ -753,13 +753,13 @@ define void @convphi_volatile2(i32 *%s, i32 *%d, i32 %n, float %f) {
 ; DEBUG-NEXT:    call void @llvm.dbg.value(metadata i32 [[FB]], metadata !370, metadata !DIExpression()), !dbg !374
 ; DEBUG-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[END:%.*]], !dbg !375
 ; DEBUG:       then:
-; DEBUG-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4, !dbg !376
+; DEBUG-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4, !dbg !376
 ; DEBUG-NEXT:    call void @llvm.dbg.value(metadata i32 [[LS]], metadata !371, metadata !DIExpression()), !dbg !376
 ; DEBUG-NEXT:    br label [[END]], !dbg !377
 ; DEBUG:       end:
 ; DEBUG-NEXT:    [[PHI:%.*]] = phi i32 [ [[LS]], [[THEN]] ], [ [[FB]], [[ENTRY:%.*]] ], !dbg !378
 ; DEBUG-NEXT:    call void @llvm.dbg.value(metadata i32 [[PHI]], metadata !372, metadata !DIExpression()), !dbg !378
-; DEBUG-NEXT:    store volatile i32 [[PHI]], i32* [[D:%.*]], align 4, !dbg !379
+; DEBUG-NEXT:    store volatile i32 [[PHI]], ptr [[D:%.*]], align 4, !dbg !379
 ; DEBUG-NEXT:    ret void, !dbg !380
 entry:
   %cmp15 = icmp sgt i32 %n, 0
@@ -767,25 +767,25 @@ entry:
   br i1 %cmp15, label %then, label %end
 
 then:
-  %ls = load i32, i32* %s, align 4
+  %ls = load i32, ptr %s, align 4
   br label %end
 
 end:
   %phi = phi i32 [ %ls, %then ], [ %fb, %entry ]
-  store volatile i32 %phi, i32 *%d
+  store volatile i32 %phi, ptr %d
   ret void
 }
 
-define float @convphi_atomic(i32 *%s, i32 *%d, i32 %n) {
+define float @convphi_atomic(ptr %s, ptr %d, i32 %n) {
 ; CHECK-LABEL: @convphi_atomic(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[ELSE:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load atomic i32, i32* [[S:%.*]] acquire, align 4
+; CHECK-NEXT:    [[LS:%.*]] = load atomic i32, ptr [[S:%.*]] acquire, align 4
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       else:
-; CHECK-NEXT:    [[LD:%.*]] = load i32, i32* [[D:%.*]], align 4
+; CHECK-NEXT:    [[LD:%.*]] = load i32, ptr [[D:%.*]], align 4
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[PHI:%.*]] = phi i32 [ [[LS]], [[THEN]] ], [ [[LD]], [[ELSE]] ]
@@ -798,11 +798,11 @@ define float @convphi_atomic(i32 *%s, i32 *%d, i32 %n) {
 ; DEBUG-NEXT:    call void @llvm.dbg.value(metadata i1 [[CMP15]], metadata !383, metadata !DIExpression()), !dbg !388
 ; DEBUG-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[ELSE:%.*]], !dbg !389
 ; DEBUG:       then:
-; DEBUG-NEXT:    [[LS:%.*]] = load atomic i32, i32* [[S:%.*]] acquire, align 4, !dbg !390
+; DEBUG-NEXT:    [[LS:%.*]] = load atomic i32, ptr [[S:%.*]] acquire, align 4, !dbg !390
 ; DEBUG-NEXT:    call void @llvm.dbg.value(metadata i32 [[LS]], metadata !384, metadata !DIExpression()), !dbg !390
 ; DEBUG-NEXT:    br label [[END:%.*]], !dbg !391
 ; DEBUG:       else:
-; DEBUG-NEXT:    [[LD:%.*]] = load i32, i32* [[D:%.*]], align 4, !dbg !392
+; DEBUG-NEXT:    [[LD:%.*]] = load i32, ptr [[D:%.*]], align 4, !dbg !392
 ; DEBUG-NEXT:    call void @llvm.dbg.value(metadata i32 [[LD]], metadata !385, metadata !DIExpression()), !dbg !392
 ; DEBUG-NEXT:    br label [[END]], !dbg !393
 ; DEBUG:       end:
@@ -816,11 +816,11 @@ entry:
   br i1 %cmp15, label %then, label %else
 
 then:
-  %ls = load atomic i32, i32* %s acquire, align 4
+  %ls = load atomic i32, ptr %s acquire, align 4
   br label %end
 
 else:
-  %ld = load i32, i32* %d, align 4
+  %ld = load i32, ptr %d, align 4
   br label %end
 
 end:
@@ -829,18 +829,18 @@ end:
   ret float %b
 }
 
-define void @convphi_atomic2(i32 *%s, i32 *%d, i32 %n, float %f) {
+define void @convphi_atomic2(ptr %s, ptr %d, i32 %n, float %f) {
 ; CHECK-LABEL: @convphi_atomic2(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    [[FB:%.*]] = bitcast float [[F:%.*]] to i32
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[END:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[PHI:%.*]] = phi i32 [ [[LS]], [[THEN]] ], [ [[FB]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    store atomic i32 [[PHI]], i32* [[D:%.*]] release, align 4
+; CHECK-NEXT:    store atomic i32 [[PHI]], ptr [[D:%.*]] release, align 4
 ; CHECK-NEXT:    ret void
 ;
 ; DEBUG-LABEL: @convphi_atomic2(
@@ -851,13 +851,13 @@ define void @convphi_atomic2(i32 *%s, i32 *%d, i32 %n, float %f) {
 ; DEBUG-NEXT:    call void @llvm.dbg.value(metadata i32 [[FB]], metadata !400, metadata !DIExpression()), !dbg !404
 ; DEBUG-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[END:%.*]], !dbg !405
 ; DEBUG:       then:
-; DEBUG-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4, !dbg !406
+; DEBUG-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4, !dbg !406
 ; DEBUG-NEXT:    call void @llvm.dbg.value(metadata i32 [[LS]], metadata !401, metadata !DIExpression()), !dbg !406
 ; DEBUG-NEXT:    br label [[END]], !dbg !407
 ; DEBUG:       end:
 ; DEBUG-NEXT:    [[PHI:%.*]] = phi i32 [ [[LS]], [[THEN]] ], [ [[FB]], [[ENTRY:%.*]] ], !dbg !408
 ; DEBUG-NEXT:    call void @llvm.dbg.value(metadata i32 [[PHI]], metadata !402, metadata !DIExpression()), !dbg !408
-; DEBUG-NEXT:    store atomic i32 [[PHI]], i32* [[D:%.*]] release, align 4, !dbg !409
+; DEBUG-NEXT:    store atomic i32 [[PHI]], ptr [[D:%.*]] release, align 4, !dbg !409
 ; DEBUG-NEXT:    ret void, !dbg !410
 entry:
   %cmp15 = icmp sgt i32 %n, 0
@@ -865,22 +865,22 @@ entry:
   br i1 %cmp15, label %then, label %end
 
 then:
-  %ls = load i32, i32* %s, align 4
+  %ls = load i32, ptr %s, align 4
   br label %end
 
 end:
   %phi = phi i32 [ %ls, %then ], [ %fb, %entry ]
-  store atomic i32 %phi, i32 *%d release, align 4
+  store atomic i32 %phi, ptr %d release, align 4
   ret void
 }
 
-define float @convphi2_zero(i32 *%s, i32 *%d, i32 %n) {
+define float @convphi2_zero(ptr %s, ptr %d, i32 %n) {
 ; CHECK-LABEL: @convphi2_zero(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[END:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LS_BC:%.*]] = bitcast i32 [[LS]] to float
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
@@ -892,7 +892,7 @@ entry:
   br i1 %cmp15, label %then, label %end
 
 then:
-  %ls = load i32, i32* %s, align 4
+  %ls = load i32, ptr %s, align 4
   br label %end
 
 end:
@@ -901,13 +901,13 @@ end:
   ret float %b
 }
 
-define i32 @convphi2f_zero(float *%s, float *%d, i32 %n) {
+define i32 @convphi2f_zero(ptr %s, ptr %d, i32 %n) {
 ; CHECK-LABEL: @convphi2f_zero(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[END:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load float, float* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load float, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LS_BC:%.*]] = bitcast float [[LS]] to i32
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
@@ -919,7 +919,7 @@ entry:
   br i1 %cmp15, label %then, label %end
 
 then:
-  %ls = load float, float* %s, align 4
+  %ls = load float, ptr %s, align 4
   br label %end
 
 end:
@@ -928,13 +928,13 @@ end:
   ret i32 %b
 }
 
-define float @convphi2_ten(i32 *%s, i32 *%d, i32 %n) {
+define float @convphi2_ten(ptr %s, ptr %d, i32 %n) {
 ; CHECK-LABEL: @convphi2_ten(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[END:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load i32, i32* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load i32, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LS_BC:%.*]] = bitcast i32 [[LS]] to float
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
@@ -946,7 +946,7 @@ entry:
   br i1 %cmp15, label %then, label %end
 
 then:
-  %ls = load i32, i32* %s, align 4
+  %ls = load i32, ptr %s, align 4
   br label %end
 
 end:
@@ -955,13 +955,13 @@ end:
   ret float %b
 }
 
-define i32 @convphi2f_ten(float *%s, float *%d, i32 %n) {
+define i32 @convphi2f_ten(ptr %s, ptr %d, i32 %n) {
 ; CHECK-LABEL: @convphi2f_ten(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP15:%.*]] = icmp sgt i32 [[N:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP15]], label [[THEN:%.*]], label [[END:%.*]]
 ; CHECK:       then:
-; CHECK-NEXT:    [[LS:%.*]] = load float, float* [[S:%.*]], align 4
+; CHECK-NEXT:    [[LS:%.*]] = load float, ptr [[S:%.*]], align 4
 ; CHECK-NEXT:    [[LS_BC:%.*]] = bitcast float [[LS]] to i32
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
@@ -973,7 +973,7 @@ entry:
   br i1 %cmp15, label %then, label %end
 
 then:
-  %ls = load float, float* %s, align 4
+  %ls = load float, ptr %s, align 4
   br label %end
 
 end:

diff  --git a/llvm/test/CodeGen/AArch64/copyprop.ll b/llvm/test/CodeGen/AArch64/copyprop.ll
index a605f23baba21..6fcb8d435a4e3 100644
--- a/llvm/test/CodeGen/AArch64/copyprop.ll
+++ b/llvm/test/CodeGen/AArch64/copyprop.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -O3 -mtriple=aarch64-- | FileCheck %s
 
-define void @copyprop_after_mbp(i32 %v, i32* %a, i32* %b, i32* %c, i32* %d) {
+define void @copyprop_after_mbp(i32 %v, ptr %a, ptr %b, ptr %c, ptr %d) {
 ; CHECK-LABEL: copyprop_after_mbp:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmp w0, #10
@@ -25,16 +25,16 @@ define void @copyprop_after_mbp(i32 %v, i32* %a, i32* %b, i32* %c, i32* %d) {
   br i1 %1, label %bb.0, label %bb.1
 
 bb.0:
-  store i32 15, i32* %b, align 4
+  store i32 15, ptr %b, align 4
   br label %bb.2
 
 bb.1:
-  store i32 25, i32* %c, align 4
+  store i32 25, ptr %c, align 4
   br label %bb.2
 
 bb.2:
   %2 = phi i32 [ 1, %bb.0 ], [ 0, %bb.1 ]
-  store i32 %2, i32* %a, align 4
-  store i32 12, i32* %d, align 4
+  store i32 %2, ptr %a, align 4
+  store i32 12, ptr %d, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/csel-zero-float.ll b/llvm/test/CodeGen/AArch64/csel-zero-float.ll
index 9869c651f56f5..6edde13f0a7c7 100644
--- a/llvm/test/CodeGen/AArch64/csel-zero-float.ll
+++ b/llvm/test/CodeGen/AArch64/csel-zero-float.ll
@@ -2,8 +2,8 @@
 ; There is no invocation to FileCheck as this
 ; caused a crash in "Post-RA pseudo instruction expansion"
 
-define double @foo(float *%user, float %t17) {
-  %t16 = load float, float* %user, align 8
+define double @foo(ptr %user, float %t17) {
+  %t16 = load float, ptr %user, align 8
   %conv = fpext float %t16 to double
   %cmp26 = fcmp fast oeq float %t17, 0.000000e+00
   %div = fdiv fast float %t16, %t17

diff  --git a/llvm/test/CodeGen/AArch64/csr-split.ll b/llvm/test/CodeGen/AArch64/csr-split.ll
index 2251f6e285d56..9143ddd545cdf 100644
--- a/llvm/test/CodeGen/AArch64/csr-split.ll
+++ b/llvm/test/CodeGen/AArch64/csr-split.ll
@@ -6,7 +6,7 @@
 
 @a = common dso_local local_unnamed_addr global i32 0, align 4
 
-define dso_local signext i32 @test1(i32* %b) local_unnamed_addr uwtable  {
+define dso_local signext i32 @test1(ptr %b) local_unnamed_addr uwtable  {
 ; CHECK-LABEL: test1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
@@ -75,15 +75,15 @@ define dso_local signext i32 @test1(i32* %b) local_unnamed_addr uwtable  {
 ; CHECK-APPLE-NEXT:    b _callNonVoid
 ; CHECK-APPLE-NEXT:    .loh AdrpLdr Lloh0, Lloh1
 entry:
-  %0 = load i32, i32* @a, align 4, !tbaa !2
+  %0 = load i32, ptr @a, align 4, !tbaa !2
   %conv = sext i32 %0 to i64
-  %1 = inttoptr i64 %conv to i32*
-  %cmp = icmp eq i32* %1, %b
+  %1 = inttoptr i64 %conv to ptr
+  %cmp = icmp eq ptr %1, %b
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %call = tail call signext i32 bitcast (i32 (...)* @callVoid to i32 ()*)()
-  %call2 = tail call signext i32 @callNonVoid(i32* %b)
+  %call = tail call signext i32 @callVoid()
+  %call2 = tail call signext i32 @callNonVoid(ptr %b)
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
@@ -93,9 +93,9 @@ if.end:                                           ; preds = %if.then, %entry
 
 declare signext i32 @callVoid(...) local_unnamed_addr
 
-declare signext i32 @callNonVoid(i32*) local_unnamed_addr
+declare signext i32 @callNonVoid(ptr) local_unnamed_addr
 
-define dso_local signext i32 @test2(i32* %p1) local_unnamed_addr uwtable  {
+define dso_local signext i32 @test2(ptr %p1) local_unnamed_addr uwtable  {
 ; CHECK-LABEL: test2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
@@ -170,19 +170,19 @@ define dso_local signext i32 @test2(i32* %p1) local_unnamed_addr uwtable  {
 ; CHECK-APPLE-NEXT:    ret
 ; CHECK-APPLE-NEXT:    .loh AdrpLdr Lloh2, Lloh3
 entry:
-  %tobool = icmp eq i32* %p1, null
+  %tobool = icmp eq ptr %p1, null
   br i1 %tobool, label %return, label %if.end
 
 if.end:                                           ; preds = %entry
-  %0 = load i32, i32* @a, align 4, !tbaa !2
+  %0 = load i32, ptr @a, align 4, !tbaa !2
   %conv = sext i32 %0 to i64
-  %1 = inttoptr i64 %conv to i32*
-  %cmp = icmp eq i32* %1, %p1
+  %1 = inttoptr i64 %conv to ptr
+  %cmp = icmp eq ptr %1, %p1
   br i1 %cmp, label %if.then2, label %return
 
 if.then2:                                         ; preds = %if.end
-  %call = tail call signext i32 bitcast (i32 (...)* @callVoid to i32 ()*)()
-  %call3 = tail call signext i32 @callNonVoid(i32* nonnull %p1)
+  %call = tail call signext i32 @callVoid()
+  %call3 = tail call signext i32 @callNonVoid(ptr nonnull %p1)
   br label %return
 
 return:                                           ; preds = %if.end, %entry, %if.then2
@@ -191,7 +191,7 @@ return:                                           ; preds = %if.end, %entry, %if
 }
 
 
-define dso_local i8* @test3(i8** nocapture %p1, i8 zeroext %p2) local_unnamed_addr uwtable  {
+define dso_local ptr @test3(ptr nocapture %p1, i8 zeroext %p2) local_unnamed_addr uwtable  {
 ; CHECK-LABEL: test3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x30, [sp, #-32]! // 8-byte Folded Spill
@@ -244,20 +244,20 @@ define dso_local i8* @test3(i8** nocapture %p1, i8 zeroext %p2) local_unnamed_ad
 ; CHECK-APPLE-NEXT:    .cfi_restore w20
 ; CHECK-APPLE-NEXT:    ret
 entry:
-  %0 = load i8*, i8** %p1, align 8, !tbaa !6
-  %tobool = icmp eq i8* %0, null
+  %0 = load ptr, ptr %p1, align 8, !tbaa !6
+  %tobool = icmp eq ptr %0, null
   br i1 %tobool, label %land.end, label %land.rhs
 
 land.rhs:                                         ; preds = %entry
-  %call = tail call i8* @bar(i8* nonnull %0, i8 zeroext %p2)
-  store i8* %call, i8** %p1, align 8, !tbaa !6
+  %call = tail call ptr @bar(ptr nonnull %0, i8 zeroext %p2)
+  store ptr %call, ptr %p1, align 8, !tbaa !6
   br label %land.end
 
 land.end:                                         ; preds = %entry, %land.rhs
-  ret i8* %0
+  ret ptr %0
 }
 
-declare i8* @bar(i8*, i8 zeroext) local_unnamed_addr
+declare ptr @bar(ptr, i8 zeroext) local_unnamed_addr
 
 
 !llvm.module.flags = !{!0}

diff  --git a/llvm/test/CodeGen/AArch64/cxx-tlscc.ll b/llvm/test/CodeGen/AArch64/cxx-tlscc.ll
index 237aa4b7c0953..21367aaa8b07f 100644
--- a/llvm/test/CodeGen/AArch64/cxx-tlscc.ll
+++ b/llvm/test/CodeGen/AArch64/cxx-tlscc.ll
@@ -12,22 +12,22 @@
 @__tls_guard = internal thread_local unnamed_addr global i1 false
 @sum1 = internal thread_local global i32 0, align 4
 
-declare %struct.S* @_ZN1SC1Ev(%struct.S* returned)
-declare %struct.S* @_ZN1SD1Ev(%struct.S* returned)
-declare i32 @_tlv_atexit(void (i8*)*, i8*, i8*)
+declare ptr @_ZN1SC1Ev(ptr returned)
+declare ptr @_ZN1SD1Ev(ptr returned)
+declare i32 @_tlv_atexit(ptr, ptr, ptr)
 
-define cxx_fast_tlscc nonnull %struct.S* @_ZTW2sg() nounwind {
-  %.b.i = load i1, i1* @__tls_guard, align 1
+define cxx_fast_tlscc nonnull ptr @_ZTW2sg() nounwind {
+  %.b.i = load i1, ptr @__tls_guard, align 1
   br i1 %.b.i, label %__tls_init.exit, label %init.i
 
 init.i:
-  store i1 true, i1* @__tls_guard, align 1
-  %call.i.i = tail call %struct.S* @_ZN1SC1Ev(%struct.S* nonnull @sg)
-  %1 = tail call i32 @_tlv_atexit(void (i8*)* nonnull bitcast (%struct.S* (%struct.S*)* @_ZN1SD1Ev to void (i8*)*), i8* nonnull getelementptr inbounds (%struct.S, %struct.S* @sg, i64 0, i32 0), i8* nonnull @__dso_handle)
+  store i1 true, ptr @__tls_guard, align 1
+  %call.i.i = tail call ptr @_ZN1SC1Ev(ptr nonnull @sg)
+  %1 = tail call i32 @_tlv_atexit(ptr nonnull @_ZN1SD1Ev, ptr nonnull @sg, ptr nonnull @__dso_handle)
   br label %__tls_init.exit
 
 __tls_init.exit:
-  ret %struct.S* @sg
+  ret ptr @sg
 }
 
 ; CHECK-LABEL: _ZTW2sg
@@ -152,8 +152,8 @@ __tls_init.exit:
 ; CHECK-O0-LABEL: _ZTW4sum1
 ; CHECK-O0-NOT: vstr
 ; CHECK-O0-NOT: vldr
-define cxx_fast_tlscc nonnull i32* @_ZTW4sum1() nounwind {
-  ret i32* @sum1
+define cxx_fast_tlscc nonnull ptr @_ZTW4sum1() nounwind {
+  ret ptr @sum1
 }
 
 ; Make sure at O0, we don't generate spilling/reloading of the CSRs.
@@ -203,20 +203,20 @@ define cxx_fast_tlscc nonnull i32* @_ZTW4sum1() nounwind {
 %class.C = type { i32 }
 @tC = internal thread_local global %class.C zeroinitializer, align 4
 declare cxx_fast_tlscc void @tls_helper()
-define cxx_fast_tlscc %class.C* @tls_test2() #1 {
+define cxx_fast_tlscc ptr @tls_test2() #1 {
   call cxx_fast_tlscc void @tls_helper()
-  ret %class.C* @tC
+  ret ptr @tC
 }
 
 ; Make sure we do not allow tail call when caller and callee have different
 ; calling conventions.
-declare %class.C* @_ZN1CD1Ev(%class.C* readnone returned %this)
+declare ptr @_ZN1CD1Ev(ptr readnone returned %this)
 ; CHECK-LABEL: tls_test
 ; CHECK: bl __tlv_atexit
 define cxx_fast_tlscc void @__tls_test() {
 entry:
-  store i32 0, i32* getelementptr inbounds (%class.C, %class.C* @tC, i64 0, i32 0), align 4
-  %0 = tail call i32 @_tlv_atexit(void (i8*)* bitcast (%class.C* (%class.C*)* @_ZN1CD1Ev to void (i8*)*), i8* bitcast (%class.C* @tC to i8*), i8* nonnull @__dso_handle) #1
+  store i32 0, ptr @tC, align 4
+  %0 = tail call i32 @_tlv_atexit(ptr @_ZN1CD1Ev, ptr @tC, ptr nonnull @__dso_handle) #1
   ret void
 }
 
@@ -239,12 +239,12 @@ define cxx_fast_tlscc void @weird_prologue_regs(i32 %n) #1 {
   %p0 = alloca i32, i32 200
   %p1 = alloca i32, align 32
   %p2 = alloca i32, i32 %n
-  call void @callee(i32* %p0)
-  call void @callee(i32* %p1)
-  call void @callee(i32* %p2)
+  call void @callee(ptr %p0)
+  call void @callee(ptr %p1)
+  call void @callee(ptr %p2)
   ret void
 }
-declare void @callee(i32*)
+declare void @callee(ptr)
 
 attributes #0 = { nounwind "frame-pointer"="all" }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/AArch64/dag-ReplaceAllUsesOfValuesWith.ll b/llvm/test/CodeGen/AArch64/dag-ReplaceAllUsesOfValuesWith.ll
index 90b004233fb52..d76e817e62a49 100644
--- a/llvm/test/CodeGen/AArch64/dag-ReplaceAllUsesOfValuesWith.ll
+++ b/llvm/test/CodeGen/AArch64/dag-ReplaceAllUsesOfValuesWith.ll
@@ -24,7 +24,7 @@
 ; #11 0x0000000002e12f41 (anonymous namespace)::DAGCombiner::visit(llvm::SDNode*) DAGCombiner.cpp:0:0
 ; #12 0x0000000002e14fe5 (anonymous namespace)::DAGCombiner::combine(llvm::SDNode*) DAGCombiner.cpp:0:0
 
-define i64 @g({ i64, i64 }* %p) {
+define i64 @g(ptr %p) {
 ; CHECK-LABEL: g:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x0, #8]
@@ -32,11 +32,10 @@ define i64 @g({ i64, i64 }* %p) {
 ; CHECK-NEXT:    add x8, x9, x8
 ; CHECK-NEXT:    sub x0, x8, x8
 ; CHECK-NEXT:    ret
-  %vecp = bitcast { i64, i64 }* %p to <2 x i64>*
-  %vec = load <2 x i64>, <2 x i64>* %vecp, align 1
+  %vec = load <2 x i64>, ptr %p, align 1
   %elt = extractelement <2 x i64> %vec, i32 1
-  %scalarp = getelementptr inbounds { i64, i64 }, { i64, i64 }* %p, i32 0, i32 1
-  %scalar = load i64, i64* %scalarp, align 1
+  %scalarp = getelementptr inbounds { i64, i64 }, ptr %p, i32 0, i32 1
+  %scalar = load i64, ptr %scalarp, align 1
   %add.i62 = add i64 %elt, %scalar
   %add.i66 = add i64 %add.i62, %elt
   %add.i72 = add i64 %scalar, %scalar

diff  --git a/llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll b/llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll
index cdbc6d77a4c3e..61df3965ca1be 100644
--- a/llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll
+++ b/llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll
@@ -3,24 +3,24 @@
 @.str2 = private unnamed_addr constant [9 x i8] c"_%d____\0A\00", align 1
 
 ; Function Attrs: nounwind ssp
-define i32 @main(i32 %argc, i8** %argv) #0 {
+define i32 @main(i32 %argc, ptr %argv) #0 {
 main_:
   %tmp = alloca i32, align 4
   %i32T = alloca i32, align 4
   %i32F = alloca i32, align 4
   %i32X = alloca i32, align 4
-  store i32 %argc, i32* %tmp
-  store i32 15, i32* %i32T, align 4
-  store i32 5, i32* %i32F, align 4
-  %tmp6 = load i32, i32* %tmp, align 4
+  store i32 %argc, ptr %tmp
+  store i32 15, ptr %i32T, align 4
+  store i32 5, ptr %i32F, align 4
+  %tmp6 = load i32, ptr %tmp, align 4
   %tmp7 = icmp ne i32 %tmp6, 0
   %tmp8 = xor i1 %tmp7, true
-  %tmp9 = load i32, i32* %i32T, align 4
-  %tmp10 = load i32, i32* %i32F, align 4
+  %tmp9 = load i32, ptr %i32T, align 4
+  %tmp10 = load i32, ptr %i32F, align 4
   %DHSelect = select i1 %tmp8, i32 %tmp9, i32 %tmp10
-  store i32 %DHSelect, i32* %i32X, align 4
-  %tmp15 = load i32, i32* %i32X, align 4
-  %tmp17 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str2, i32 0, i32 0), i32 %tmp15)
+  store i32 %DHSelect, ptr %i32X, align 4
+  %tmp15 = load i32, ptr %i32X, align 4
+  %tmp17 = call i32 (ptr, ...) @printf(ptr @.str2, i32 %tmp15)
   ret i32 0
 
 ; CHECK: main:
@@ -30,7 +30,7 @@ main_:
 }
 
 
-declare i32 @printf(i8*, ...) #1
+declare i32 @printf(ptr, ...) #1
 
 attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff  --git a/llvm/test/CodeGen/AArch64/dag-combine-lifetime-end-store-typesize.ll b/llvm/test/CodeGen/AArch64/dag-combine-lifetime-end-store-typesize.ll
index 7a5f763d44e20..ac6fd776106c8 100644
--- a/llvm/test/CodeGen/AArch64/dag-combine-lifetime-end-store-typesize.ll
+++ b/llvm/test/CodeGen/AArch64/dag-combine-lifetime-end-store-typesize.ll
@@ -4,15 +4,15 @@
 ; This issue appeared in DAGCombiner::visitLIFETIME_END when visiting a LIFETIME_END
 ; node linked to a scalable store.
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
 
 define void @foo(<vscale x 4 x i32>* nocapture dereferenceable(16) %ptr) {
 entry:
   %tmp = alloca <vscale x 4 x i32>, align 8
-  %tmp_ptr = bitcast <vscale x 4 x i32>* %tmp to i8*
-  call void @llvm.lifetime.start.p0i8(i64 32, i8* %tmp_ptr)
+  %tmp_ptr = bitcast <vscale x 4 x i32>* %tmp to ptr
+  call void @llvm.lifetime.start.p0(i64 32, ptr %tmp_ptr)
   store <vscale x 4 x i32> undef, <vscale x 4 x i32>* %ptr
-  call void @llvm.lifetime.end.p0i8(i64 32, i8* %tmp_ptr)
+  call void @llvm.lifetime.end.p0(i64 32, ptr %tmp_ptr)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/dag-combine-select.ll b/llvm/test/CodeGen/AArch64/dag-combine-select.ll
index 6b08479a6d62d..92ae9410b965f 100644
--- a/llvm/test/CodeGen/AArch64/dag-combine-select.ll
+++ b/llvm/test/CodeGen/AArch64/dag-combine-select.ll
@@ -69,8 +69,8 @@ define void @test1(i32 %bitset, i32 %val0, i32 %val1) {
   %cmp3 = icmp eq i32 %bitset, 42
   %or.cond = or i1 %cmp3, %cmp5
   %cond17 = select i1 %or.cond, i32 %val0, i32 %val1
-  store volatile i32 %cond11, i32* @out, align 4
-  store volatile i32 %cond17, i32* @out, align 4
+  store volatile i32 %cond11, ptr @out, align 4
+  store volatile i32 %cond17, ptr @out, align 4
   ret void
 }
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:

diff  --git a/llvm/test/CodeGen/AArch64/darwinpcs-tail.ll b/llvm/test/CodeGen/AArch64/darwinpcs-tail.ll
index 9d13ed6269245..5d3c755d0d73d 100644
--- a/llvm/test/CodeGen/AArch64/darwinpcs-tail.ll
+++ b/llvm/test/CodeGen/AArch64/darwinpcs-tail.ll
@@ -13,24 +13,24 @@
 ; CHECK:       b __ZN1C1fEiiiiiiiiiz
 
 %class.C = type { %class.A.base, [4 x i8], %class.B.base, [4 x i8] }
-%class.A.base = type <{ i32 (...)**, i32 }>
-%class.B.base = type <{ i32 (...)**, i32 }>
+%class.A.base = type <{ ptr, i32 }>
+%class.B.base = type <{ ptr, i32 }>
 
-declare void @_ZN1C3addEPKcz(%class.C*, i8*, ...) unnamed_addr #0 align 2
+declare void @_ZN1C3addEPKcz(ptr, ptr, ...) unnamed_addr #0 align 2
 
-define void @_ZThn16_N1C3addEPKcz(%class.C* %0, i8* %1, ...) unnamed_addr #0 align 2 {
-  musttail call void (%class.C*, i8*, ...) @_ZN1C3addEPKcz(%class.C* noundef nonnull align 8 dereferenceable(28) undef, i8* noundef %1, ...)
+define void @_ZThn16_N1C3addEPKcz(ptr %0, ptr %1, ...) unnamed_addr #0 align 2 {
+  musttail call void (ptr, ptr, ...) @_ZN1C3addEPKcz(ptr noundef nonnull align 8 dereferenceable(28) undef, ptr noundef %1, ...)
   ret void
 }
 
-define void @tailTest(%class.C* %0, i8* %1, ...) unnamed_addr #0 align 2 {
-  tail call void (%class.C*, i8*, ...) @_ZN1C3addEPKcz(%class.C* noundef nonnull align 8 dereferenceable(28) undef, i8* noundef %1)
+define void @tailTest(ptr %0, ptr %1, ...) unnamed_addr #0 align 2 {
+  tail call void (ptr, ptr, ...) @_ZN1C3addEPKcz(ptr noundef nonnull align 8 dereferenceable(28) undef, ptr noundef %1)
   ret void
 }
 
-declare void @_ZN1C1fEiiiiiiiiiz(%class.C* %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 noundef %9, ...) unnamed_addr #1 align 2
+declare void @_ZN1C1fEiiiiiiiiiz(ptr %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 noundef %9, ...) unnamed_addr #1 align 2
 
-define void @_ZThn8_N1C1fEiiiiiiiiiz(%class.C* %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 noundef %9, ...) unnamed_addr #1 align 2 {
-  musttail call void (%class.C*, i32, i32, i32, i32, i32, i32, i32, i32, i32, ...) @_ZN1C1fEiiiiiiiiiz(%class.C* nonnull align 8 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 noundef %9, ...)
+define void @_ZThn8_N1C1fEiiiiiiiiiz(ptr %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 noundef %9, ...) unnamed_addr #1 align 2 {
+  musttail call void (ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, ...) @_ZN1C1fEiiiiiiiiiz(ptr nonnull align 8 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 noundef %9, ...)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/dbg-declare-tag-offset.ll b/llvm/test/CodeGen/AArch64/dbg-declare-tag-offset.ll
index 1db53a63b384d..08b7b63eba1fe 100644
--- a/llvm/test/CodeGen/AArch64/dbg-declare-tag-offset.ll
+++ b/llvm/test/CodeGen/AArch64/dbg-declare-tag-offset.ll
@@ -14,10 +14,10 @@ target triple="aarch64--"
 
 define void @f() !dbg !6 {
 entry:
-  %a = alloca i8*
-  %b = alloca i8*
-  call void @llvm.dbg.declare(metadata i8** %a, metadata !12, metadata !DIExpression(DW_OP_LLVM_tag_offset, 1)), !dbg !14
-  call void @llvm.dbg.declare(metadata i8** %b, metadata !13, metadata !DIExpression(DW_OP_LLVM_tag_offset, 2)), !dbg !14
+  %a = alloca ptr
+  %b = alloca ptr
+  call void @llvm.dbg.declare(metadata ptr %a, metadata !12, metadata !DIExpression(DW_OP_LLVM_tag_offset, 1)), !dbg !14
+  call void @llvm.dbg.declare(metadata ptr %b, metadata !13, metadata !DIExpression(DW_OP_LLVM_tag_offset, 2)), !dbg !14
   ret void, !dbg !15
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/dbg-value-tag-offset.ll b/llvm/test/CodeGen/AArch64/dbg-value-tag-offset.ll
index 45c4918693c94..521374388147f 100644
--- a/llvm/test/CodeGen/AArch64/dbg-value-tag-offset.ll
+++ b/llvm/test/CodeGen/AArch64/dbg-value-tag-offset.ll
@@ -16,18 +16,16 @@ target triple = "aarch64-unknown-linux-android24"
 define dso_local void @f() !dbg !14 {
   %1 = alloca i32, align 4
   %2 = alloca i32, align 4
-  %3 = bitcast i32* %1 to i8*, !dbg !21
-  %4 = bitcast i32* %2 to i8*, !dbg !21
   call void @llvm.dbg.value(metadata i32 1, metadata !20, metadata !DIExpression()), !dbg !22
-  store i32 1, i32* %2, align 4, !dbg !23, !tbaa !24
-  call void @llvm.dbg.value(metadata i32* %1, metadata !18, metadata !DIExpression(DW_OP_LLVM_tag_offset, 0, DW_OP_deref)), !dbg !22
-  call void @use(i8* nonnull %3), !dbg !28
-  call void @llvm.dbg.value(metadata i32* %2, metadata !20, metadata !DIExpression(DW_OP_LLVM_tag_offset, 128, DW_OP_deref)), !dbg !22
-  call void @use(i8* nonnull %4), !dbg !29
+  store i32 1, ptr %2, align 4, !dbg !23, !tbaa !24
+  call void @llvm.dbg.value(metadata ptr %1, metadata !18, metadata !DIExpression(DW_OP_LLVM_tag_offset, 0, DW_OP_deref)), !dbg !22
+  call void @use(ptr nonnull %1), !dbg !28
+  call void @llvm.dbg.value(metadata ptr %2, metadata !20, metadata !DIExpression(DW_OP_LLVM_tag_offset, 128, DW_OP_deref)), !dbg !22
+  call void @use(ptr nonnull %2), !dbg !29
   ret void, !dbg !30
 }
 
-declare !dbg !5 void @use(i8*)
+declare !dbg !5 void @use(ptr)
 
 declare void @llvm.dbg.value(metadata, metadata, metadata)
 

diff  --git a/llvm/test/CodeGen/AArch64/div-rem-pair-recomposition-signed.ll b/llvm/test/CodeGen/AArch64/div-rem-pair-recomposition-signed.ll
index 72e9a1e710f18..49eeb476c601f 100644
--- a/llvm/test/CodeGen/AArch64/div-rem-pair-recomposition-signed.ll
+++ b/llvm/test/CodeGen/AArch64/div-rem-pair-recomposition-signed.ll
@@ -7,7 +7,7 @@
 ; But if the target does have a single div/rem operation,
 ; the opposite transform is likely beneficial.
 
-define i8 @scalar_i8(i8 %x, i8 %y, i8* %divdst) nounwind {
+define i8 @scalar_i8(i8 %x, i8 %y, ptr %divdst) nounwind {
 ; ALL-LABEL: scalar_i8:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    sxtb w8, w1
@@ -17,13 +17,13 @@ define i8 @scalar_i8(i8 %x, i8 %y, i8* %divdst) nounwind {
 ; ALL-NEXT:    strb w8, [x2]
 ; ALL-NEXT:    ret
   %div = sdiv i8 %x, %y
-  store i8 %div, i8* %divdst, align 4
+  store i8 %div, ptr %divdst, align 4
   %t1 = mul i8 %div, %y
   %t2 = sub i8 %x, %t1
   ret i8 %t2
 }
 
-define i16 @scalar_i16(i16 %x, i16 %y, i16* %divdst) nounwind {
+define i16 @scalar_i16(i16 %x, i16 %y, ptr %divdst) nounwind {
 ; ALL-LABEL: scalar_i16:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    sxth w8, w1
@@ -33,13 +33,13 @@ define i16 @scalar_i16(i16 %x, i16 %y, i16* %divdst) nounwind {
 ; ALL-NEXT:    strh w8, [x2]
 ; ALL-NEXT:    ret
   %div = sdiv i16 %x, %y
-  store i16 %div, i16* %divdst, align 4
+  store i16 %div, ptr %divdst, align 4
   %t1 = mul i16 %div, %y
   %t2 = sub i16 %x, %t1
   ret i16 %t2
 }
 
-define i32 @scalar_i32(i32 %x, i32 %y, i32* %divdst) nounwind {
+define i32 @scalar_i32(i32 %x, i32 %y, ptr %divdst) nounwind {
 ; ALL-LABEL: scalar_i32:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    sdiv w8, w0, w1
@@ -47,13 +47,13 @@ define i32 @scalar_i32(i32 %x, i32 %y, i32* %divdst) nounwind {
 ; ALL-NEXT:    str w8, [x2]
 ; ALL-NEXT:    ret
   %div = sdiv i32 %x, %y
-  store i32 %div, i32* %divdst, align 4
+  store i32 %div, ptr %divdst, align 4
   %t1 = mul i32 %div, %y
   %t2 = sub i32 %x, %t1
   ret i32 %t2
 }
 
-define i64 @scalar_i64(i64 %x, i64 %y, i64* %divdst) nounwind {
+define i64 @scalar_i64(i64 %x, i64 %y, ptr %divdst) nounwind {
 ; ALL-LABEL: scalar_i64:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    sdiv x8, x0, x1
@@ -61,13 +61,13 @@ define i64 @scalar_i64(i64 %x, i64 %y, i64* %divdst) nounwind {
 ; ALL-NEXT:    str x8, [x2]
 ; ALL-NEXT:    ret
   %div = sdiv i64 %x, %y
-  store i64 %div, i64* %divdst, align 4
+  store i64 %div, ptr %divdst, align 4
   %t1 = mul i64 %div, %y
   %t2 = sub i64 %x, %t1
   ret i64 %t2
 }
 
-define <16 x i8> @vector_i128_i8(<16 x i8> %x, <16 x i8> %y, <16 x i8>* %divdst) nounwind {
+define <16 x i8> @vector_i128_i8(<16 x i8> %x, <16 x i8> %y, ptr %divdst) nounwind {
 ; ALL-LABEL: vector_i128_i8:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    smov w8, v1.b[1]
@@ -138,13 +138,13 @@ define <16 x i8> @vector_i128_i8(<16 x i8> %x, <16 x i8> %y, <16 x i8>* %divdst)
 ; ALL-NEXT:    str q2, [x0]
 ; ALL-NEXT:    ret
   %div = sdiv <16 x i8> %x, %y
-  store <16 x i8> %div, <16 x i8>* %divdst, align 16
+  store <16 x i8> %div, ptr %divdst, align 16
   %t1 = mul <16 x i8> %div, %y
   %t2 = sub <16 x i8> %x, %t1
   ret <16 x i8> %t2
 }
 
-define <8 x i16> @vector_i128_i16(<8 x i16> %x, <8 x i16> %y, <8 x i16>* %divdst) nounwind {
+define <8 x i16> @vector_i128_i16(<8 x i16> %x, <8 x i16> %y, ptr %divdst) nounwind {
 ; ALL-LABEL: vector_i128_i16:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    smov w8, v1.h[1]
@@ -183,13 +183,13 @@ define <8 x i16> @vector_i128_i16(<8 x i16> %x, <8 x i16> %y, <8 x i16>* %divdst
 ; ALL-NEXT:    str q2, [x0]
 ; ALL-NEXT:    ret
   %div = sdiv <8 x i16> %x, %y
-  store <8 x i16> %div, <8 x i16>* %divdst, align 16
+  store <8 x i16> %div, ptr %divdst, align 16
   %t1 = mul <8 x i16> %div, %y
   %t2 = sub <8 x i16> %x, %t1
   ret <8 x i16> %t2
 }
 
-define <4 x i32> @vector_i128_i32(<4 x i32> %x, <4 x i32> %y, <4 x i32>* %divdst) nounwind {
+define <4 x i32> @vector_i128_i32(<4 x i32> %x, <4 x i32> %y, ptr %divdst) nounwind {
 ; ALL-LABEL: vector_i128_i32:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    mov w8, v1.s[1]
@@ -212,13 +212,13 @@ define <4 x i32> @vector_i128_i32(<4 x i32> %x, <4 x i32> %y, <4 x i32>* %divdst
 ; ALL-NEXT:    str q2, [x0]
 ; ALL-NEXT:    ret
   %div = sdiv <4 x i32> %x, %y
-  store <4 x i32> %div, <4 x i32>* %divdst, align 16
+  store <4 x i32> %div, ptr %divdst, align 16
   %t1 = mul <4 x i32> %div, %y
   %t2 = sub <4 x i32> %x, %t1
   ret <4 x i32> %t2
 }
 
-define <2 x i64> @vector_i128_i64(<2 x i64> %x, <2 x i64> %y, <2 x i64>* %divdst) nounwind {
+define <2 x i64> @vector_i128_i64(<2 x i64> %x, <2 x i64> %y, ptr %divdst) nounwind {
 ; ALL-LABEL: vector_i128_i64:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    fmov x8, d1
@@ -237,7 +237,7 @@ define <2 x i64> @vector_i128_i64(<2 x i64> %x, <2 x i64> %y, <2 x i64>* %divdst
 ; ALL-NEXT:    sub v0.2d, v0.2d, v1.2d
 ; ALL-NEXT:    ret
   %div = sdiv <2 x i64> %x, %y
-  store <2 x i64> %div, <2 x i64>* %divdst, align 16
+  store <2 x i64> %div, ptr %divdst, align 16
   %t1 = mul <2 x i64> %div, %y
   %t2 = sub <2 x i64> %x, %t1
   ret <2 x i64> %t2
@@ -245,7 +245,7 @@ define <2 x i64> @vector_i128_i64(<2 x i64> %x, <2 x i64> %y, <2 x i64>* %divdst
 
 ; Special tests.
 
-define i32 @scalar_i32_commutative(i32 %x, i32* %ysrc, i32* %divdst) nounwind {
+define i32 @scalar_i32_commutative(i32 %x, ptr %ysrc, ptr %divdst) nounwind {
 ; ALL-LABEL: scalar_i32_commutative:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    ldr w8, [x1]
@@ -253,16 +253,16 @@ define i32 @scalar_i32_commutative(i32 %x, i32* %ysrc, i32* %divdst) nounwind {
 ; ALL-NEXT:    msub w0, w8, w9, w0
 ; ALL-NEXT:    str w9, [x2]
 ; ALL-NEXT:    ret
-  %y = load i32, i32* %ysrc, align 4
+  %y = load i32, ptr %ysrc, align 4
   %div = sdiv i32 %x, %y
-  store i32 %div, i32* %divdst, align 4
+  store i32 %div, ptr %divdst, align 4
   %t1 = mul i32 %y, %div ; commutative
   %t2 = sub i32 %x, %t1
   ret i32 %t2
 }
 
 ; We do not care about extra uses.
-define i32 @extrause(i32 %x, i32 %y, i32* %divdst, i32* %t1dst) nounwind {
+define i32 @extrause(i32 %x, i32 %y, ptr %divdst, ptr %t1dst) nounwind {
 ; ALL-LABEL: extrause:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    sdiv w8, w0, w1
@@ -272,15 +272,15 @@ define i32 @extrause(i32 %x, i32 %y, i32* %divdst, i32* %t1dst) nounwind {
 ; ALL-NEXT:    str w9, [x3]
 ; ALL-NEXT:    ret
   %div = sdiv i32 %x, %y
-  store i32 %div, i32* %divdst, align 4
+  store i32 %div, ptr %divdst, align 4
   %t1 = mul i32 %div, %y
-  store i32 %t1, i32* %t1dst, align 4
+  store i32 %t1, ptr %t1dst, align 4
   %t2 = sub i32 %x, %t1
   ret i32 %t2
 }
 
 ; 'rem' should appear next to 'div'.
-define i32 @multiple_bb(i32 %x, i32 %y, i32* %divdst, i1 zeroext %store_srem, i32* %sremdst) nounwind {
+define i32 @multiple_bb(i32 %x, i32 %y, ptr %divdst, i1 zeroext %store_srem, ptr %sremdst) nounwind {
 ; ALL-LABEL: multiple_bb:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    mov w8, w0
@@ -293,18 +293,18 @@ define i32 @multiple_bb(i32 %x, i32 %y, i32* %divdst, i1 zeroext %store_srem, i3
 ; ALL-NEXT:  .LBB10_2: // %end
 ; ALL-NEXT:    ret
   %div = sdiv i32 %x, %y
-  store i32 %div, i32* %divdst, align 4
+  store i32 %div, ptr %divdst, align 4
   br i1 %store_srem, label %do_srem, label %end
 do_srem:
   %t1 = mul i32 %div, %y
   %t2 = sub i32 %x, %t1
-  store i32 %t2, i32* %sremdst, align 4
+  store i32 %t2, ptr %sremdst, align 4
   br label %end
 end:
   ret i32 %div
 }
 
-define i32 @negative_different_x(i32 %x0, i32 %x1, i32 %y, i32* %divdst) nounwind {
+define i32 @negative_different_x(i32 %x0, i32 %x1, i32 %y, ptr %divdst) nounwind {
 ; ALL-LABEL: negative_different_x:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    sdiv w8, w0, w2
@@ -312,7 +312,7 @@ define i32 @negative_different_x(i32 %x0, i32 %x1, i32 %y, i32* %divdst) nounwin
 ; ALL-NEXT:    str w8, [x3]
 ; ALL-NEXT:    ret
   %div = sdiv i32 %x0, %y ; not %x1
-  store i32 %div, i32* %divdst, align 4
+  store i32 %div, ptr %divdst, align 4
   %t1 = mul i32 %div, %y
   %t2 = sub i32 %x1, %t1 ; not %x0
   ret i32 %t2

diff  --git a/llvm/test/CodeGen/AArch64/div-rem-pair-recomposition-unsigned.ll b/llvm/test/CodeGen/AArch64/div-rem-pair-recomposition-unsigned.ll
index c514cc99f014d..aff2087025e32 100644
--- a/llvm/test/CodeGen/AArch64/div-rem-pair-recomposition-unsigned.ll
+++ b/llvm/test/CodeGen/AArch64/div-rem-pair-recomposition-unsigned.ll
@@ -7,7 +7,7 @@
 ; But if the target does have a single div/rem operation,
 ; the opposite transform is likely beneficial.
 
-define i8 @scalar_i8(i8 %x, i8 %y, i8* %divdst) nounwind {
+define i8 @scalar_i8(i8 %x, i8 %y, ptr %divdst) nounwind {
 ; ALL-LABEL: scalar_i8:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    and w8, w1, #0xff
@@ -17,13 +17,13 @@ define i8 @scalar_i8(i8 %x, i8 %y, i8* %divdst) nounwind {
 ; ALL-NEXT:    strb w8, [x2]
 ; ALL-NEXT:    ret
   %div = udiv i8 %x, %y
-  store i8 %div, i8* %divdst, align 4
+  store i8 %div, ptr %divdst, align 4
   %t1 = mul i8 %div, %y
   %t2 = sub i8 %x, %t1
   ret i8 %t2
 }
 
-define i16 @scalar_i16(i16 %x, i16 %y, i16* %divdst) nounwind {
+define i16 @scalar_i16(i16 %x, i16 %y, ptr %divdst) nounwind {
 ; ALL-LABEL: scalar_i16:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    and w8, w1, #0xffff
@@ -33,13 +33,13 @@ define i16 @scalar_i16(i16 %x, i16 %y, i16* %divdst) nounwind {
 ; ALL-NEXT:    strh w8, [x2]
 ; ALL-NEXT:    ret
   %div = udiv i16 %x, %y
-  store i16 %div, i16* %divdst, align 4
+  store i16 %div, ptr %divdst, align 4
   %t1 = mul i16 %div, %y
   %t2 = sub i16 %x, %t1
   ret i16 %t2
 }
 
-define i32 @scalar_i32(i32 %x, i32 %y, i32* %divdst) nounwind {
+define i32 @scalar_i32(i32 %x, i32 %y, ptr %divdst) nounwind {
 ; ALL-LABEL: scalar_i32:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    udiv w8, w0, w1
@@ -47,13 +47,13 @@ define i32 @scalar_i32(i32 %x, i32 %y, i32* %divdst) nounwind {
 ; ALL-NEXT:    str w8, [x2]
 ; ALL-NEXT:    ret
   %div = udiv i32 %x, %y
-  store i32 %div, i32* %divdst, align 4
+  store i32 %div, ptr %divdst, align 4
   %t1 = mul i32 %div, %y
   %t2 = sub i32 %x, %t1
   ret i32 %t2
 }
 
-define i64 @scalar_i64(i64 %x, i64 %y, i64* %divdst) nounwind {
+define i64 @scalar_i64(i64 %x, i64 %y, ptr %divdst) nounwind {
 ; ALL-LABEL: scalar_i64:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    udiv x8, x0, x1
@@ -61,13 +61,13 @@ define i64 @scalar_i64(i64 %x, i64 %y, i64* %divdst) nounwind {
 ; ALL-NEXT:    str x8, [x2]
 ; ALL-NEXT:    ret
   %div = udiv i64 %x, %y
-  store i64 %div, i64* %divdst, align 4
+  store i64 %div, ptr %divdst, align 4
   %t1 = mul i64 %div, %y
   %t2 = sub i64 %x, %t1
   ret i64 %t2
 }
 
-define <16 x i8> @vector_i128_i8(<16 x i8> %x, <16 x i8> %y, <16 x i8>* %divdst) nounwind {
+define <16 x i8> @vector_i128_i8(<16 x i8> %x, <16 x i8> %y, ptr %divdst) nounwind {
 ; ALL-LABEL: vector_i128_i8:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    umov w8, v1.b[1]
@@ -138,13 +138,13 @@ define <16 x i8> @vector_i128_i8(<16 x i8> %x, <16 x i8> %y, <16 x i8>* %divdst)
 ; ALL-NEXT:    str q2, [x0]
 ; ALL-NEXT:    ret
   %div = udiv <16 x i8> %x, %y
-  store <16 x i8> %div, <16 x i8>* %divdst, align 16
+  store <16 x i8> %div, ptr %divdst, align 16
   %t1 = mul <16 x i8> %div, %y
   %t2 = sub <16 x i8> %x, %t1
   ret <16 x i8> %t2
 }
 
-define <8 x i16> @vector_i128_i16(<8 x i16> %x, <8 x i16> %y, <8 x i16>* %divdst) nounwind {
+define <8 x i16> @vector_i128_i16(<8 x i16> %x, <8 x i16> %y, ptr %divdst) nounwind {
 ; ALL-LABEL: vector_i128_i16:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    umov w8, v1.h[1]
@@ -183,13 +183,13 @@ define <8 x i16> @vector_i128_i16(<8 x i16> %x, <8 x i16> %y, <8 x i16>* %divdst
 ; ALL-NEXT:    str q2, [x0]
 ; ALL-NEXT:    ret
   %div = udiv <8 x i16> %x, %y
-  store <8 x i16> %div, <8 x i16>* %divdst, align 16
+  store <8 x i16> %div, ptr %divdst, align 16
   %t1 = mul <8 x i16> %div, %y
   %t2 = sub <8 x i16> %x, %t1
   ret <8 x i16> %t2
 }
 
-define <4 x i32> @vector_i128_i32(<4 x i32> %x, <4 x i32> %y, <4 x i32>* %divdst) nounwind {
+define <4 x i32> @vector_i128_i32(<4 x i32> %x, <4 x i32> %y, ptr %divdst) nounwind {
 ; ALL-LABEL: vector_i128_i32:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    mov w8, v1.s[1]
@@ -212,13 +212,13 @@ define <4 x i32> @vector_i128_i32(<4 x i32> %x, <4 x i32> %y, <4 x i32>* %divdst
 ; ALL-NEXT:    str q2, [x0]
 ; ALL-NEXT:    ret
   %div = udiv <4 x i32> %x, %y
-  store <4 x i32> %div, <4 x i32>* %divdst, align 16
+  store <4 x i32> %div, ptr %divdst, align 16
   %t1 = mul <4 x i32> %div, %y
   %t2 = sub <4 x i32> %x, %t1
   ret <4 x i32> %t2
 }
 
-define <2 x i64> @vector_i128_i64(<2 x i64> %x, <2 x i64> %y, <2 x i64>* %divdst) nounwind {
+define <2 x i64> @vector_i128_i64(<2 x i64> %x, <2 x i64> %y, ptr %divdst) nounwind {
 ; ALL-LABEL: vector_i128_i64:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    fmov x8, d1
@@ -237,7 +237,7 @@ define <2 x i64> @vector_i128_i64(<2 x i64> %x, <2 x i64> %y, <2 x i64>* %divdst
 ; ALL-NEXT:    sub v0.2d, v0.2d, v1.2d
 ; ALL-NEXT:    ret
   %div = udiv <2 x i64> %x, %y
-  store <2 x i64> %div, <2 x i64>* %divdst, align 16
+  store <2 x i64> %div, ptr %divdst, align 16
   %t1 = mul <2 x i64> %div, %y
   %t2 = sub <2 x i64> %x, %t1
   ret <2 x i64> %t2
@@ -245,7 +245,7 @@ define <2 x i64> @vector_i128_i64(<2 x i64> %x, <2 x i64> %y, <2 x i64>* %divdst
 
 ; Special tests.
 
-define i32 @scalar_i32_commutative(i32 %x, i32* %ysrc, i32* %divdst) nounwind {
+define i32 @scalar_i32_commutative(i32 %x, ptr %ysrc, ptr %divdst) nounwind {
 ; ALL-LABEL: scalar_i32_commutative:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    ldr w8, [x1]
@@ -253,16 +253,16 @@ define i32 @scalar_i32_commutative(i32 %x, i32* %ysrc, i32* %divdst) nounwind {
 ; ALL-NEXT:    msub w0, w8, w9, w0
 ; ALL-NEXT:    str w9, [x2]
 ; ALL-NEXT:    ret
-  %y = load i32, i32* %ysrc, align 4
+  %y = load i32, ptr %ysrc, align 4
   %div = udiv i32 %x, %y
-  store i32 %div, i32* %divdst, align 4
+  store i32 %div, ptr %divdst, align 4
   %t1 = mul i32 %y, %div ; commutative
   %t2 = sub i32 %x, %t1
   ret i32 %t2
 }
 
 ; We do not care about extra uses.
-define i32 @extrause(i32 %x, i32 %y, i32* %divdst, i32* %t1dst) nounwind {
+define i32 @extrause(i32 %x, i32 %y, ptr %divdst, ptr %t1dst) nounwind {
 ; ALL-LABEL: extrause:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    udiv w8, w0, w1
@@ -272,15 +272,15 @@ define i32 @extrause(i32 %x, i32 %y, i32* %divdst, i32* %t1dst) nounwind {
 ; ALL-NEXT:    str w9, [x3]
 ; ALL-NEXT:    ret
   %div = udiv i32 %x, %y
-  store i32 %div, i32* %divdst, align 4
+  store i32 %div, ptr %divdst, align 4
   %t1 = mul i32 %div, %y
-  store i32 %t1, i32* %t1dst, align 4
+  store i32 %t1, ptr %t1dst, align 4
   %t2 = sub i32 %x, %t1
   ret i32 %t2
 }
 
 ; 'rem' should appear next to 'div'.
-define i32 @multiple_bb(i32 %x, i32 %y, i32* %divdst, i1 zeroext %store_urem, i32* %uremdst) nounwind {
+define i32 @multiple_bb(i32 %x, i32 %y, ptr %divdst, i1 zeroext %store_urem, ptr %uremdst) nounwind {
 ; ALL-LABEL: multiple_bb:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    mov w8, w0
@@ -293,18 +293,18 @@ define i32 @multiple_bb(i32 %x, i32 %y, i32* %divdst, i1 zeroext %store_urem, i3
 ; ALL-NEXT:  .LBB10_2: // %end
 ; ALL-NEXT:    ret
   %div = udiv i32 %x, %y
-  store i32 %div, i32* %divdst, align 4
+  store i32 %div, ptr %divdst, align 4
   br i1 %store_urem, label %do_urem, label %end
 do_urem:
   %t1 = mul i32 %div, %y
   %t2 = sub i32 %x, %t1
-  store i32 %t2, i32* %uremdst, align 4
+  store i32 %t2, ptr %uremdst, align 4
   br label %end
 end:
   ret i32 %div
 }
 
-define i32 @negative_different_x(i32 %x0, i32 %x1, i32 %y, i32* %divdst) nounwind {
+define i32 @negative_different_x(i32 %x0, i32 %x1, i32 %y, ptr %divdst) nounwind {
 ; ALL-LABEL: negative_different_x:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    udiv w8, w0, w2
@@ -312,7 +312,7 @@ define i32 @negative_different_x(i32 %x0, i32 %x1, i32 %y, i32* %divdst) nounwin
 ; ALL-NEXT:    str w8, [x3]
 ; ALL-NEXT:    ret
   %div = udiv i32 %x0, %y ; not %x1
-  store i32 %div, i32* %divdst, align 4
+  store i32 %div, ptr %divdst, align 4
   %t1 = mul i32 %div, %y
   %t2 = sub i32 %x1, %t1 ; not %x0
   ret i32 %t2

diff  --git a/llvm/test/CodeGen/AArch64/dllexport.ll b/llvm/test/CodeGen/AArch64/dllexport.ll
index f408620e26d03..81ba674a0dedc 100644
--- a/llvm/test/CodeGen/AArch64/dllexport.ll
+++ b/llvm/test/CodeGen/AArch64/dllexport.ll
@@ -33,10 +33,10 @@ define weak_odr dllexport void @l() {
 @p = weak_odr dllexport global i32 0, align 4
 @q = weak_odr dllexport unnamed_addr constant i32 0
 
-@r = dllexport alias void (), void () * @f
-@s = dllexport alias void (), void () * @g
-@t = dllexport alias void (), void () * @f
-@u = weak_odr dllexport alias void (), void () * @g
+@r = dllexport alias void (), ptr @f
+@s = dllexport alias void (), ptr @g
+@t = dllexport alias void (), ptr @f
+@u = weak_odr dllexport alias void (), ptr @g
 
 ; CHECK: .section .drectve
 ; CHECK-GNU-NOT: -export:f

diff  --git a/llvm/test/CodeGen/AArch64/dllimport.ll b/llvm/test/CodeGen/AArch64/dllimport.ll
index 45de8d3be3787..914f2ffbdb8b2 100644
--- a/llvm/test/CodeGen/AArch64/dllimport.ll
+++ b/llvm/test/CodeGen/AArch64/dllimport.ll
@@ -8,7 +8,7 @@ declare dllimport i32 @external()
 declare i32 @internal()
 
 define i32 @get_var() {
-  %1 = load i32, i32* @var, align 4
+  %1 = load i32, ptr @var, align 4
   ret i32 %1
 }
 
@@ -19,7 +19,7 @@ define i32 @get_var() {
 ; CHECK: ret
 
 define i32 @get_ext() {
-  %1 = load i32, i32* @ext, align 4
+  %1 = load i32, ptr @ext, align 4
   ret i32 %1
 }
 
@@ -31,8 +31,8 @@ define i32 @get_ext() {
 ; GLOBAL-ISEL-FALLBACK: ldr w0, [x8, :lo12:ext]
 ; CHECK: ret
 
-define i32* @get_var_pointer() {
-  ret i32* @var
+define ptr @get_var_pointer() {
+  ret ptr @var
 }
 
 ; CHECK-LABEL: get_var_pointer

diff  --git a/llvm/test/CodeGen/AArch64/dp-3source.ll b/llvm/test/CodeGen/AArch64/dp-3source.ll
index 3982fea95d6c2..313f671c19c5e 100644
--- a/llvm/test/CodeGen/AArch64/dp-3source.ll
+++ b/llvm/test/CodeGen/AArch64/dp-3source.ll
@@ -168,11 +168,11 @@ define i64 @test_umnegl(i32 %lhs, i32 %rhs) {
 
 define void @test_mneg(){
 ; CHECK-LABEL: test_mneg:
-  %1 = load i32, i32* @a, align 4
-  %2 = load i32, i32* @b, align 4
+  %1 = load i32, ptr @a, align 4
+  %2 = load i32, ptr @b, align 4
   %3 = sub i32 0, %1
   %4 = mul i32 %2, %3
-  store i32 %4, i32* @c, align 4
+  store i32 %4, ptr @c, align 4
 ; CHECK: mneg {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/dp1.ll b/llvm/test/CodeGen/AArch64/dp1.ll
index bfbd1a0358730..27b105381aa07 100644
--- a/llvm/test/CodeGen/AArch64/dp1.ll
+++ b/llvm/test/CodeGen/AArch64/dp1.ll
@@ -14,9 +14,9 @@ define void @rev_i32() {
 ; CHECK-NEXT:    rev w9, w9
 ; CHECK-NEXT:    str w9, [x8]
 ; CHECK-NEXT:    ret
-  %val0_tmp = load i32, i32* @var32
+  %val0_tmp = load i32, ptr @var32
   %val1_tmp = call i32 @llvm.bswap.i32(i32 %val0_tmp)
-  store volatile i32 %val1_tmp, i32* @var32
+  store volatile i32 %val1_tmp, ptr @var32
   ret void
 }
 
@@ -29,9 +29,9 @@ define void @rev_i64() {
 ; CHECK-NEXT:    rev x9, x9
 ; CHECK-NEXT:    str x9, [x8]
 ; CHECK-NEXT:    ret
-  %val0_tmp = load i64, i64* @var64
+  %val0_tmp = load i64, ptr @var64
   %val1_tmp = call i64 @llvm.bswap.i64(i64 %val0_tmp)
-  store volatile i64 %val1_tmp, i64* @var64
+  store volatile i64 %val1_tmp, ptr @var64
   ret void
 }
 
@@ -44,13 +44,13 @@ define void @rev32_i64() {
 ; CHECK-NEXT:    rev32 x9, x9
 ; CHECK-NEXT:    str x9, [x8]
 ; CHECK-NEXT:    ret
-  %val0_tmp = load i64, i64* @var64
+  %val0_tmp = load i64, ptr @var64
   %val1_tmp = shl i64 %val0_tmp, 32
   %val5_tmp = sub i64 64, 32
   %val2_tmp = lshr i64 %val0_tmp, %val5_tmp
   %val3_tmp = or i64 %val1_tmp, %val2_tmp
   %val4_tmp = call i64 @llvm.bswap.i64(i64 %val3_tmp)
-  store volatile i64 %val4_tmp, i64* @var64
+  store volatile i64 %val4_tmp, ptr @var64
   ret void
 }
 
@@ -63,12 +63,12 @@ define void @rev16_i32() {
 ; CHECK-NEXT:    rev16 w9, w9
 ; CHECK-NEXT:    str w9, [x8]
 ; CHECK-NEXT:    ret
-  %val0_tmp = load i32, i32* @var32
+  %val0_tmp = load i32, ptr @var32
   %val1_tmp = shl i32 %val0_tmp, 16
   %val2_tmp = lshr i32 %val0_tmp, 16
   %val3_tmp = or i32 %val1_tmp, %val2_tmp
   %val4_tmp = call i32 @llvm.bswap.i32(i32 %val3_tmp)
-  store volatile i32 %val4_tmp, i32* @var32
+  store volatile i32 %val4_tmp, ptr @var32
   ret void
 }
 
@@ -81,9 +81,9 @@ define void @clz_zerodef_i32() {
 ; CHECK-NEXT:    clz w9, w9
 ; CHECK-NEXT:    str w9, [x8]
 ; CHECK-NEXT:    ret
-  %val0_tmp = load i32, i32* @var32
+  %val0_tmp = load i32, ptr @var32
   %val4_tmp = call i32 @llvm.ctlz.i32(i32 %val0_tmp, i1 0)
-  store volatile i32 %val4_tmp, i32* @var32
+  store volatile i32 %val4_tmp, ptr @var32
   ret void
 }
 
@@ -96,9 +96,9 @@ define void @clz_zerodef_i64() {
 ; CHECK-NEXT:    clz x9, x9
 ; CHECK-NEXT:    str x9, [x8]
 ; CHECK-NEXT:    ret
-  %val0_tmp = load i64, i64* @var64
+  %val0_tmp = load i64, ptr @var64
   %val4_tmp = call i64 @llvm.ctlz.i64(i64 %val0_tmp, i1 0)
-  store volatile i64 %val4_tmp, i64* @var64
+  store volatile i64 %val4_tmp, ptr @var64
   ret void
 }
 
@@ -111,9 +111,9 @@ define void @clz_zeroundef_i32() {
 ; CHECK-NEXT:    clz w9, w9
 ; CHECK-NEXT:    str w9, [x8]
 ; CHECK-NEXT:    ret
-  %val0_tmp = load i32, i32* @var32
+  %val0_tmp = load i32, ptr @var32
   %val4_tmp = call i32 @llvm.ctlz.i32(i32 %val0_tmp, i1 1)
-  store volatile i32 %val4_tmp, i32* @var32
+  store volatile i32 %val4_tmp, ptr @var32
   ret void
 }
 
@@ -126,9 +126,9 @@ define void @clz_zeroundef_i64() {
 ; CHECK-NEXT:    clz x9, x9
 ; CHECK-NEXT:    str x9, [x8]
 ; CHECK-NEXT:    ret
-  %val0_tmp = load i64, i64* @var64
+  %val0_tmp = load i64, ptr @var64
   %val4_tmp = call i64 @llvm.ctlz.i64(i64 %val0_tmp, i1 1)
-  store volatile i64 %val4_tmp, i64* @var64
+  store volatile i64 %val4_tmp, ptr @var64
   ret void
 }
 
@@ -142,9 +142,9 @@ define void @cttz_zerodef_i32() {
 ; CHECK-NEXT:    clz w9, w9
 ; CHECK-NEXT:    str w9, [x8]
 ; CHECK-NEXT:    ret
-  %val0_tmp = load i32, i32* @var32
+  %val0_tmp = load i32, ptr @var32
   %val4_tmp = call i32 @llvm.cttz.i32(i32 %val0_tmp, i1 0)
-  store volatile i32 %val4_tmp, i32* @var32
+  store volatile i32 %val4_tmp, ptr @var32
   ret void
 }
 
@@ -158,9 +158,9 @@ define void @cttz_zerodef_i64() {
 ; CHECK-NEXT:    clz x9, x9
 ; CHECK-NEXT:    str x9, [x8]
 ; CHECK-NEXT:    ret
-  %val0_tmp = load i64, i64* @var64
+  %val0_tmp = load i64, ptr @var64
   %val4_tmp = call i64 @llvm.cttz.i64(i64 %val0_tmp, i1 0)
-  store volatile i64 %val4_tmp, i64* @var64
+  store volatile i64 %val4_tmp, ptr @var64
   ret void
 }
 
@@ -174,9 +174,9 @@ define void @cttz_zeroundef_i32() {
 ; CHECK-NEXT:    clz w9, w9
 ; CHECK-NEXT:    str w9, [x8]
 ; CHECK-NEXT:    ret
-  %val0_tmp = load i32, i32* @var32
+  %val0_tmp = load i32, ptr @var32
   %val4_tmp = call i32 @llvm.cttz.i32(i32 %val0_tmp, i1 1)
-  store volatile i32 %val4_tmp, i32* @var32
+  store volatile i32 %val4_tmp, ptr @var32
   ret void
 }
 
@@ -190,9 +190,9 @@ define void @cttz_zeroundef_i64() {
 ; CHECK-NEXT:    clz x9, x9
 ; CHECK-NEXT:    str x9, [x8]
 ; CHECK-NEXT:    ret
-  %val0_tmp = load i64, i64* @var64
+  %val0_tmp = load i64, ptr @var64
   %val4_tmp = call i64 @llvm.cttz.i64(i64 %val0_tmp, i1 1)
-  store volatile i64 %val4_tmp, i64* @var64
+  store volatile i64 %val4_tmp, ptr @var64
   ret void
 }
 
@@ -219,9 +219,9 @@ define void @ctpop_i32() {
 ; CHECK-GISEL-NEXT:    uaddlv h0, v0.8b
 ; CHECK-GISEL-NEXT:    str s0, [x8]
 ; CHECK-GISEL-NEXT:    ret
-  %val0_tmp = load i32, i32* @var32
+  %val0_tmp = load i32, ptr @var32
   %val4_tmp = call i32 @llvm.ctpop.i32(i32 %val0_tmp)
-  store volatile i32 %val4_tmp, i32* @var32
+  store volatile i32 %val4_tmp, ptr @var32
   ret void
 }
 
@@ -248,9 +248,9 @@ define void @ctpop_i64() {
 ; CHECK-GISEL-NEXT:    fmov w9, s0
 ; CHECK-GISEL-NEXT:    str x9, [x8]
 ; CHECK-GISEL-NEXT:    ret
-  %val0_tmp = load i64, i64* @var64
+  %val0_tmp = load i64, ptr @var64
   %val4_tmp = call i64 @llvm.ctpop.i64(i64 %val0_tmp)
-  store volatile i64 %val4_tmp, i64* @var64
+  store volatile i64 %val4_tmp, ptr @var64
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/dp2.ll b/llvm/test/CodeGen/AArch64/dp2.ll
index 0cd2d7ed84a73..2f9efeb460756 100644
--- a/llvm/test/CodeGen/AArch64/dp2.ll
+++ b/llvm/test/CodeGen/AArch64/dp2.ll
@@ -7,132 +7,132 @@
 
 define void @rorv_i64() {
 ; CHECK-LABEL: rorv_i64:
-    %val0_tmp = load i64, i64* @var64_0
-    %val1_tmp = load i64, i64* @var64_1
+    %val0_tmp = load i64, ptr @var64_0
+    %val1_tmp = load i64, ptr @var64_1
     %val2_tmp = sub i64 64, %val1_tmp
     %val3_tmp = shl i64 %val0_tmp, %val2_tmp
     %val4_tmp = lshr i64 %val0_tmp, %val1_tmp
     %val5_tmp = or i64 %val3_tmp, %val4_tmp
 ; CHECK: {{ror|rorv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
-    store volatile i64 %val5_tmp, i64* @var64_0
+    store volatile i64 %val5_tmp, ptr @var64_0
     ret void
 }
 
 define void @asrv_i64() {
 ; CHECK-LABEL: asrv_i64:
-    %val0_tmp = load i64, i64* @var64_0
-    %val1_tmp = load i64, i64* @var64_1
+    %val0_tmp = load i64, ptr @var64_0
+    %val1_tmp = load i64, ptr @var64_1
     %val4_tmp = ashr i64 %val0_tmp, %val1_tmp
 ; CHECK: {{asr|asrv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
-    store volatile i64 %val4_tmp, i64* @var64_1
+    store volatile i64 %val4_tmp, ptr @var64_1
     ret void
 }
 
 define void @lsrv_i64() {
 ; CHECK-LABEL: lsrv_i64:
-    %val0_tmp = load i64, i64* @var64_0
-    %val1_tmp = load i64, i64* @var64_1
+    %val0_tmp = load i64, ptr @var64_0
+    %val1_tmp = load i64, ptr @var64_1
     %val4_tmp = lshr i64 %val0_tmp, %val1_tmp
 ; CHECK: {{lsr|lsrv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
-    store volatile i64 %val4_tmp, i64* @var64_0
+    store volatile i64 %val4_tmp, ptr @var64_0
     ret void
 }
 
 define void @lslv_i64() {
 ; CHECK-LABEL: lslv_i64:
-    %val0_tmp = load i64, i64* @var64_0
-    %val1_tmp = load i64, i64* @var64_1
+    %val0_tmp = load i64, ptr @var64_0
+    %val1_tmp = load i64, ptr @var64_1
     %val4_tmp = shl i64 %val0_tmp, %val1_tmp
 ; CHECK: {{lsl|lslv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
-    store volatile i64 %val4_tmp, i64* @var64_1
+    store volatile i64 %val4_tmp, ptr @var64_1
     ret void
 }
 
 define void @udiv_i64() {
 ; CHECK-LABEL: udiv_i64:
-    %val0_tmp = load i64, i64* @var64_0
-    %val1_tmp = load i64, i64* @var64_1
+    %val0_tmp = load i64, ptr @var64_0
+    %val1_tmp = load i64, ptr @var64_1
     %val4_tmp = udiv i64 %val0_tmp, %val1_tmp
 ; CHECK: udiv	{{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
-    store volatile i64 %val4_tmp, i64* @var64_0
+    store volatile i64 %val4_tmp, ptr @var64_0
     ret void
 }
 
 define void @sdiv_i64() {
 ; CHECK-LABEL: sdiv_i64:
-    %val0_tmp = load i64, i64* @var64_0
-    %val1_tmp = load i64, i64* @var64_1
+    %val0_tmp = load i64, ptr @var64_0
+    %val1_tmp = load i64, ptr @var64_1
     %val4_tmp = sdiv i64 %val0_tmp, %val1_tmp
 ; CHECK: sdiv	{{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
-    store volatile i64 %val4_tmp, i64* @var64_1
+    store volatile i64 %val4_tmp, ptr @var64_1
     ret void
 }
 
 
 define void @lsrv_i32() {
 ; CHECK-LABEL: lsrv_i32:
-    %val0_tmp = load i32, i32* @var32_0
-    %val1_tmp = load i32, i32* @var32_1
+    %val0_tmp = load i32, ptr @var32_0
+    %val1_tmp = load i32, ptr @var32_1
     %val2_tmp = add i32 1, %val1_tmp
     %val4_tmp = lshr i32 %val0_tmp, %val2_tmp
 ; CHECK: {{lsr|lsrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
-    store volatile i32 %val4_tmp, i32* @var32_0
+    store volatile i32 %val4_tmp, ptr @var32_0
     ret void
 }
 
 define void @lslv_i32() {
 ; CHECK-LABEL: lslv_i32:
-    %val0_tmp = load i32, i32* @var32_0
-    %val1_tmp = load i32, i32* @var32_1
+    %val0_tmp = load i32, ptr @var32_0
+    %val1_tmp = load i32, ptr @var32_1
     %val2_tmp = add i32 1, %val1_tmp
     %val4_tmp = shl i32 %val0_tmp, %val2_tmp
 ; CHECK: {{lsl|lslv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
-    store volatile i32 %val4_tmp, i32* @var32_1
+    store volatile i32 %val4_tmp, ptr @var32_1
     ret void
 }
 
 define void @rorv_i32() {
 ; CHECK-LABEL: rorv_i32:
-    %val0_tmp = load i32, i32* @var32_0
-    %val6_tmp = load i32, i32* @var32_1
+    %val0_tmp = load i32, ptr @var32_0
+    %val6_tmp = load i32, ptr @var32_1
     %val1_tmp = add i32 1, %val6_tmp
     %val2_tmp = sub i32 32, %val1_tmp
     %val3_tmp = shl i32 %val0_tmp, %val2_tmp
     %val4_tmp = lshr i32 %val0_tmp, %val1_tmp
     %val5_tmp = or i32 %val3_tmp, %val4_tmp
 ; CHECK: {{ror|rorv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
-    store volatile i32 %val5_tmp, i32* @var32_0
+    store volatile i32 %val5_tmp, ptr @var32_0
     ret void
 }
 
 define void @asrv_i32() {
 ; CHECK-LABEL: asrv_i32:
-    %val0_tmp = load i32, i32* @var32_0
-    %val1_tmp = load i32, i32* @var32_1
+    %val0_tmp = load i32, ptr @var32_0
+    %val1_tmp = load i32, ptr @var32_1
     %val2_tmp = add i32 1, %val1_tmp
     %val4_tmp = ashr i32 %val0_tmp, %val2_tmp
 ; CHECK: {{asr|asrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
-    store volatile i32 %val4_tmp, i32* @var32_1
+    store volatile i32 %val4_tmp, ptr @var32_1
     ret void
 }
 
 define void @sdiv_i32() {
 ; CHECK-LABEL: sdiv_i32:
-    %val0_tmp = load i32, i32* @var32_0
-    %val1_tmp = load i32, i32* @var32_1
+    %val0_tmp = load i32, ptr @var32_0
+    %val1_tmp = load i32, ptr @var32_1
     %val4_tmp = sdiv i32 %val0_tmp, %val1_tmp
 ; CHECK: sdiv	{{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
-    store volatile i32 %val4_tmp, i32* @var32_1
+    store volatile i32 %val4_tmp, ptr @var32_1
     ret void
 }
 
 define void @udiv_i32() {
 ; CHECK-LABEL: udiv_i32:
-    %val0_tmp = load i32, i32* @var32_0
-    %val1_tmp = load i32, i32* @var32_1
+    %val0_tmp = load i32, ptr @var32_0
+    %val1_tmp = load i32, ptr @var32_1
     %val4_tmp = udiv i32 %val0_tmp, %val1_tmp
 ; CHECK: udiv	{{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
-    store volatile i32 %val4_tmp, i32* @var32_0
+    store volatile i32 %val4_tmp, ptr @var32_0
     ret void
 }
 
@@ -141,7 +141,7 @@ define void @udiv_i32() {
 define i32 @test_lsl32() {
 ; CHECK-LABEL: test_lsl32:
 
-  %val = load i32, i32* @var32_0
+  %val = load i32, ptr @var32_0
   %ret = shl i32 1, %val
 ; CHECK: {{lsl|lslv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
 
@@ -151,7 +151,7 @@ define i32 @test_lsl32() {
 define i32 @test_lsr32() {
 ; CHECK-LABEL: test_lsr32:
 
-  %val = load i32, i32* @var32_0
+  %val = load i32, ptr @var32_0
   %ret = lshr i32 1, %val
 ; CHECK: {{lsr|lsrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
 
@@ -161,7 +161,7 @@ define i32 @test_lsr32() {
 define i32 @test_asr32(i32 %in) {
 ; CHECK-LABEL: test_asr32:
 
-  %val = load i32, i32* @var32_0
+  %val = load i32, ptr @var32_0
   %ret = ashr i32 %in, %val
 ; CHECK: {{asr|asrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
 

diff  --git a/llvm/test/CodeGen/AArch64/dwarf-cfi.ll b/llvm/test/CodeGen/AArch64/dwarf-cfi.ll
index 1447ad364075e..7ee369dd1c9a9 100644
--- a/llvm/test/CodeGen/AArch64/dwarf-cfi.ll
+++ b/llvm/test/CodeGen/AArch64/dwarf-cfi.ll
@@ -1,15 +1,15 @@
 ; RUN: llc -mtriple aarch64-windows-gnu -exception-model=dwarf -filetype=asm -o - %s | FileCheck %s
 
-define void @_Z1gv() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @_Z1gv() personality ptr @__gxx_personality_v0 {
 entry:
   invoke void @_Z1fv()
           to label %try.cont unwind label %lpad
 
 lpad:
-  %0 = landingpad { i8*, i32 }
-          catch i8* null
-  %1 = extractvalue { i8*, i32 } %0, 0
-  %2 = tail call i8* @__cxa_begin_catch(i8* %1) #2
+  %0 = landingpad { ptr, i32 }
+          catch ptr null
+  %1 = extractvalue { ptr, i32 } %0, 0
+  %2 = tail call ptr @__cxa_begin_catch(ptr %1) #2
   tail call void @__cxa_end_catch()
   br label %try.cont
 
@@ -21,7 +21,7 @@ declare void @_Z1fv()
 
 declare i32 @__gxx_personality_v0(...)
 
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
 
 declare void @__cxa_end_catch()
 

diff  --git a/llvm/test/CodeGen/AArch64/eh_recoverfp.ll b/llvm/test/CodeGen/AArch64/eh_recoverfp.ll
index 777bcee543827..eb4fdd76d3773 100644
--- a/llvm/test/CodeGen/AArch64/eh_recoverfp.ll
+++ b/llvm/test/CodeGen/AArch64/eh_recoverfp.ll
@@ -1,11 +1,11 @@
 ; RUN: llc -mtriple arm64-windows %s -o - 2>&1 | FileCheck %s
 
-define i8* @foo(i8* %a) {
+define ptr @foo(ptr %a) {
 ; CHECK-LABEL: foo
 ; CHECK-NOT: llvm.x86.seh.recoverfp
-  %1 = call i8* @llvm.x86.seh.recoverfp(i8* bitcast (i32 ()* @f to i8*), i8* %a)
-  ret i8* %1
+  %1 = call ptr @llvm.x86.seh.recoverfp(ptr @f, ptr %a)
+  ret ptr %1
 }
 
-declare i8* @llvm.x86.seh.recoverfp(i8*, i8*)
+declare ptr @llvm.x86.seh.recoverfp(ptr, ptr)
 declare i32 @f()

diff  --git a/llvm/test/CodeGen/AArch64/ehcontguard.ll b/llvm/test/CodeGen/AArch64/ehcontguard.ll
index a015851df0935..eecff391d0f8c 100644
--- a/llvm/test/CodeGen/AArch64/ehcontguard.ll
+++ b/llvm/test/CodeGen/AArch64/ehcontguard.ll
@@ -5,14 +5,14 @@
 
 ; CHECK: .section .gehcont$y
 
-define dso_local void @"?func1@@YAXXZ"() #0 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define dso_local void @"?func1@@YAXXZ"() #0 personality ptr @__CxxFrameHandler3 {
 entry:
   invoke void @"?func2@@YAXXZ"()
           to label %invoke.cont unwind label %catch.dispatch
 catch.dispatch:                                   ; preds = %entry
   %0 = catchswitch within none [label %catch] unwind to caller
 catch:                                            ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* null, i32 64, i8* null]
+  %1 = catchpad within %0 [ptr null, i32 64, ptr null]
   catchret from %1 to label %catchret.dest
 catchret.dest:                                    ; preds = %catch
   br label %try.cont

diff  --git a/llvm/test/CodeGen/AArch64/elf-globals-pic.ll b/llvm/test/CodeGen/AArch64/elf-globals-pic.ll
index 9e4a5762203f6..ebb6a86e20216 100644
--- a/llvm/test/CodeGen/AArch64/elf-globals-pic.ll
+++ b/llvm/test/CodeGen/AArch64/elf-globals-pic.ll
@@ -7,8 +7,8 @@
 @var64 = external global i64, align 8
 
 define i8 @test_i8(i8 %new) {
-  %val = load i8, i8* @var8, align 1
-  store i8 %new, i8* @var8
+  %val = load i8, ptr @var8, align 1
+  store i8 %new, ptr @var8
   ret i8 %val
 ; CHECK-PIC-LABEL: test_i8:
 ; CHECK-PIC: adrp x[[HIREG:[0-9]+]], :got:var8
@@ -21,33 +21,33 @@ define i8 @test_i8(i8 %new) {
 }
 
 define i16 @test_i16(i16 %new) {
-  %val = load i16, i16* @var16, align 2
-  store i16 %new, i16* @var16
+  %val = load i16, ptr @var16, align 2
+  store i16 %new, ptr @var16
   ret i16 %val
 }
 
 define i32 @test_i32(i32 %new) {
-  %val = load i32, i32* @var32, align 4
-  store i32 %new, i32* @var32
+  %val = load i32, ptr @var32, align 4
+  store i32 %new, ptr @var32
   ret i32 %val
 }
 
 define i64 @test_i64(i64 %new) {
-  %val = load i64, i64* @var64, align 8
-  store i64 %new, i64* @var64
+  %val = load i64, ptr @var64, align 8
+  store i64 %new, ptr @var64
   ret i64 %val
 }
 
-define i64* @test_addr() {
-  ret i64* @var64
+define ptr @test_addr() {
+  ret ptr @var64
 }
 
 @hiddenvar = hidden global i32 0, align 4
 @protectedvar = protected global i32 0, align 4
 
 define i32 @test_vis() {
-  %lhs = load i32, i32* @hiddenvar, align 4
-  %rhs = load i32, i32* @protectedvar, align 4
+  %lhs = load i32, ptr @hiddenvar, align 4
+  %rhs = load i32, ptr @protectedvar, align 4
   %ret = add i32 %lhs, %rhs
   ret i32 %ret
 ; CHECK-PIC-LABEL: test_vis:
@@ -60,13 +60,11 @@ define i32 @test_vis() {
 @var_default = external global [2 x i32]
 
 define i32 @test_default_align() {
-  %addr = getelementptr [2 x i32], [2 x i32]* @var_default, i32 0, i32 0
-  %val = load i32, i32* %addr
+  %val = load i32, ptr @var_default
   ret i32 %val
 }
 
 define i64 @test_default_unaligned() {
-  %addr = bitcast [2 x i32]* @var_default to i64*
-  %val = load i64, i64* %addr
+  %val = load i64, ptr @var_default
   ret i64 %val
 }

diff  --git a/llvm/test/CodeGen/AArch64/elf-globals-static.ll b/llvm/test/CodeGen/AArch64/elf-globals-static.ll
index 096515e500f10..86b7c401b9a2e 100644
--- a/llvm/test/CodeGen/AArch64/elf-globals-static.ll
+++ b/llvm/test/CodeGen/AArch64/elf-globals-static.ll
@@ -7,8 +7,8 @@
 @var64 = external dso_local global i64, align 8
 
 define i8 @test_i8(i8 %new) {
-  %val = load i8, i8* @var8, align 1
-  store i8 %new, i8* @var8
+  %val = load i8, ptr @var8, align 1
+  store i8 %new, ptr @var8
   ret i8 %val
 ; CHECK-LABEL: test_i8:
 ; CHECK: adrp x[[HIREG:[0-9]+]], var8
@@ -26,8 +26,8 @@ define i8 @test_i8(i8 %new) {
 }
 
 define i16 @test_i16(i16 %new) {
-  %val = load i16, i16* @var16, align 2
-  store i16 %new, i16* @var16
+  %val = load i16, ptr @var16, align 2
+  store i16 %new, ptr @var16
   ret i16 %val
 ; CHECK-LABEL: test_i16:
 ; CHECK: adrp x[[HIREG:[0-9]+]], var16
@@ -40,8 +40,8 @@ define i16 @test_i16(i16 %new) {
 }
 
 define i32 @test_i32(i32 %new) {
-  %val = load i32, i32* @var32, align 4
-  store i32 %new, i32* @var32
+  %val = load i32, ptr @var32, align 4
+  store i32 %new, ptr @var32
   ret i32 %val
 ; CHECK-LABEL: test_i32:
 ; CHECK: adrp x[[HIREG:[0-9]+]], var32
@@ -54,8 +54,8 @@ define i32 @test_i32(i32 %new) {
 }
 
 define i64 @test_i64(i64 %new) {
-  %val = load i64, i64* @var64, align 8
-  store i64 %new, i64* @var64
+  %val = load i64, ptr @var64, align 8
+  store i64 %new, ptr @var64
   ret i64 %val
 ; CHECK-LABEL: test_i64:
 ; CHECK: adrp x[[HIREG:[0-9]+]], var64
@@ -67,8 +67,8 @@ define i64 @test_i64(i64 %new) {
 ; CHECK-FAST: add {{x[0-9]+}}, x[[HIREG]], :lo12:var64
 }
 
-define i64* @test_addr() {
-  ret i64* @var64
+define ptr @test_addr() {
+  ret ptr @var64
 ; CHECK-LABEL: test_addr:
 ; CHECK: adrp [[HIREG:x[0-9]+]], var64
 ; CHECK: add x0, [[HIREG]], :lo12:var64
@@ -81,8 +81,7 @@ define i64* @test_addr() {
 @var_default = external dso_local global [2 x i32]
 
 define i32 @test_default_align() {
-  %addr = getelementptr [2 x i32], [2 x i32]* @var_default, i32 0, i32 0
-  %val = load i32, i32* %addr
+  %val = load i32, ptr @var_default
   ret i32 %val
 ; CHECK-LABEL: test_default_align:
 ; CHECK: adrp x[[HIREG:[0-9]+]], var_default
@@ -90,8 +89,7 @@ define i32 @test_default_align() {
 }
 
 define i64 @test_default_unaligned() {
-  %addr = bitcast [2 x i32]* @var_default to i64*
-  %val = load i64, i64* %addr
+  %val = load i64, ptr @var_default
   ret i64 %val
 ; CHECK-LABEL: test_default_unaligned:
 ; CHECK: adrp [[HIREG:x[0-9]+]], var_default

diff  --git a/llvm/test/CodeGen/AArch64/elf-preemption.ll b/llvm/test/CodeGen/AArch64/elf-preemption.ll
index 3b77f5b8e02a6..dc41f0f2d8ab3 100644
--- a/llvm/test/CodeGen/AArch64/elf-preemption.ll
+++ b/llvm/test/CodeGen/AArch64/elf-preemption.ll
@@ -3,17 +3,17 @@
 ; RUN: llc -mtriple=aarch64 -relocation-model=pic < %s | FileCheck %s --check-prefixes=CHECK,PIC
 
 @preemptable_var = dso_preemptable global i32 42
-define i32* @get_preemptable_var() nounwind {
+define ptr @get_preemptable_var() nounwind {
 ; CHECK-LABEL: get_preemptable_var:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x0, :got:preemptable_var
 ; CHECK-NEXT:    ldr x0, [x0, :got_lo12:preemptable_var]
 ; CHECK-NEXT:    ret
-  ret i32* @preemptable_var
+  ret ptr @preemptable_var
 }
 
 @dsolocal_var = dso_local global i32 42
-define i32* @get_dsolocal_var() nounwind {
+define ptr @get_dsolocal_var() nounwind {
 ; STATIC-LABEL: get_dsolocal_var:
 ; STATIC:       // %bb.0:
 ; STATIC-NEXT:    adrp x0, dsolocal_var
@@ -25,49 +25,49 @@ define i32* @get_dsolocal_var() nounwind {
 ; PIC-NEXT:    adrp x0, .Ldsolocal_var$local
 ; PIC-NEXT:    add x0, x0, :lo12:.Ldsolocal_var$local
 ; PIC-NEXT:    ret
-  ret i32* @dsolocal_var
+  ret ptr @dsolocal_var
 }
 
 @weak_dsolocal_var = weak dso_local global i32 42
-define i32* @get_weak_dsolocal_var() nounwind {
+define ptr @get_weak_dsolocal_var() nounwind {
 ; CHECK-LABEL: get_weak_dsolocal_var:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x0, weak_dsolocal_var
 ; CHECK-NEXT:    add x0, x0, :lo12:weak_dsolocal_var
 ; CHECK-NEXT:    ret
-  ret i32* @weak_dsolocal_var
+  ret ptr @weak_dsolocal_var
 }
 
 @hidden_var = hidden global i32 42
-define i32* @get_hidden_var() nounwind {
+define ptr @get_hidden_var() nounwind {
 ; CHECK-LABEL: get_hidden_var:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x0, hidden_var
 ; CHECK-NEXT:    add x0, x0, :lo12:hidden_var
 ; CHECK-NEXT:    ret
-  ret i32* @hidden_var
+  ret ptr @hidden_var
 }
 
 @protected_var = protected global i32 42
-define i32* @get_protected_var() nounwind {
+define ptr @get_protected_var() nounwind {
 ; CHECK-LABEL: get_protected_var:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x0, protected_var
 ; CHECK-NEXT:    add x0, x0, :lo12:protected_var
 ; CHECK-NEXT:    ret
-  ret i32* @protected_var
+  ret ptr @protected_var
 }
 
-define dso_preemptable void()* @preemptable_func() nounwind {
+define dso_preemptable ptr @preemptable_func() nounwind {
 ; CHECK-LABEL: preemptable_func:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x0, :got:preemptable_func
 ; CHECK-NEXT:    ldr x0, [x0, :got_lo12:preemptable_func]
 ; CHECK-NEXT:    ret
-  ret void()* bitcast(void()*()* @preemptable_func to void()*)
+  ret ptr @preemptable_func
 }
 
-define dso_local void()* @dsolocal_func() nounwind {
+define dso_local ptr @dsolocal_func() nounwind {
 ; STATIC-LABEL: dsolocal_func:
 ; STATIC:       // %bb.0:
 ; STATIC-NEXT:    adrp x0, dsolocal_func
@@ -81,7 +81,7 @@ define dso_local void()* @dsolocal_func() nounwind {
 ; PIC-NEXT:    adrp x0, .Ldsolocal_func$local
 ; PIC-NEXT:    add x0, x0, :lo12:.Ldsolocal_func$local
 ; PIC-NEXT:    ret
-  ret void()* bitcast(void()*()* @dsolocal_func to void()*)
+  ret ptr @dsolocal_func
 }
 ; UTC-ARGS: --disable
 ; PIC: [[END_LABEL:.Lfunc_end.+]]:
@@ -89,13 +89,13 @@ define dso_local void()* @dsolocal_func() nounwind {
 ; PIC-NEXT: .size	.Ldsolocal_func$local, [[END_LABEL]]-dsolocal_func
 ; UTC-ARGS: --enable
 
-define weak dso_local void()* @weak_dsolocal_func() nounwind {
+define weak dso_local ptr @weak_dsolocal_func() nounwind {
 ; CHECK-LABEL: weak_dsolocal_func:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x0, weak_dsolocal_func
 ; CHECK-NEXT:    add x0, x0, :lo12:weak_dsolocal_func
 ; CHECK-NEXT:    ret
-  ret void()* bitcast(void()*()* @weak_dsolocal_func to void()*)
+  ret ptr @weak_dsolocal_func
 }
 
 ;; bl .Ldsolocal_func$local either resolves to a constant at assembly time
@@ -116,7 +116,7 @@ define dso_local void @call_dsolocal_func() nounwind {
 ; PIC-NEXT:    bl .Ldsolocal_func$local
 ; PIC-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; PIC-NEXT:    ret
-  call void()* @dsolocal_func()
+  call ptr @dsolocal_func()
   ret void
 }
 ; UTC-ARGS: --disable

diff  --git a/llvm/test/CodeGen/AArch64/eliminate-trunc.ll b/llvm/test/CodeGen/AArch64/eliminate-trunc.ll
index 83730d15d7f5f..1531d8c88e99b 100644
--- a/llvm/test/CodeGen/AArch64/eliminate-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/eliminate-trunc.ll
@@ -6,7 +6,7 @@
 ; CHECK-NOT: add {{x[0-9]+}}, {{x[0-9]+}}, #1
 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #1
 ; CHECK-NEXT: cmp {{w[0-9]+}}, {{w[0-9]+}}
-define void @test1_signed([8 x i8]* nocapture %a, i8* nocapture readonly %box, i8 %limit, i64 %inv) minsize {
+define void @test1_signed(ptr nocapture %a, ptr nocapture readonly %box, i8 %limit, i64 %inv) minsize {
 entry:
   %conv = zext i8 %limit to i32
   %cmp223 = icmp eq i8 %limit, 0
@@ -14,12 +14,12 @@ entry:
 
 for.body4.us:
   %indvars.iv = phi i64 [ 0, %for.body4.lr.ph.us ], [ %indvars.iv.next, %for.body4.us ]
-  %arrayidx6.us = getelementptr inbounds [8 x i8], [8 x i8]* %a, i64 %indvars.iv, i64 %inv
-  %0 = load i8, i8* %arrayidx6.us, align 1
+  %arrayidx6.us = getelementptr inbounds [8 x i8], ptr %a, i64 %indvars.iv, i64 %inv
+  %0 = load i8, ptr %arrayidx6.us, align 1
   %idxprom7.us = zext i8 %0 to i64
-  %arrayidx8.us = getelementptr inbounds i8, i8* %box, i64 %idxprom7.us
-  %1 = load i8, i8* %arrayidx8.us, align 1
-  store i8 %1, i8* %arrayidx6.us, align 1
+  %arrayidx8.us = getelementptr inbounds i8, ptr %box, i64 %idxprom7.us
+  %1 = load i8, ptr %arrayidx8.us, align 1
+  store i8 %1, ptr %arrayidx6.us, align 1
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %2 = trunc i64 %indvars.iv.next to i32
   %cmp2.us = icmp slt i32 %2, %conv

diff  --git a/llvm/test/CodeGen/AArch64/emutls.ll b/llvm/test/CodeGen/AArch64/emutls.ll
index 4fdc99d0b7e01..4ca9fd5cd87d6 100644
--- a/llvm/test/CodeGen/AArch64/emutls.ll
+++ b/llvm/test/CodeGen/AArch64/emutls.ll
@@ -6,8 +6,8 @@
 ; Copied from X86/emutls.ll
 
 ; Use my_emutls_get_address like __emutls_get_address.
-@my_emutls_v_xyz = external global i8*, align 4
-declare i8* @my_emutls_get_address(i8*)
+@my_emutls_v_xyz = external global ptr, align 4
+declare ptr @my_emutls_get_address(ptr)
 
 define i32 @my_get_xyz() uwtable {
 ; ARM64-LABEL: my_get_xyz:
@@ -19,10 +19,9 @@ define i32 @my_get_xyz() uwtable {
 ; ARM64-NEXT:   ldp x29, x30, [sp]
 
 entry:
-  %call = call i8* @my_emutls_get_address(i8* bitcast (i8** @my_emutls_v_xyz to i8*))
-  %0 = bitcast i8* %call to i32*
-  %1 = load i32, i32* %0, align 4
-  ret i32 %1
+  %call = call ptr @my_emutls_get_address(ptr @my_emutls_v_xyz)
+  %0 = load i32, ptr %call, align 4
+  ret i32 %0
 }
 
 @i1 = thread_local global i32 15
@@ -43,11 +42,11 @@ define i32 @f1() uwtable {
 ; ARM64-NEXT:   ldp x29, x30, [sp]
 
 entry:
-  %tmp1 = load i32, i32* @i1
+  %tmp1 = load i32, ptr @i1
   ret i32 %tmp1
 }
 
-define i32* @f2() uwtable {
+define ptr @f2() uwtable {
 ; ARM64-LABEL: f2:
 ; ARM64:        adrp x0, :got:__emutls_v.i1
 ; ARM64-NEXT:   ldr x0, [x0, :got_lo12:__emutls_v.i1]
@@ -56,7 +55,7 @@ define i32* @f2() uwtable {
 ; ARM64-NEXT:   ldp x29, x30, [sp]
 
 entry:
-  ret i32* @i1
+  ret ptr @i1
 }
 
 define i32 @f5() nounwind {
@@ -67,11 +66,11 @@ define i32 @f5() nounwind {
 ; ARM64-NEXT:   ldr w0, [x0]
 
 entry:
-  %tmp1 = load i32, i32* @i3
+  %tmp1 = load i32, ptr @i3
   ret i32 %tmp1
 }
 
-define i32* @f6() uwtable {
+define ptr @f6() uwtable {
 ; ARM64-LABEL: f6:
 ; ARM64:        adrp x0, __emutls_v.i3
 ; ARM64:        add x0, x0, :lo12:__emutls_v.i3
@@ -80,7 +79,7 @@ define i32* @f6() uwtable {
 ; ARM64-NEXT:   ldp x29, x30, [sp]
 
 entry:
-  ret i32* @i3
+  ret ptr @i3
 }
 
 ; Simple test of comdat __thread variables.
@@ -104,9 +103,9 @@ define i32 @_Z7getIntXv() {
 ; ARM64:        str {{.*}}, [x8]
 
 entry:
-  %0 = load i32, i32* @_ZN1AIiE1xE, align 4
+  %0 = load i32, ptr @_ZN1AIiE1xE, align 4
   %inc = add nsw i32 %0, 1
-  store i32 %inc, i32* @_ZN1AIiE1xE, align 4
+  store i32 %inc, ptr @_ZN1AIiE1xE, align 4
   ret i32 %0
 }
 
@@ -120,9 +119,9 @@ define float @_Z9getFloatXv() {
 ; ARM64:        str s{{.*}}, [x0]
 
 entry:
-  %0 = load float, float* @_ZN1AIfE1xE, align 4
+  %0 = load float, ptr @_ZN1AIfE1xE, align 4
   %inc = fadd float %0, 1.000000e+00
-  store float %inc, float* @_ZN1AIfE1xE, align 4
+  store float %inc, ptr @_ZN1AIfE1xE, align 4
   ret float %0
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/emutls_generic.ll b/llvm/test/CodeGen/AArch64/emutls_generic.ll
index 840833972881a..00d75652ad33d 100644
--- a/llvm/test/CodeGen/AArch64/emutls_generic.ll
+++ b/llvm/test/CodeGen/AArch64/emutls_generic.ll
@@ -27,19 +27,19 @@
 @external_y = thread_local global i8 7, align 2
 @internal_y = internal thread_local global i64 9, align 16
 
-define i32* @get_external_x() {
+define ptr @get_external_x() {
 entry:
-  ret i32* @external_x
+  ret ptr @external_x
 }
 
-define i8* @get_external_y() {
+define ptr @get_external_y() {
 entry:
-  ret i8* @external_y
+  ret ptr @external_y
 }
 
-define i64* @get_internal_y() {
+define ptr @get_internal_y() {
 entry:
-  ret i64* @internal_y
+  ret ptr @internal_y
 }
 
 ; ARM_64-LABEL:  get_external_x:

diff  --git a/llvm/test/CodeGen/AArch64/expand-select.ll b/llvm/test/CodeGen/AArch64/expand-select.ll
index b9bd3e89c66bf..57d24fe86ea4a 100644
--- a/llvm/test/CodeGen/AArch64/expand-select.ll
+++ b/llvm/test/CodeGen/AArch64/expand-select.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-unknown-linux-gnu -O3 %s -o - | FileCheck %s
 
-define void @foo(i32 %In1, <2 x i128> %In2, <2 x i128> %In3, <2 x i128> *%Out) {
+define void @foo(i32 %In1, <2 x i128> %In2, <2 x i128> %In3, ptr %Out) {
 ; CHECK-LABEL: foo:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0x1
@@ -22,13 +22,13 @@ define void @foo(i32 %In1, <2 x i128> %In2, <2 x i128> %In3, <2 x i128> *%Out) {
   %cond = and i32 %In1, 1
   %cbool = icmp eq i32 %cond, 0
   %res = select i1 %cbool, <2 x i128> %In2, <2 x i128> %In3
-  store <2 x i128> %res, <2 x i128> *%Out
+  store <2 x i128> %res, ptr %Out
 
   ret void
 }
 
 ; Check case when scalar size is not power of 2.
-define void @bar(i32 %In1, <2 x i96> %In2, <2 x i96> %In3, <2 x i96> *%Out) {
+define void @bar(i32 %In1, <2 x i96> %In2, <2 x i96> %In3, ptr %Out) {
 ; CHECK-LABEL: bar:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0x1
@@ -55,7 +55,7 @@ define void @bar(i32 %In1, <2 x i96> %In2, <2 x i96> %In3, <2 x i96> *%Out) {
   %cond = and i32 %In1, 1
   %cbool = icmp eq i32 %cond, 0
   %res = select i1 %cbool, <2 x i96> %In2, <2 x i96> %In3
-  store <2 x i96> %res, <2 x i96> *%Out
+  store <2 x i96> %res, ptr %Out
 
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/extern-weak.ll b/llvm/test/CodeGen/AArch64/extern-weak.ll
index 0746701c67fc8..b1e69d3de890d 100644
--- a/llvm/test/CodeGen/AArch64/extern-weak.ll
+++ b/llvm/test/CodeGen/AArch64/extern-weak.ll
@@ -5,11 +5,11 @@
 
 declare extern_weak dso_local i32 @var()
 
-define i32()* @foo() {
+define ptr @foo() {
 ; The usual ADRP/ADD pair can't be used for a weak reference because it must
 ; evaluate to 0 if the symbol is undefined. We use a GOT entry for PIC
 ; otherwise a litpool entry.
-  ret i32()* @var
+  ret ptr @var
 
 
 ; CHECK: adrp x[[ADDRHI:[0-9]+]], :got:var
@@ -29,15 +29,15 @@ define i32()* @foo() {
 
 @arr_var = extern_weak global [10 x i32]
 
-define i32* @bar() {
-  %addr = getelementptr [10 x i32], [10 x i32]* @arr_var, i32 0, i32 5
+define ptr @bar() {
+  %addr = getelementptr [10 x i32], ptr @arr_var, i32 0, i32 5
 
 
 ; CHECK: adrp x[[ADDRHI:[0-9]+]], :got:arr_var
 ; CHECK: ldr [[BASE:x[0-9]+]], [x[[ADDRHI]], :got_lo12:arr_var]
 ; CHECK: add x0, [[BASE]], #20
 
-  ret i32* %addr
+  ret ptr %addr
 
   ; Note: in the large model, if dso_local, the relocations are absolute and can materialise 0.
 ; CHECK-LARGE:      adrp x[[ADDR:[0-9]+]], :got:arr_var
@@ -50,8 +50,8 @@ define i32* @bar() {
 
 @defined_weak_var = internal unnamed_addr global i32 0
 
-define i32* @wibble() {
-  ret i32* @defined_weak_var
+define ptr @wibble() {
+  ret ptr @defined_weak_var
 
 ; CHECK: adrp [[BASE:x[0-9]+]], defined_weak_var
 ; CHECK: add x0, [[BASE]], :lo12:defined_weak_var

diff  --git a/llvm/test/CodeGen/AArch64/extract-bits.ll b/llvm/test/CodeGen/AArch64/extract-bits.ll
index 3ab25ac2fe59d..2913d73886998 100644
--- a/llvm/test/CodeGen/AArch64/extract-bits.ll
+++ b/llvm/test/CodeGen/AArch64/extract-bits.ll
@@ -68,7 +68,7 @@ define i32 @bextr32_a1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %
   ret i32 %masked
 }
 
-define i32 @bextr32_a2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind {
+define i32 @bextr32_a2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr32_a2_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w9, [x0]
@@ -78,7 +78,7 @@ define i32 @bextr32_a2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind
 ; CHECK-NEXT:    lsr w9, w9, w1
 ; CHECK-NEXT:    and w0, w8, w9
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %w
+  %val = load i32, ptr %w
   %shifted = lshr i32 %val, %numskipbits
   %onebit = shl i32 1, %numlowbits
   %mask = add nsw i32 %onebit, -1
@@ -86,7 +86,7 @@ define i32 @bextr32_a2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind
   ret i32 %masked
 }
 
-define i32 @bextr32_a3_load_indexzext(i32* %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+define i32 @bextr32_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
 ; CHECK-LABEL: bextr32_a3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w9, [x0]
@@ -96,7 +96,7 @@ define i32 @bextr32_a3_load_indexzext(i32* %w, i8 zeroext %numskipbits, i8 zeroe
 ; CHECK-NEXT:    lsr w9, w9, w1
 ; CHECK-NEXT:    and w0, w8, w9
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %w
+  %val = load i32, ptr %w
   %skip = zext i8 %numskipbits to i32
   %shifted = lshr i32 %val, %skip
   %conv = zext i8 %numlowbits to i32
@@ -176,7 +176,7 @@ define i64 @bextr64_a1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %
   ret i64 %masked
 }
 
-define i64 @bextr64_a2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind {
+define i64 @bextr64_a2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr64_a2_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x9, [x0]
@@ -186,7 +186,7 @@ define i64 @bextr64_a2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind
 ; CHECK-NEXT:    lsr x9, x9, x1
 ; CHECK-NEXT:    and x0, x8, x9
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %w
+  %val = load i64, ptr %w
   %shifted = lshr i64 %val, %numskipbits
   %onebit = shl i64 1, %numlowbits
   %mask = add nsw i64 %onebit, -1
@@ -194,7 +194,7 @@ define i64 @bextr64_a2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind
   ret i64 %masked
 }
 
-define i64 @bextr64_a3_load_indexzext(i64* %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+define i64 @bextr64_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
 ; CHECK-LABEL: bextr64_a3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x9, [x0]
@@ -206,7 +206,7 @@ define i64 @bextr64_a3_load_indexzext(i64* %w, i8 zeroext %numskipbits, i8 zeroe
 ; CHECK-NEXT:    lsr x9, x9, x1
 ; CHECK-NEXT:    and x0, x8, x9
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %w
+  %val = load i64, ptr %w
   %skip = zext i8 %numskipbits to i64
   %shifted = lshr i64 %val, %skip
   %conv = zext i8 %numlowbits to i64
@@ -326,7 +326,7 @@ define i32 @bextr32_b1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %
   ret i32 %masked
 }
 
-define i32 @bextr32_b2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind {
+define i32 @bextr32_b2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr32_b2_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w9, [x0]
@@ -335,7 +335,7 @@ define i32 @bextr32_b2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind
 ; CHECK-NEXT:    lsr w9, w9, w1
 ; CHECK-NEXT:    bic w0, w9, w8
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %w
+  %val = load i32, ptr %w
   %shifted = lshr i32 %val, %numskipbits
   %notmask = shl i32 -1, %numlowbits
   %mask = xor i32 %notmask, -1
@@ -343,7 +343,7 @@ define i32 @bextr32_b2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind
   ret i32 %masked
 }
 
-define i32 @bextr32_b3_load_indexzext(i32* %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+define i32 @bextr32_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
 ; CHECK-LABEL: bextr32_b3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w9, [x0]
@@ -352,7 +352,7 @@ define i32 @bextr32_b3_load_indexzext(i32* %w, i8 zeroext %numskipbits, i8 zeroe
 ; CHECK-NEXT:    lsr w9, w9, w1
 ; CHECK-NEXT:    bic w0, w9, w8
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %w
+  %val = load i32, ptr %w
   %skip = zext i8 %numskipbits to i32
   %shifted = lshr i32 %val, %skip
   %conv = zext i8 %numlowbits to i32
@@ -413,7 +413,7 @@ define i64 @bextr64_b1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %
   ret i64 %masked
 }
 
-define i64 @bextr64_b2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind {
+define i64 @bextr64_b2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr64_b2_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x9, [x0]
@@ -422,7 +422,7 @@ define i64 @bextr64_b2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind
 ; CHECK-NEXT:    lsr x9, x9, x1
 ; CHECK-NEXT:    bic x0, x9, x8
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %w
+  %val = load i64, ptr %w
   %shifted = lshr i64 %val, %numskipbits
   %notmask = shl i64 -1, %numlowbits
   %mask = xor i64 %notmask, -1
@@ -430,7 +430,7 @@ define i64 @bextr64_b2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind
   ret i64 %masked
 }
 
-define i64 @bextr64_b3_load_indexzext(i64* %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
+define i64 @bextr64_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
 ; CHECK-LABEL: bextr64_b3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x9, [x0]
@@ -441,7 +441,7 @@ define i64 @bextr64_b3_load_indexzext(i64* %w, i8 zeroext %numskipbits, i8 zeroe
 ; CHECK-NEXT:    lsr x9, x9, x1
 ; CHECK-NEXT:    bic x0, x9, x8
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %w
+  %val = load i64, ptr %w
   %skip = zext i8 %numskipbits to i64
   %shifted = lshr i64 %val, %skip
   %conv = zext i8 %numlowbits to i64
@@ -567,7 +567,7 @@ define i32 @bextr32_c1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) noun
   ret i32 %masked
 }
 
-define i32 @bextr32_c2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind {
+define i32 @bextr32_c2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr32_c2_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w8, w2
@@ -577,7 +577,7 @@ define i32 @bextr32_c2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind
 ; CHECK-NEXT:    lsr w8, w10, w8
 ; CHECK-NEXT:    and w0, w8, w9
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %w
+  %val = load i32, ptr %w
   %shifted = lshr i32 %val, %numskipbits
   %numhighbits = sub i32 32, %numlowbits
   %mask = lshr i32 -1, %numhighbits
@@ -585,7 +585,7 @@ define i32 @bextr32_c2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind
   ret i32 %masked
 }
 
-define i32 @bextr32_c3_load_indexzext(i32* %w, i8 %numskipbits, i8 %numlowbits) nounwind {
+define i32 @bextr32_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr32_c3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #32
@@ -597,7 +597,7 @@ define i32 @bextr32_c3_load_indexzext(i32* %w, i8 %numskipbits, i8 %numlowbits)
 ; CHECK-NEXT:    lsr w8, w10, w8
 ; CHECK-NEXT:    and w0, w8, w9
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %w
+  %val = load i32, ptr %w
   %skip = zext i8 %numskipbits to i32
   %shifted = lshr i32 %val, %skip
   %numhighbits = sub i8 32, %numlowbits
@@ -661,7 +661,7 @@ define i64 @bextr64_c1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) noun
   ret i64 %masked
 }
 
-define i64 @bextr64_c2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind {
+define i64 @bextr64_c2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr64_c2_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg x8, x2
@@ -671,7 +671,7 @@ define i64 @bextr64_c2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind
 ; CHECK-NEXT:    lsr x8, x10, x8
 ; CHECK-NEXT:    and x0, x8, x9
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %w
+  %val = load i64, ptr %w
   %shifted = lshr i64 %val, %numskipbits
   %numhighbits = sub i64 64, %numlowbits
   %mask = lshr i64 -1, %numhighbits
@@ -679,7 +679,7 @@ define i64 @bextr64_c2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind
   ret i64 %masked
 }
 
-define i64 @bextr64_c3_load_indexzext(i64* %w, i8 %numskipbits, i8 %numlowbits) nounwind {
+define i64 @bextr64_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr64_c3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #64
@@ -691,7 +691,7 @@ define i64 @bextr64_c3_load_indexzext(i64* %w, i8 %numskipbits, i8 %numlowbits)
 ; CHECK-NEXT:    lsr x8, x10, x8
 ; CHECK-NEXT:    and x0, x8, x9
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %w
+  %val = load i64, ptr %w
   %skip = zext i8 %numskipbits to i64
   %shifted = lshr i64 %val, %skip
   %numhighbits = sub i8 64, %numlowbits
@@ -813,7 +813,7 @@ define i32 @bextr32_d1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) noun
   ret i32 %masked
 }
 
-define i32 @bextr32_d2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind {
+define i32 @bextr32_d2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr32_d2_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -822,7 +822,7 @@ define i32 @bextr32_d2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind
 ; CHECK-NEXT:    lsl w8, w8, w9
 ; CHECK-NEXT:    lsr w0, w8, w9
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %w
+  %val = load i32, ptr %w
   %shifted = lshr i32 %val, %numskipbits
   %numhighbits = sub i32 32, %numlowbits
   %highbitscleared = shl i32 %shifted, %numhighbits
@@ -830,7 +830,7 @@ define i32 @bextr32_d2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind
   ret i32 %masked
 }
 
-define i32 @bextr32_d3_load_indexzext(i32* %w, i8 %numskipbits, i8 %numlowbits) nounwind {
+define i32 @bextr32_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr32_d3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #32
@@ -841,7 +841,7 @@ define i32 @bextr32_d3_load_indexzext(i32* %w, i8 %numskipbits, i8 %numlowbits)
 ; CHECK-NEXT:    lsl w9, w9, w8
 ; CHECK-NEXT:    lsr w0, w9, w8
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %w
+  %val = load i32, ptr %w
   %skip = zext i8 %numskipbits to i32
   %shifted = lshr i32 %val, %skip
   %numhighbits = sub i8 32, %numlowbits
@@ -887,7 +887,7 @@ define i64 @bextr64_d1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) noun
   ret i64 %masked
 }
 
-define i64 @bextr64_d2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind {
+define i64 @bextr64_d2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr64_d2_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x0]
@@ -896,7 +896,7 @@ define i64 @bextr64_d2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind
 ; CHECK-NEXT:    lsl x8, x8, x9
 ; CHECK-NEXT:    lsr x0, x8, x9
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %w
+  %val = load i64, ptr %w
   %shifted = lshr i64 %val, %numskipbits
   %numhighbits = sub i64 64, %numlowbits
   %highbitscleared = shl i64 %shifted, %numhighbits
@@ -904,7 +904,7 @@ define i64 @bextr64_d2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind
   ret i64 %masked
 }
 
-define i64 @bextr64_d3_load_indexzext(i64* %w, i8 %numskipbits, i8 %numlowbits) nounwind {
+define i64 @bextr64_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind {
 ; CHECK-LABEL: bextr64_d3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #64
@@ -915,7 +915,7 @@ define i64 @bextr64_d3_load_indexzext(i64* %w, i8 %numskipbits, i8 %numlowbits)
 ; CHECK-NEXT:    lsl x9, x9, x8
 ; CHECK-NEXT:    lsr x0, x9, x8
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %w
+  %val = load i64, ptr %w
   %skip = zext i8 %numskipbits to i64
   %shifted = lshr i64 %val, %skip
   %numhighbits = sub i8 64, %numlowbits
@@ -967,7 +967,7 @@ define i32 @bextr64_32_d1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; ---------------------------------------------------------------------------- ;
 
 ; https://bugs.llvm.org/show_bug.cgi?id=38938
-define void @pr38938(i32* %a0, i64* %a1) nounwind {
+define void @pr38938(ptr %a0, ptr %a1) nounwind {
 ; CHECK-LABEL: pr38938:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x1]
@@ -977,13 +977,13 @@ define void @pr38938(i32* %a0, i64* %a1) nounwind {
 ; CHECK-NEXT:    add w9, w9, #1
 ; CHECK-NEXT:    str w9, [x0, x8]
 ; CHECK-NEXT:    ret
-  %tmp = load i64, i64* %a1, align 8
+  %tmp = load i64, ptr %a1, align 8
   %tmp1 = lshr i64 %tmp, 21
   %tmp2 = and i64 %tmp1, 1023
-  %tmp3 = getelementptr inbounds i32, i32* %a0, i64 %tmp2
-  %tmp4 = load i32, i32* %tmp3, align 4
+  %tmp3 = getelementptr inbounds i32, ptr %a0, i64 %tmp2
+  %tmp4 = load i32, ptr %tmp3, align 4
   %tmp5 = add nsw i32 %tmp4, 1
-  store i32 %tmp5, i32* %tmp3, align 4
+  store i32 %tmp5, ptr %tmp3, align 4
   ret void
 }
 
@@ -1092,7 +1092,7 @@ define i64 @c4_i64_bad(i64 %arg) nounwind {
 ; i32
 
 ; The most canonical variant
-define void @c5_i32(i32 %arg, i32* %ptr) nounwind {
+define void @c5_i32(i32 %arg, ptr %ptr) nounwind {
 ; CHECK-LABEL: c5_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ubfx w8, w0, #19, #10
@@ -1100,12 +1100,12 @@ define void @c5_i32(i32 %arg, i32* %ptr) nounwind {
 ; CHECK-NEXT:    ret
   %tmp0 = lshr i32 %arg, 19
   %tmp1 = and i32 %tmp0, 1023
-  store i32 %tmp1, i32* %ptr
+  store i32 %tmp1, ptr %ptr
   ret void
 }
 
 ; Should be still fine, but the mask is shifted
-define void @c6_i32(i32 %arg, i32* %ptr) nounwind {
+define void @c6_i32(i32 %arg, ptr %ptr) nounwind {
 ; CHECK-LABEL: c6_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ubfx w8, w0, #19, #12
@@ -1113,12 +1113,12 @@ define void @c6_i32(i32 %arg, i32* %ptr) nounwind {
 ; CHECK-NEXT:    ret
   %tmp0 = lshr i32 %arg, 19
   %tmp1 = and i32 %tmp0, 4095
-  store i32 %tmp1, i32* %ptr
+  store i32 %tmp1, ptr %ptr
   ret void
 }
 
 ; Should be still fine, but the result is shifted left afterwards
-define void @c7_i32(i32 %arg, i32* %ptr) nounwind {
+define void @c7_i32(i32 %arg, ptr %ptr) nounwind {
 ; CHECK-LABEL: c7_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ubfx w8, w0, #19, #10
@@ -1128,14 +1128,14 @@ define void @c7_i32(i32 %arg, i32* %ptr) nounwind {
   %tmp0 = lshr i32 %arg, 19
   %tmp1 = and i32 %tmp0, 1023
   %tmp2 = shl i32 %tmp1, 2
-  store i32 %tmp2, i32* %ptr
+  store i32 %tmp2, ptr %ptr
   ret void
 }
 
 ; i64
 
 ; The most canonical variant
-define void @c5_i64(i64 %arg, i64* %ptr) nounwind {
+define void @c5_i64(i64 %arg, ptr %ptr) nounwind {
 ; CHECK-LABEL: c5_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ubfx x8, x0, #51, #10
@@ -1143,12 +1143,12 @@ define void @c5_i64(i64 %arg, i64* %ptr) nounwind {
 ; CHECK-NEXT:    ret
   %tmp0 = lshr i64 %arg, 51
   %tmp1 = and i64 %tmp0, 1023
-  store i64 %tmp1, i64* %ptr
+  store i64 %tmp1, ptr %ptr
   ret void
 }
 
 ; Should be still fine, but the mask is shifted
-define void @c6_i64(i64 %arg, i64* %ptr) nounwind {
+define void @c6_i64(i64 %arg, ptr %ptr) nounwind {
 ; CHECK-LABEL: c6_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ubfx x8, x0, #51, #12
@@ -1156,12 +1156,12 @@ define void @c6_i64(i64 %arg, i64* %ptr) nounwind {
 ; CHECK-NEXT:    ret
   %tmp0 = lshr i64 %arg, 51
   %tmp1 = and i64 %tmp0, 4095
-  store i64 %tmp1, i64* %ptr
+  store i64 %tmp1, ptr %ptr
   ret void
 }
 
 ; Should be still fine, but the result is shifted left afterwards
-define void @c7_i64(i64 %arg, i64* %ptr) nounwind {
+define void @c7_i64(i64 %arg, ptr %ptr) nounwind {
 ; CHECK-LABEL: c7_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ubfx x8, x0, #51, #10
@@ -1171,6 +1171,6 @@ define void @c7_i64(i64 %arg, i64* %ptr) nounwind {
   %tmp0 = lshr i64 %arg, 51
   %tmp1 = and i64 %tmp0, 1023
   %tmp2 = shl i64 %tmp1, 2
-  store i64 %tmp2, i64* %ptr
+  store i64 %tmp2, ptr %ptr
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/extract-lowbits.ll b/llvm/test/CodeGen/AArch64/extract-lowbits.ll
index 9570843ee6cd5..ecd8e1734245d 100644
--- a/llvm/test/CodeGen/AArch64/extract-lowbits.ll
+++ b/llvm/test/CodeGen/AArch64/extract-lowbits.ll
@@ -47,7 +47,7 @@ define i32 @bzhi32_a1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
   ret i32 %masked
 }
 
-define i32 @bzhi32_a2_load(i32* %w, i32 %numlowbits) nounwind {
+define i32 @bzhi32_a2_load(ptr %w, i32 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi32_a2_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1
@@ -56,14 +56,14 @@ define i32 @bzhi32_a2_load(i32* %w, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    sub w8, w8, #1
 ; CHECK-NEXT:    and w0, w8, w9
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %w
+  %val = load i32, ptr %w
   %onebit = shl i32 1, %numlowbits
   %mask = add nsw i32 %onebit, -1
   %masked = and i32 %mask, %val
   ret i32 %masked
 }
 
-define i32 @bzhi32_a3_load_indexzext(i32* %w, i8 zeroext %numlowbits) nounwind {
+define i32 @bzhi32_a3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi32_a3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1
@@ -72,7 +72,7 @@ define i32 @bzhi32_a3_load_indexzext(i32* %w, i8 zeroext %numlowbits) nounwind {
 ; CHECK-NEXT:    sub w8, w8, #1
 ; CHECK-NEXT:    and w0, w8, w9
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %w
+  %val = load i32, ptr %w
   %conv = zext i8 %numlowbits to i32
   %onebit = shl i32 1, %conv
   %mask = add nsw i32 %onebit, -1
@@ -126,7 +126,7 @@ define i64 @bzhi64_a1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
   ret i64 %masked
 }
 
-define i64 @bzhi64_a2_load(i64* %w, i64 %numlowbits) nounwind {
+define i64 @bzhi64_a2_load(ptr %w, i64 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi64_a2_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1
@@ -135,14 +135,14 @@ define i64 @bzhi64_a2_load(i64* %w, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    sub x8, x8, #1
 ; CHECK-NEXT:    and x0, x8, x9
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %w
+  %val = load i64, ptr %w
   %onebit = shl i64 1, %numlowbits
   %mask = add nsw i64 %onebit, -1
   %masked = and i64 %mask, %val
   ret i64 %masked
 }
 
-define i64 @bzhi64_a3_load_indexzext(i64* %w, i8 zeroext %numlowbits) nounwind {
+define i64 @bzhi64_a3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi64_a3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1
@@ -152,7 +152,7 @@ define i64 @bzhi64_a3_load_indexzext(i64* %w, i8 zeroext %numlowbits) nounwind {
 ; CHECK-NEXT:    sub x8, x8, #1
 ; CHECK-NEXT:    and x0, x8, x9
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %w
+  %val = load i64, ptr %w
   %conv = zext i8 %numlowbits to i64
   %onebit = shl i64 1, %conv
   %mask = add nsw i64 %onebit, -1
@@ -205,7 +205,7 @@ define i32 @bzhi32_b1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
   ret i32 %masked
 }
 
-define i32 @bzhi32_b2_load(i32* %w, i32 %numlowbits) nounwind {
+define i32 @bzhi32_b2_load(ptr %w, i32 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi32_b2_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -213,14 +213,14 @@ define i32 @bzhi32_b2_load(i32* %w, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    lsl w9, w9, w1
 ; CHECK-NEXT:    bic w0, w8, w9
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %w
+  %val = load i32, ptr %w
   %notmask = shl i32 -1, %numlowbits
   %mask = xor i32 %notmask, -1
   %masked = and i32 %mask, %val
   ret i32 %masked
 }
 
-define i32 @bzhi32_b3_load_indexzext(i32* %w, i8 zeroext %numlowbits) nounwind {
+define i32 @bzhi32_b3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi32_b3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -228,7 +228,7 @@ define i32 @bzhi32_b3_load_indexzext(i32* %w, i8 zeroext %numlowbits) nounwind {
 ; CHECK-NEXT:    lsl w9, w9, w1
 ; CHECK-NEXT:    bic w0, w8, w9
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %w
+  %val = load i32, ptr %w
   %conv = zext i8 %numlowbits to i32
   %notmask = shl i32 -1, %conv
   %mask = xor i32 %notmask, -1
@@ -279,7 +279,7 @@ define i64 @bzhi64_b1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
   ret i64 %masked
 }
 
-define i64 @bzhi64_b2_load(i64* %w, i64 %numlowbits) nounwind {
+define i64 @bzhi64_b2_load(ptr %w, i64 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi64_b2_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x0]
@@ -287,14 +287,14 @@ define i64 @bzhi64_b2_load(i64* %w, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    lsl x9, x9, x1
 ; CHECK-NEXT:    bic x0, x8, x9
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %w
+  %val = load i64, ptr %w
   %notmask = shl i64 -1, %numlowbits
   %mask = xor i64 %notmask, -1
   %masked = and i64 %mask, %val
   ret i64 %masked
 }
 
-define i64 @bzhi64_b3_load_indexzext(i64* %w, i8 zeroext %numlowbits) nounwind {
+define i64 @bzhi64_b3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi64_b3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x0]
@@ -303,7 +303,7 @@ define i64 @bzhi64_b3_load_indexzext(i64* %w, i8 zeroext %numlowbits) nounwind {
 ; CHECK-NEXT:    lsl x9, x9, x1
 ; CHECK-NEXT:    bic x0, x8, x9
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %w
+  %val = load i64, ptr %w
   %conv = zext i8 %numlowbits to i64
   %notmask = shl i64 -1, %conv
   %mask = xor i64 %notmask, -1
@@ -358,7 +358,7 @@ define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind {
   ret i32 %masked
 }
 
-define i32 @bzhi32_c2_load(i32* %w, i32 %numlowbits) nounwind {
+define i32 @bzhi32_c2_load(ptr %w, i32 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi32_c2_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w8, w1
@@ -367,14 +367,14 @@ define i32 @bzhi32_c2_load(i32* %w, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    lsr w8, w10, w8
 ; CHECK-NEXT:    and w0, w8, w9
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %w
+  %val = load i32, ptr %w
   %numhighbits = sub i32 32, %numlowbits
   %mask = lshr i32 -1, %numhighbits
   %masked = and i32 %mask, %val
   ret i32 %masked
 }
 
-define i32 @bzhi32_c3_load_indexzext(i32* %w, i8 %numlowbits) nounwind {
+define i32 @bzhi32_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi32_c3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #32
@@ -384,7 +384,7 @@ define i32 @bzhi32_c3_load_indexzext(i32* %w, i8 %numlowbits) nounwind {
 ; CHECK-NEXT:    lsr w8, w10, w8
 ; CHECK-NEXT:    and w0, w8, w9
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %w
+  %val = load i32, ptr %w
   %numhighbits = sub i8 32, %numlowbits
   %sh_prom = zext i8 %numhighbits to i32
   %mask = lshr i32 -1, %sh_prom
@@ -438,7 +438,7 @@ define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind {
   ret i64 %masked
 }
 
-define i64 @bzhi64_c2_load(i64* %w, i64 %numlowbits) nounwind {
+define i64 @bzhi64_c2_load(ptr %w, i64 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi64_c2_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg x8, x1
@@ -447,14 +447,14 @@ define i64 @bzhi64_c2_load(i64* %w, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    lsr x8, x10, x8
 ; CHECK-NEXT:    and x0, x8, x9
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %w
+  %val = load i64, ptr %w
   %numhighbits = sub i64 64, %numlowbits
   %mask = lshr i64 -1, %numhighbits
   %masked = and i64 %mask, %val
   ret i64 %masked
 }
 
-define i64 @bzhi64_c3_load_indexzext(i64* %w, i8 %numlowbits) nounwind {
+define i64 @bzhi64_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi64_c3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #64
@@ -464,7 +464,7 @@ define i64 @bzhi64_c3_load_indexzext(i64* %w, i8 %numlowbits) nounwind {
 ; CHECK-NEXT:    lsr x8, x10, x8
 ; CHECK-NEXT:    and x0, x8, x9
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %w
+  %val = load i64, ptr %w
   %numhighbits = sub i8 64, %numlowbits
   %sh_prom = zext i8 %numhighbits to i64
   %mask = lshr i64 -1, %sh_prom
@@ -518,7 +518,7 @@ define i32 @bzhi32_d1_indexzext(i32 %val, i8 %numlowbits) nounwind {
   ret i32 %masked
 }
 
-define i32 @bzhi32_d2_load(i32* %w, i32 %numlowbits) nounwind {
+define i32 @bzhi32_d2_load(ptr %w, i32 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi32_d2_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w8, w1
@@ -526,14 +526,14 @@ define i32 @bzhi32_d2_load(i32* %w, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    lsl w9, w9, w8
 ; CHECK-NEXT:    lsr w0, w9, w8
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %w
+  %val = load i32, ptr %w
   %numhighbits = sub i32 32, %numlowbits
   %highbitscleared = shl i32 %val, %numhighbits
   %masked = lshr i32 %highbitscleared, %numhighbits
   ret i32 %masked
 }
 
-define i32 @bzhi32_d3_load_indexzext(i32* %w, i8 %numlowbits) nounwind {
+define i32 @bzhi32_d3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi32_d3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #32
@@ -542,7 +542,7 @@ define i32 @bzhi32_d3_load_indexzext(i32* %w, i8 %numlowbits) nounwind {
 ; CHECK-NEXT:    lsl w9, w9, w8
 ; CHECK-NEXT:    lsr w0, w9, w8
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %w
+  %val = load i32, ptr %w
   %numhighbits = sub i8 32, %numlowbits
   %sh_prom = zext i8 %numhighbits to i32
   %highbitscleared = shl i32 %val, %sh_prom
@@ -580,7 +580,7 @@ define i64 @bzhi64_d1_indexzext(i64 %val, i8 %numlowbits) nounwind {
   ret i64 %masked
 }
 
-define i64 @bzhi64_d2_load(i64* %w, i64 %numlowbits) nounwind {
+define i64 @bzhi64_d2_load(ptr %w, i64 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi64_d2_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg x8, x1
@@ -588,14 +588,14 @@ define i64 @bzhi64_d2_load(i64* %w, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    lsl x9, x9, x8
 ; CHECK-NEXT:    lsr x0, x9, x8
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %w
+  %val = load i64, ptr %w
   %numhighbits = sub i64 64, %numlowbits
   %highbitscleared = shl i64 %val, %numhighbits
   %masked = lshr i64 %highbitscleared, %numhighbits
   ret i64 %masked
 }
 
-define i64 @bzhi64_d3_load_indexzext(i64* %w, i8 %numlowbits) nounwind {
+define i64 @bzhi64_d3_load_indexzext(ptr %w, i8 %numlowbits) nounwind {
 ; CHECK-LABEL: bzhi64_d3_load_indexzext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #64
@@ -604,7 +604,7 @@ define i64 @bzhi64_d3_load_indexzext(i64* %w, i8 %numlowbits) nounwind {
 ; CHECK-NEXT:    lsl x9, x9, x8
 ; CHECK-NEXT:    lsr x0, x9, x8
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %w
+  %val = load i64, ptr %w
   %numhighbits = sub i8 64, %numlowbits
   %sh_prom = zext i8 %numhighbits to i64
   %highbitscleared = shl i64 %val, %sh_prom
@@ -627,13 +627,13 @@ define i32 @bzhi32_constant_mask32(i32 %val) nounwind {
   ret i32 %masked
 }
 
-define i32 @bzhi32_constant_mask32_load(i32* %val) nounwind {
+define i32 @bzhi32_constant_mask32_load(ptr %val) nounwind {
 ; CHECK-LABEL: bzhi32_constant_mask32_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    and w0, w8, #0x7fffffff
 ; CHECK-NEXT:    ret
-  %val1 = load i32, i32* %val
+  %val1 = load i32, ptr %val
   %masked = and i32 %val1, 2147483647
   ret i32 %masked
 }
@@ -647,13 +647,13 @@ define i32 @bzhi32_constant_mask16(i32 %val) nounwind {
   ret i32 %masked
 }
 
-define i32 @bzhi32_constant_mask16_load(i32* %val) nounwind {
+define i32 @bzhi32_constant_mask16_load(ptr %val) nounwind {
 ; CHECK-LABEL: bzhi32_constant_mask16_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    and w0, w8, #0x7fff
 ; CHECK-NEXT:    ret
-  %val1 = load i32, i32* %val
+  %val1 = load i32, ptr %val
   %masked = and i32 %val1, 32767
   ret i32 %masked
 }
@@ -667,13 +667,13 @@ define i32 @bzhi32_constant_mask8(i32 %val) nounwind {
   ret i32 %masked
 }
 
-define i32 @bzhi32_constant_mask8_load(i32* %val) nounwind {
+define i32 @bzhi32_constant_mask8_load(ptr %val) nounwind {
 ; CHECK-LABEL: bzhi32_constant_mask8_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    and w0, w8, #0x7f
 ; CHECK-NEXT:    ret
-  %val1 = load i32, i32* %val
+  %val1 = load i32, ptr %val
   %masked = and i32 %val1, 127
   ret i32 %masked
 }
@@ -689,13 +689,13 @@ define i64 @bzhi64_constant_mask64(i64 %val) nounwind {
   ret i64 %masked
 }
 
-define i64 @bzhi64_constant_mask64_load(i64* %val) nounwind {
+define i64 @bzhi64_constant_mask64_load(ptr %val) nounwind {
 ; CHECK-LABEL: bzhi64_constant_mask64_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x0]
 ; CHECK-NEXT:    and x0, x8, #0x3fffffffffffffff
 ; CHECK-NEXT:    ret
-  %val1 = load i64, i64* %val
+  %val1 = load i64, ptr %val
   %masked = and i64 %val1, 4611686018427387903
   ret i64 %masked
 }
@@ -709,13 +709,13 @@ define i64 @bzhi64_constant_mask32(i64 %val) nounwind {
   ret i64 %masked
 }
 
-define i64 @bzhi64_constant_mask32_load(i64* %val) nounwind {
+define i64 @bzhi64_constant_mask32_load(ptr %val) nounwind {
 ; CHECK-LABEL: bzhi64_constant_mask32_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x0]
 ; CHECK-NEXT:    and x0, x8, #0x7fffffff
 ; CHECK-NEXT:    ret
-  %val1 = load i64, i64* %val
+  %val1 = load i64, ptr %val
   %masked = and i64 %val1, 2147483647
   ret i64 %masked
 }
@@ -729,13 +729,13 @@ define i64 @bzhi64_constant_mask16(i64 %val) nounwind {
   ret i64 %masked
 }
 
-define i64 @bzhi64_constant_mask16_load(i64* %val) nounwind {
+define i64 @bzhi64_constant_mask16_load(ptr %val) nounwind {
 ; CHECK-LABEL: bzhi64_constant_mask16_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x0]
 ; CHECK-NEXT:    and x0, x8, #0x7fff
 ; CHECK-NEXT:    ret
-  %val1 = load i64, i64* %val
+  %val1 = load i64, ptr %val
   %masked = and i64 %val1, 32767
   ret i64 %masked
 }
@@ -749,13 +749,13 @@ define i64 @bzhi64_constant_mask8(i64 %val) nounwind {
   ret i64 %masked
 }
 
-define i64 @bzhi64_constant_mask8_load(i64* %val) nounwind {
+define i64 @bzhi64_constant_mask8_load(ptr %val) nounwind {
 ; CHECK-LABEL: bzhi64_constant_mask8_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x0]
 ; CHECK-NEXT:    and x0, x8, #0x7f
 ; CHECK-NEXT:    ret
-  %val1 = load i64, i64* %val
+  %val1 = load i64, ptr %val
   %masked = and i64 %val1, 127
   ret i64 %masked
 }

diff  --git a/llvm/test/CodeGen/AArch64/f16-convert.ll b/llvm/test/CodeGen/AArch64/f16-convert.ll
index 8caa1f574a7b2..03c7fe2e975ed 100644
--- a/llvm/test/CodeGen/AArch64/f16-convert.ll
+++ b/llvm/test/CodeGen/AArch64/f16-convert.ll
@@ -1,137 +1,137 @@
 ; RUN: llc < %s -mtriple=arm64-apple-ios -asm-verbose=false | FileCheck %s
 
-define float @load0(i16* nocapture readonly %a) nounwind {
+define float @load0(ptr nocapture readonly %a) nounwind {
 ; CHECK-LABEL: load0:
 ; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0]
 ; CHECK-NEXT: fcvt s0, [[HREG]]
 ; CHECK-NEXT: ret
 
-  %tmp = load i16, i16* %a, align 2
+  %tmp = load i16, ptr %a, align 2
   %tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
   ret float %tmp1
 }
 
-define double @load1(i16* nocapture readonly %a) nounwind {
+define double @load1(ptr nocapture readonly %a) nounwind {
 ; CHECK-LABEL: load1:
 ; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0]
 ; CHECK-NEXT: fcvt d0, [[HREG]]
 ; CHECK-NEXT: ret
 
-  %tmp = load i16, i16* %a, align 2
+  %tmp = load i16, ptr %a, align 2
   %conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
   ret double %conv
 }
 
-define float @load2(i16* nocapture readonly %a, i32 %i) nounwind {
+define float @load2(ptr nocapture readonly %a, i32 %i) nounwind {
 ; CHECK-LABEL: load2:
 ; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, w1, sxtw #1]
 ; CHECK-NEXT: fcvt s0, [[HREG]]
 ; CHECK-NEXT: ret
 
   %idxprom = sext i32 %i to i64
-  %arrayidx = getelementptr inbounds i16, i16* %a, i64 %idxprom
-  %tmp = load i16, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %a, i64 %idxprom
+  %tmp = load i16, ptr %arrayidx, align 2
   %tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
   ret float %tmp1
 }
 
-define double @load3(i16* nocapture readonly %a, i32 %i) nounwind {
+define double @load3(ptr nocapture readonly %a, i32 %i) nounwind {
 ; CHECK-LABEL: load3:
 ; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, w1, sxtw #1]
 ; CHECK-NEXT: fcvt d0, [[HREG]]
 ; CHECK-NEXT: ret
 
   %idxprom = sext i32 %i to i64
-  %arrayidx = getelementptr inbounds i16, i16* %a, i64 %idxprom
-  %tmp = load i16, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %a, i64 %idxprom
+  %tmp = load i16, ptr %arrayidx, align 2
   %conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
   ret double %conv
 }
 
-define float @load4(i16* nocapture readonly %a, i64 %i) nounwind {
+define float @load4(ptr nocapture readonly %a, i64 %i) nounwind {
 ; CHECK-LABEL: load4:
 ; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, x1, lsl #1]
 ; CHECK-NEXT: fcvt s0, [[HREG]]
 ; CHECK-NEXT: ret
 
-  %arrayidx = getelementptr inbounds i16, i16* %a, i64 %i
-  %tmp = load i16, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %a, i64 %i
+  %tmp = load i16, ptr %arrayidx, align 2
   %tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
   ret float %tmp1
 }
 
-define double @load5(i16* nocapture readonly %a, i64 %i) nounwind {
+define double @load5(ptr nocapture readonly %a, i64 %i) nounwind {
 ; CHECK-LABEL: load5:
 ; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, x1, lsl #1]
 ; CHECK-NEXT: fcvt d0, [[HREG]]
 ; CHECK-NEXT: ret
 
-  %arrayidx = getelementptr inbounds i16, i16* %a, i64 %i
-  %tmp = load i16, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %a, i64 %i
+  %tmp = load i16, ptr %arrayidx, align 2
   %conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
   ret double %conv
 }
 
-define float @load6(i16* nocapture readonly %a) nounwind {
+define float @load6(ptr nocapture readonly %a) nounwind {
 ; CHECK-LABEL: load6:
 ; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, #20]
 ; CHECK-NEXT: fcvt s0, [[HREG]]
 ; CHECK-NEXT: ret
 
-  %arrayidx = getelementptr inbounds i16, i16* %a, i64 10
-  %tmp = load i16, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %a, i64 10
+  %tmp = load i16, ptr %arrayidx, align 2
   %tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
   ret float %tmp1
 }
 
-define double @load7(i16* nocapture readonly %a) nounwind {
+define double @load7(ptr nocapture readonly %a) nounwind {
 ; CHECK-LABEL: load7:
 ; CHECK-NEXT: ldr [[HREG:h[0-9]+]], [x0, #20]
 ; CHECK-NEXT: fcvt d0, [[HREG]]
 ; CHECK-NEXT: ret
 
-  %arrayidx = getelementptr inbounds i16, i16* %a, i64 10
-  %tmp = load i16, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %a, i64 10
+  %tmp = load i16, ptr %arrayidx, align 2
   %conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
   ret double %conv
 }
 
-define float @load8(i16* nocapture readonly %a) nounwind {
+define float @load8(ptr nocapture readonly %a) nounwind {
 ; CHECK-LABEL: load8:
 ; CHECK-NEXT: ldur [[HREG:h[0-9]+]], [x0, #-20]
 ; CHECK-NEXT: fcvt s0, [[HREG]]
 ; CHECK-NEXT: ret
 
-  %arrayidx = getelementptr inbounds i16, i16* %a, i64 -10
-  %tmp = load i16, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %a, i64 -10
+  %tmp = load i16, ptr %arrayidx, align 2
   %tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
   ret float %tmp1
 }
 
-define double @load9(i16* nocapture readonly %a) nounwind {
+define double @load9(ptr nocapture readonly %a) nounwind {
 ; CHECK-LABEL: load9:
 ; CHECK-NEXT: ldur [[HREG:h[0-9]+]], [x0, #-20]
 ; CHECK-NEXT: fcvt d0, [[HREG]]
 ; CHECK-NEXT: ret
 
-  %arrayidx = getelementptr inbounds i16, i16* %a, i64 -10
-  %tmp = load i16, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %a, i64 -10
+  %tmp = load i16, ptr %arrayidx, align 2
   %conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
   ret double %conv
 }
 
-define void @store0(i16* nocapture %a, float %val) nounwind {
+define void @store0(ptr nocapture %a, float %val) nounwind {
 ; CHECK-LABEL: store0:
 ; CHECK-NEXT: fcvt h0, s0
 ; CHECK-NEXT: str  h0, [x0]
 ; CHECK-NEXT: ret
 
   %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %val)
-  store i16 %tmp, i16* %a, align 2
+  store i16 %tmp, ptr %a, align 2
   ret void
 }
 
-define void @store1(i16* nocapture %a, double %val) nounwind {
+define void @store1(ptr nocapture %a, double %val) nounwind {
 ; CHECK-LABEL: store1:
 ; CHECK-NEXT: fcvt s0, d0
 ; CHECK-NEXT: fcvt h0, s0
@@ -140,11 +140,11 @@ define void @store1(i16* nocapture %a, double %val) nounwind {
 
   %conv = fptrunc double %val to float
   %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %conv)
-  store i16 %tmp, i16* %a, align 2
+  store i16 %tmp, ptr %a, align 2
   ret void
 }
 
-define void @store2(i16* nocapture %a, i32 %i, float %val) nounwind {
+define void @store2(ptr nocapture %a, i32 %i, float %val) nounwind {
 ; CHECK-LABEL: store2:
 ; CHECK-NEXT: fcvt h0, s0
 ; CHECK-NEXT: str h0, [x0, w1, sxtw #1]
@@ -152,12 +152,12 @@ define void @store2(i16* nocapture %a, i32 %i, float %val) nounwind {
 
   %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %val)
   %idxprom = sext i32 %i to i64
-  %arrayidx = getelementptr inbounds i16, i16* %a, i64 %idxprom
-  store i16 %tmp, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %a, i64 %idxprom
+  store i16 %tmp, ptr %arrayidx, align 2
   ret void
 }
 
-define void @store3(i16* nocapture %a, i32 %i, double %val) nounwind {
+define void @store3(ptr nocapture %a, i32 %i, double %val) nounwind {
 ; CHECK-LABEL: store3:
 ; CHECK-NEXT: fcvt s0, d0
 ; CHECK-NEXT: fcvt h0, s0
@@ -167,24 +167,24 @@ define void @store3(i16* nocapture %a, i32 %i, double %val) nounwind {
   %conv = fptrunc double %val to float
   %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %conv)
   %idxprom = sext i32 %i to i64
-  %arrayidx = getelementptr inbounds i16, i16* %a, i64 %idxprom
-  store i16 %tmp, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %a, i64 %idxprom
+  store i16 %tmp, ptr %arrayidx, align 2
   ret void
 }
 
-define void @store4(i16* nocapture %a, i64 %i, float %val) nounwind {
+define void @store4(ptr nocapture %a, i64 %i, float %val) nounwind {
 ; CHECK-LABEL: store4:
 ; CHECK-NEXT: fcvt h0, s0
 ; CHECK-NEXT: str h0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
 
   %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %val)
-  %arrayidx = getelementptr inbounds i16, i16* %a, i64 %i
-  store i16 %tmp, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %a, i64 %i
+  store i16 %tmp, ptr %arrayidx, align 2
   ret void
 }
 
-define void @store5(i16* nocapture %a, i64 %i, double %val) nounwind {
+define void @store5(ptr nocapture %a, i64 %i, double %val) nounwind {
 ; CHECK-LABEL: store5:
 ; CHECK-NEXT: fcvt s0, d0
 ; CHECK-NEXT: fcvt h0, s0
@@ -193,24 +193,24 @@ define void @store5(i16* nocapture %a, i64 %i, double %val) nounwind {
 
   %conv = fptrunc double %val to float
   %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %conv)
-  %arrayidx = getelementptr inbounds i16, i16* %a, i64 %i
-  store i16 %tmp, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %a, i64 %i
+  store i16 %tmp, ptr %arrayidx, align 2
   ret void
 }
 
-define void @store6(i16* nocapture %a, float %val) nounwind {
+define void @store6(ptr nocapture %a, float %val) nounwind {
 ; CHECK-LABEL: store6:
 ; CHECK-NEXT: fcvt h0, s0
 ; CHECK-NEXT: str h0, [x0, #20]
 ; CHECK-NEXT: ret
 
   %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %val)
-  %arrayidx = getelementptr inbounds i16, i16* %a, i64 10
-  store i16 %tmp, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %a, i64 10
+  store i16 %tmp, ptr %arrayidx, align 2
   ret void
 }
 
-define void @store7(i16* nocapture %a, double %val) nounwind {
+define void @store7(ptr nocapture %a, double %val) nounwind {
 ; CHECK-LABEL: store7:
 ; CHECK-NEXT: fcvt s0, d0
 ; CHECK-NEXT: fcvt h0, s0
@@ -219,24 +219,24 @@ define void @store7(i16* nocapture %a, double %val) nounwind {
 
   %conv = fptrunc double %val to float
   %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %conv)
-  %arrayidx = getelementptr inbounds i16, i16* %a, i64 10
-  store i16 %tmp, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %a, i64 10
+  store i16 %tmp, ptr %arrayidx, align 2
   ret void
 }
 
-define void @store8(i16* nocapture %a, float %val) nounwind {
+define void @store8(ptr nocapture %a, float %val) nounwind {
 ; CHECK-LABEL: store8:
 ; CHECK-NEXT: fcvt h0, s0
 ; CHECK-NEXT: stur h0, [x0, #-20]
 ; CHECK-NEXT: ret
 
   %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %val)
-  %arrayidx = getelementptr inbounds i16, i16* %a, i64 -10
-  store i16 %tmp, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %a, i64 -10
+  store i16 %tmp, ptr %arrayidx, align 2
   ret void
 }
 
-define void @store9(i16* nocapture %a, double %val) nounwind {
+define void @store9(ptr nocapture %a, double %val) nounwind {
 ; CHECK-LABEL: store9:
 ; CHECK-NEXT: fcvt s0, d0
 ; CHECK-NEXT: fcvt h0, s0
@@ -245,8 +245,8 @@ define void @store9(i16* nocapture %a, double %val) nounwind {
 
   %conv = fptrunc double %val to float
   %tmp = tail call i16 @llvm.convert.to.fp16.f32(float %conv)
-  %arrayidx = getelementptr inbounds i16, i16* %a, i64 -10
-  store i16 %tmp, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds i16, ptr %a, i64 -10
+  store i16 %tmp, ptr %arrayidx, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/f16-instructions.ll b/llvm/test/CodeGen/AArch64/f16-instructions.ll
index 63e98c4c056b7..f7491f21471c6 100644
--- a/llvm/test/CodeGen/AArch64/f16-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/f16-instructions.ll
@@ -116,16 +116,16 @@ define half @test_frem(half %a, half %b) #0 {
 ; CHECK-COMMON-LABEL: test_store:
 ; CHECK-COMMON-NEXT: str  h0, [x0]
 ; CHECK-COMMON-NEXT: ret
-define void @test_store(half %a, half* %b) #0 {
-  store half %a, half* %b
+define void @test_store(half %a, ptr %b) #0 {
+  store half %a, ptr %b
   ret void
 }
 
 ; CHECK-COMMON-LABEL: test_load:
 ; CHECK-COMMON-NEXT: ldr  h0, [x0]
 ; CHECK-COMMON-NEXT: ret
-define half @test_load(half* %a) #0 {
-  %r = load half, half* %a
+define half @test_load(ptr %a) #0 {
+  %r = load half, ptr %a
   ret half %r
 }
 
@@ -494,12 +494,12 @@ define i1 @test_fcmp_ord(half %a, half %b) #0 {
 ; CHECK-FP16-NEXT: str   h0, [x0]
 ; CHECK-FP16-NEXT: ret
 
-define void @test_fccmp(half %in, half* %out) {
+define void @test_fccmp(half %in, ptr %out) {
   %cmp1 = fcmp ogt half %in, 0xH4800
   %cmp2 = fcmp olt half %in, 0xH4500
   %cond = and i1 %cmp1, %cmp2
   %result = select i1 %cond, half %in, half 0xH4500
-  store half %result, half* %out
+  store half %result, ptr %out
   ret void
 }
 
@@ -517,14 +517,14 @@ define void @test_fccmp(half %in, half* %out) {
 ; CHECK-FP16-NEXT: str wzr, [x8]
 ; CHECK-FP16-NEXT: ret
 
-define void @test_br_cc(half %a, half %b, i32* %p1, i32* %p2) #0 {
+define void @test_br_cc(half %a, half %b, ptr %p1, ptr %p2) #0 {
   %c = fcmp uge half %a, %b
   br i1 %c, label %then, label %else
 then:
-  store i32 0, i32* %p1
+  store i32 0, ptr %p1
   ret void
 else:
-  store i32 0, i32* %p2
+  store i32 0, ptr %p2
   ret void
 }
 
@@ -538,20 +538,20 @@ else:
 ; CHECK-COMMON: bl {{_?}}test_dummy
 ; CHECK-COMMON: fmov  s0, s[[R]]
 ; CHECK-COMMON: ret
-define half @test_phi(half* %p1) #0 {
+define half @test_phi(ptr %p1) #0 {
 entry:
-  %a = load half, half* %p1
+  %a = load half, ptr %p1
   br label %loop
 loop:
   %r = phi half [%a, %entry], [%b, %loop]
-  %b = load half, half* %p1
-  %c = call i1 @test_dummy(half* %p1)
+  %b = load half, ptr %p1
+  %c = call i1 @test_dummy(ptr %p1)
   br i1 %c, label %loop, label %return
 return:
   ret half %r
 }
 
-declare i1 @test_dummy(half* %p1) #0
+declare i1 @test_dummy(ptr %p1) #0
 
 ; CHECK-CVT-LABEL: test_fptosi_i32:
 ; CHECK-CVT-NEXT: fcvt s0, h0

diff  --git a/llvm/test/CodeGen/AArch64/fadd-combines.ll b/llvm/test/CodeGen/AArch64/fadd-combines.ll
index 44dc680324df1..b9729ce534f66 100644
--- a/llvm/test/CodeGen/AArch64/fadd-combines.ll
+++ b/llvm/test/CodeGen/AArch64/fadd-combines.ll
@@ -263,7 +263,7 @@ define <2 x double> @fadd_fma_fmul_3(<2 x double> %x1, <2 x double> %x2, <2 x do
 
 ; negative test
 
-define float @fadd_fma_fmul_extra_use_1(float %a, float %b, float %c, float %d, float %n0, float* %p) nounwind {
+define float @fadd_fma_fmul_extra_use_1(float %a, float %b, float %c, float %d, float %n0, ptr %p) nounwind {
 ; CHECK-LABEL: fadd_fma_fmul_extra_use_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmul s1, s0, s1
@@ -272,7 +272,7 @@ define float @fadd_fma_fmul_extra_use_1(float %a, float %b, float %c, float %d,
 ; CHECK-NEXT:    fadd s0, s4, s0
 ; CHECK-NEXT:    ret
   %m1 = fmul fast float %a, %b
-  store float %m1, float* %p
+  store float %m1, ptr %p
   %m2 = fmul fast float %c, %d
   %a1 = fadd fast float %m1, %m2
   %a2 = fadd fast float %n0, %a1
@@ -281,7 +281,7 @@ define float @fadd_fma_fmul_extra_use_1(float %a, float %b, float %c, float %d,
 
 ; negative test
 
-define float @fadd_fma_fmul_extra_use_2(float %a, float %b, float %c, float %d, float %n0, float* %p) nounwind {
+define float @fadd_fma_fmul_extra_use_2(float %a, float %b, float %c, float %d, float %n0, ptr %p) nounwind {
 ; CHECK-LABEL: fadd_fma_fmul_extra_use_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmul s2, s2, s3
@@ -291,7 +291,7 @@ define float @fadd_fma_fmul_extra_use_2(float %a, float %b, float %c, float %d,
 ; CHECK-NEXT:    ret
   %m1 = fmul fast float %a, %b
   %m2 = fmul fast float %c, %d
-  store float %m2, float* %p
+  store float %m2, ptr %p
   %a1 = fadd fast float %m1, %m2
   %a2 = fadd fast float %n0, %a1
   ret float %a2
@@ -299,7 +299,7 @@ define float @fadd_fma_fmul_extra_use_2(float %a, float %b, float %c, float %d,
 
 ; negative test
 
-define float @fadd_fma_fmul_extra_use_3(float %a, float %b, float %c, float %d, float %n0, float* %p) nounwind {
+define float @fadd_fma_fmul_extra_use_3(float %a, float %b, float %c, float %d, float %n0, ptr %p) nounwind {
 ; CHECK-LABEL: fadd_fma_fmul_extra_use_3:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmul s2, s2, s3
@@ -310,7 +310,7 @@ define float @fadd_fma_fmul_extra_use_3(float %a, float %b, float %c, float %d,
   %m1 = fmul fast float %a, %b
   %m2 = fmul fast float %c, %d
   %a1 = fadd fast float %m1, %m2
-  store float %a1, float* %p
+  store float %a1, ptr %p
   %a2 = fadd fast float %n0, %a1
   ret float %a2
 }

diff  --git a/llvm/test/CodeGen/AArch64/falkor-hwpf-fix.ll b/llvm/test/CodeGen/AArch64/falkor-hwpf-fix.ll
index 9f2af5adce71a..de72d7e448246 100644
--- a/llvm/test/CodeGen/AArch64/falkor-hwpf-fix.ll
+++ b/llvm/test/CodeGen/AArch64/falkor-hwpf-fix.ll
@@ -10,52 +10,52 @@
 ; CHECK: mov x[[BASE4:[0-9]+]], x[[BASE3]]
 ; CHECK: ldp {{w[0-9]+}}, {{w[0-9]+}}, [x[[BASE4]], #8]
 
-define void @hwpf1(i32* %p, i32* %sp, i32* %sp2, i32* %sp3, i32* %sp4) {
+define void @hwpf1(ptr %p, ptr %sp, ptr %sp2, ptr %sp3, ptr %sp4) {
 entry:
   br label %loop
 
 loop:
   %iv = phi i32 [ 0, %entry ], [ %inc, %loop ]
 
-  %gep = getelementptr inbounds i32, i32* %p, i32 %iv
-  %load1 = load i32, i32* %gep
+  %gep = getelementptr inbounds i32, ptr %p, i32 %iv
+  %load1 = load i32, ptr %gep
 
-  %gep2 = getelementptr inbounds i32, i32* %gep, i32 1
-  %load2 = load i32, i32* %gep2
+  %gep2 = getelementptr inbounds i32, ptr %gep, i32 1
+  %load2 = load i32, ptr %gep2
 
   %add = add i32 %load1, %load2
-  %storegep = getelementptr inbounds i32, i32* %sp, i32 %iv
-  store i32 %add, i32* %storegep
+  %storegep = getelementptr inbounds i32, ptr %sp, i32 %iv
+  store i32 %add, ptr %storegep
 
-  %gep3 = getelementptr inbounds i32, i32* %gep, i32 2
-  %load3 = load i32, i32* %gep3
+  %gep3 = getelementptr inbounds i32, ptr %gep, i32 2
+  %load3 = load i32, ptr %gep3
 
-  %gep4 = getelementptr inbounds i32, i32* %gep, i32 3
-  %load4 = load i32, i32* %gep4
+  %gep4 = getelementptr inbounds i32, ptr %gep, i32 3
+  %load4 = load i32, ptr %gep4
 
   %add2 = add i32 %load3, %load4
-  %storegep2 = getelementptr inbounds i32, i32* %sp2, i32 %iv
-  store i32 %add2, i32* %storegep2
+  %storegep2 = getelementptr inbounds i32, ptr %sp2, i32 %iv
+  store i32 %add2, ptr %storegep2
 
-  %gep5 = getelementptr inbounds i32, i32* %gep, i32 4
-  %load5 = load i32, i32* %gep5
+  %gep5 = getelementptr inbounds i32, ptr %gep, i32 4
+  %load5 = load i32, ptr %gep5
 
-  %gep6 = getelementptr inbounds i32, i32* %gep, i32 5
-  %load6 = load i32, i32* %gep6
+  %gep6 = getelementptr inbounds i32, ptr %gep, i32 5
+  %load6 = load i32, ptr %gep6
 
   %add3 = add i32 %load5, %load6
-  %storegep3 = getelementptr inbounds i32, i32* %sp3, i32 %iv
-  store i32 %add3, i32* %storegep3
+  %storegep3 = getelementptr inbounds i32, ptr %sp3, i32 %iv
+  store i32 %add3, ptr %storegep3
 
-  %gep7 = getelementptr inbounds i32, i32* %gep, i32 6
-  %load7 = load i32, i32* %gep7
+  %gep7 = getelementptr inbounds i32, ptr %gep, i32 6
+  %load7 = load i32, ptr %gep7
 
-  %gep8 = getelementptr inbounds i32, i32* %gep, i32 7
-  %load8 = load i32, i32* %gep8
+  %gep8 = getelementptr inbounds i32, ptr %gep, i32 7
+  %load8 = load i32, ptr %gep8
 
   %add4 = add i32 %load7, %load8
-  %storegep4 = getelementptr inbounds i32, i32* %sp4, i32 %iv
-  store i32 %add4, i32* %storegep4
+  %storegep4 = getelementptr inbounds i32, ptr %sp4, i32 %iv
+  store i32 %add4, ptr %storegep4
 
   %inc = add i32 %iv, 8
   %exitcnd = icmp uge i32 %inc, 1024

diff  --git a/llvm/test/CodeGen/AArch64/falkor-hwpf.ll b/llvm/test/CodeGen/AArch64/falkor-hwpf.ll
index aa4a43f2430ba..983aaafad0b04 100644
--- a/llvm/test/CodeGen/AArch64/falkor-hwpf.ll
+++ b/llvm/test/CodeGen/AArch64/falkor-hwpf.ll
@@ -4,24 +4,24 @@
 ; Check that strided access metadata is added to loads in inner loops when compiling for Falkor.
 
 ; CHECK-LABEL: @hwpf1(
-; CHECK: load i32, i32* %gep, align 4, !falkor.strided.access !0
-; CHECK: load i32, i32* %gep2, align 4, !falkor.strided.access !0
+; CHECK: load i32, ptr %gep, align 4, !falkor.strided.access !0
+; CHECK: load i32, ptr %gep2, align 4, !falkor.strided.access !0
 
 ; NOHWPF-LABEL: @hwpf1(
-; NOHWPF: load i32, i32* %gep, align 4{{$}}
-; NOHWPF: load i32, i32* %gep2, align 4{{$}}
-define void @hwpf1(i32* %p, i32* %p2) {
+; NOHWPF: load i32, ptr %gep, align 4{{$}}
+; NOHWPF: load i32, ptr %gep2, align 4{{$}}
+define void @hwpf1(ptr %p, ptr %p2) {
 entry:
   br label %loop
 
 loop:
   %iv = phi i32 [ 0, %entry ], [ %inc, %loop ]
 
-  %gep = getelementptr inbounds i32, i32* %p, i32 %iv
-  %load = load i32, i32* %gep
+  %gep = getelementptr inbounds i32, ptr %p, i32 %iv
+  %load = load i32, ptr %gep
 
-  %gep2 = getelementptr inbounds i32, i32* %p2, i32 %iv
-  %load2 = load i32, i32* %gep2
+  %gep2 = getelementptr inbounds i32, ptr %p2, i32 %iv
+  %load2 = load i32, ptr %gep2
 
   %inc = add i32 %iv, 1
   %exitcnd = icmp uge i32 %inc, 1024
@@ -33,13 +33,13 @@ exit:
 
 ; Check that outer loop strided load isn't marked.
 ; CHECK-LABEL: @hwpf2(
-; CHECK: load i32, i32* %gep, align 4, !falkor.strided.access !0
-; CHECK: load i32, i32* %gep2, align 4{{$}}
+; CHECK: load i32, ptr %gep, align 4, !falkor.strided.access !0
+; CHECK: load i32, ptr %gep2, align 4{{$}}
 
 ; NOHWPF-LABEL: @hwpf2(
-; NOHWPF: load i32, i32* %gep, align 4{{$}}
-; NOHWPF: load i32, i32* %gep2, align 4{{$}}
-define void @hwpf2(i32* %p) {
+; NOHWPF: load i32, ptr %gep, align 4{{$}}
+; NOHWPF: load i32, ptr %gep2, align 4{{$}}
+define void @hwpf2(ptr %p) {
 entry:
   br label %loop1
 
@@ -54,16 +54,16 @@ loop2.header:
 loop2:
   %iv2 = phi i32 [ 0, %loop2.header ], [ %inc2, %loop2 ]
   %sum = phi i32 [ %outer.sum, %loop2.header ], [ %sum.inc, %loop2 ]
-  %gep = getelementptr inbounds i32, i32* %p, i32 %iv2
-  %load = load i32, i32* %gep
+  %gep = getelementptr inbounds i32, ptr %p, i32 %iv2
+  %load = load i32, ptr %gep
   %sum.inc = add i32 %sum, %load
   %inc2 = add i32 %iv2, 1
   %exitcnd2 = icmp uge i32 %inc2, 1024
   br i1 %exitcnd2, label %exit2, label %loop2
 
 exit2:
-  %gep2 = getelementptr inbounds i32, i32* %p, i32 %iv1
-  %load2 = load i32, i32* %gep2
+  %gep2 = getelementptr inbounds i32, ptr %p, i32 %iv1
+  %load2 = load i32, ptr %gep2
   br label %loop1.latch
 
 loop1.latch:
@@ -78,24 +78,24 @@ exit:
 
 ; Check that non-strided load isn't marked.
 ; CHECK-LABEL: @hwpf3(
-; CHECK: load i32, i32* %gep, align 4, !falkor.strided.access !0
-; CHECK: load i32, i32* %gep2, align 4{{$}}
+; CHECK: load i32, ptr %gep, align 4, !falkor.strided.access !0
+; CHECK: load i32, ptr %gep2, align 4{{$}}
 
 ; NOHWPF-LABEL: @hwpf3(
-; NOHWPF: load i32, i32* %gep, align 4{{$}}
-; NOHWPF: load i32, i32* %gep2, align 4{{$}}
-define void @hwpf3(i32* %p, i32* %p2) {
+; NOHWPF: load i32, ptr %gep, align 4{{$}}
+; NOHWPF: load i32, ptr %gep2, align 4{{$}}
+define void @hwpf3(ptr %p, ptr %p2) {
 entry:
   br label %loop
 
 loop:
   %iv = phi i32 [ 0, %entry ], [ %inc, %loop ]
 
-  %gep = getelementptr inbounds i32, i32* %p, i32 %iv
-  %load = load i32, i32* %gep
+  %gep = getelementptr inbounds i32, ptr %p, i32 %iv
+  %load = load i32, ptr %gep
 
-  %gep2 = getelementptr inbounds i32, i32* %p2, i32 %load
-  %load2 = load i32, i32* %gep2
+  %gep2 = getelementptr inbounds i32, ptr %p2, i32 %load
+  %load2 = load i32, ptr %gep2
 
   %inc = add i32 %iv, 1
   %exitcnd = icmp uge i32 %inc, 1024

diff  --git a/llvm/test/CodeGen/AArch64/fast-isel-address-extends.ll b/llvm/test/CodeGen/AArch64/fast-isel-address-extends.ll
index 8b0ffa8c10dab..88858ae876e57 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-address-extends.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-address-extends.ll
@@ -12,29 +12,29 @@ target triple = "arm64-apple-ios8.0.0"
 ; CHECK: strh wzr, [{{.*}}, [[REG1]], lsl #1]
 
 ; Function Attrs: nounwind optsize ssp
-define void @EdgeLoop(i32 %dir, i32 %edge, i32 %width, i16* %tmp89, i32 %tmp136, i16 %tmp144) #0 {
+define void @EdgeLoop(i32 %dir, i32 %edge, i32 %width, ptr %tmp89, i32 %tmp136, i16 %tmp144) #0 {
 bb:
   %tmp2 = icmp eq i32 %dir, 0
   %.mux = select i1 %tmp2, i32 %width, i32 1
   %tmp142 = sext i32 %.mux to i64
   %tmp151 = shl nsw i64 %tmp142, 1
-  %tmp153 = getelementptr inbounds i16, i16* %tmp89, i64 %tmp151
-  %tmp154 = load i16, i16* %tmp153, align 2
+  %tmp153 = getelementptr inbounds i16, ptr %tmp89, i64 %tmp151
+  %tmp154 = load i16, ptr %tmp153, align 2
   %tmp155 = zext i16 %tmp154 to i32
   br i1 %tmp2, label %bb225, label %bb212
 
 bb212:                                            ; preds = %bb
-  store i16 %tmp144, i16* %tmp89, align 2
+  store i16 %tmp144, ptr %tmp89, align 2
   ret void
 
 bb225:                                            ; preds = %bb
   %tmp248 = trunc i32 %tmp155 to i16
-  store i16 %tmp248, i16* %tmp89, align 2
-  %sunkaddr = ptrtoint i16* %tmp89 to i64
+  store i16 %tmp248, ptr %tmp89, align 2
+  %sunkaddr = ptrtoint ptr %tmp89 to i64
   %sunkaddr1 = mul i64 %tmp142, 2
   %sunkaddr2 = add i64 %sunkaddr, %sunkaddr1
-  %sunkaddr3 = inttoptr i64 %sunkaddr2 to i16*
-  store i16 0, i16* %sunkaddr3, align 2
+  %sunkaddr3 = inttoptr i64 %sunkaddr2 to ptr
+  store i16 0, ptr %sunkaddr3, align 2
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/fast-isel-addressing-modes.ll b/llvm/test/CodeGen/AArch64/fast-isel-addressing-modes.ll
index 339841790f2bf..422d2c7a82342 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-addressing-modes.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-addressing-modes.ll
@@ -3,7 +3,7 @@
 ; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK --check-prefix=FAST
 
 ; Load / Store Base Register only
-define zeroext i1 @load_breg_i1(i1* %a) {
+define zeroext i1 @load_breg_i1(ptr %a) {
 ; SDAG-LABEL: load_breg_i1:
 ; SDAG:       ; %bb.0:
 ; SDAG-NEXT:    ldrb w0, [x0]
@@ -15,11 +15,11 @@ define zeroext i1 @load_breg_i1(i1* %a) {
 ; FAST-NEXT:    and w8, w8, #0x1
 ; FAST-NEXT:    and w0, w8, #0x1
 ; FAST-NEXT:    ret
-  %1 = load i1, i1* %a
+  %1 = load i1, ptr %a
   ret i1 %1
 }
 
-define zeroext i8 @load_breg_i8(i8* %a) {
+define zeroext i8 @load_breg_i8(ptr %a) {
 ; SDAG-LABEL: load_breg_i8:
 ; SDAG:       ; %bb.0:
 ; SDAG-NEXT:    ldrb w0, [x0]
@@ -30,11 +30,11 @@ define zeroext i8 @load_breg_i8(i8* %a) {
 ; FAST-NEXT:    ldrb w8, [x0]
 ; FAST-NEXT:    uxtb w0, w8
 ; FAST-NEXT:    ret
-  %1 = load i8, i8* %a
+  %1 = load i8, ptr %a
   ret i8 %1
 }
 
-define zeroext i16 @load_breg_i16(i16* %a) {
+define zeroext i16 @load_breg_i16(ptr %a) {
 ; SDAG-LABEL: load_breg_i16:
 ; SDAG:       ; %bb.0:
 ; SDAG-NEXT:    ldrh w0, [x0]
@@ -45,56 +45,56 @@ define zeroext i16 @load_breg_i16(i16* %a) {
 ; FAST-NEXT:    ldrh w8, [x0]
 ; FAST-NEXT:    uxth w0, w8
 ; FAST-NEXT:    ret
-  %1 = load i16, i16* %a
+  %1 = load i16, ptr %a
   ret i16 %1
 }
 
-define i32 @load_breg_i32(i32* %a) {
+define i32 @load_breg_i32(ptr %a) {
 ; CHECK-LABEL: load_breg_i32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr w0, [x0]
 ; CHECK-NEXT:    ret
-  %1 = load i32, i32* %a
+  %1 = load i32, ptr %a
   ret i32 %1
 }
 
-define i64 @load_breg_i64(i64* %a) {
+define i64 @load_breg_i64(ptr %a) {
 ; CHECK-LABEL: load_breg_i64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr x0, [x0]
 ; CHECK-NEXT:    ret
-  %1 = load i64, i64* %a
+  %1 = load i64, ptr %a
   ret i64 %1
 }
 
-define float @load_breg_f32(float* %a) {
+define float @load_breg_f32(ptr %a) {
 ; CHECK-LABEL: load_breg_f32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
 ; CHECK-NEXT:    ret
-  %1 = load float, float* %a
+  %1 = load float, ptr %a
   ret float %1
 }
 
-define double @load_breg_f64(double* %a) {
+define double @load_breg_f64(ptr %a) {
 ; CHECK-LABEL: load_breg_f64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ret
-  %1 = load double, double* %a
+  %1 = load double, ptr %a
   ret double %1
 }
 
-define void @store_breg_i1(i1* %a) {
+define void @store_breg_i1(ptr %a) {
 ; CHECK-LABEL: store_breg_i1:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strb wzr, [x0]
 ; CHECK-NEXT:    ret
-  store i1 0, i1* %a
+  store i1 0, ptr %a
   ret void
 }
 
-define void @store_breg_i1_2(i1* %a) {
+define void @store_breg_i1_2(ptr %a) {
 ; SDAG-LABEL: store_breg_i1_2:
 ; SDAG:       ; %bb.0:
 ; SDAG-NEXT:    mov w8, #1
@@ -107,61 +107,61 @@ define void @store_breg_i1_2(i1* %a) {
 ; FAST-NEXT:    and w8, w8, #0x1
 ; FAST-NEXT:    strb w8, [x0]
 ; FAST-NEXT:    ret
-  store i1 true, i1* %a
+  store i1 true, ptr %a
   ret void
 }
 
-define void @store_breg_i8(i8* %a) {
+define void @store_breg_i8(ptr %a) {
 ; CHECK-LABEL: store_breg_i8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strb wzr, [x0]
 ; CHECK-NEXT:    ret
-  store i8 0, i8* %a
+  store i8 0, ptr %a
   ret void
 }
 
-define void @store_breg_i16(i16* %a) {
+define void @store_breg_i16(ptr %a) {
 ; CHECK-LABEL: store_breg_i16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    strh wzr, [x0]
 ; CHECK-NEXT:    ret
-  store i16 0, i16* %a
+  store i16 0, ptr %a
   ret void
 }
 
-define void @store_breg_i32(i32* %a) {
+define void @store_breg_i32(ptr %a) {
 ; CHECK-LABEL: store_breg_i32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str wzr, [x0]
 ; CHECK-NEXT:    ret
-  store i32 0, i32* %a
+  store i32 0, ptr %a
   ret void
 }
 
-define void @store_breg_i64(i64* %a) {
+define void @store_breg_i64(ptr %a) {
 ; CHECK-LABEL: store_breg_i64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str xzr, [x0]
 ; CHECK-NEXT:    ret
-  store i64 0, i64* %a
+  store i64 0, ptr %a
   ret void
 }
 
-define void @store_breg_f32(float* %a) {
+define void @store_breg_f32(ptr %a) {
 ; CHECK-LABEL: store_breg_f32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str wzr, [x0]
 ; CHECK-NEXT:    ret
-  store float 0.0, float* %a
+  store float 0.0, ptr %a
   ret void
 }
 
-define void @store_breg_f64(double* %a) {
+define void @store_breg_f64(ptr %a) {
 ; CHECK-LABEL: store_breg_f64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    str xzr, [x0]
 ; CHECK-NEXT:    ret
-  store double 0.0, double* %a
+  store double 0.0, ptr %a
   ret void
 }
 
@@ -178,8 +178,8 @@ define i32 @load_immoff_1() {
 ; FAST-NEXT:    mov x8, #128
 ; FAST-NEXT:    ldr w0, [x8]
 ; FAST-NEXT:    ret
-  %1 = inttoptr i64 128 to i32*
-  %2 = load i32, i32* %1
+  %1 = inttoptr i64 128 to ptr
+  %2 = load i32, ptr %1
   ret i32 %2
 }
 
@@ -191,8 +191,8 @@ define i32 @load_breg_immoff_1(i64 %a) {
 ; CHECK-NEXT:    ldur w0, [x0, #-256]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, -256
-  %2 = inttoptr i64 %1 to i32*
-  %3 = load i32, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i32, ptr %2
   ret i32 %3
 }
 
@@ -204,8 +204,8 @@ define i32 @load_breg_immoff_2(i64 %a) {
 ; CHECK-NEXT:    ldr w0, [x8]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, -257
-  %2 = inttoptr i64 %1 to i32*
-  %3 = load i32, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i32, ptr %2
   ret i32 %3
 }
 
@@ -216,8 +216,8 @@ define i32 @load_breg_immoff_3(i64 %a) {
 ; CHECK-NEXT:    ldur w0, [x0, #255]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, 255
-  %2 = inttoptr i64 %1 to i32*
-  %3 = load i32, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i32, ptr %2
   ret i32 %3
 }
 
@@ -229,8 +229,8 @@ define i32 @load_breg_immoff_4(i64 %a) {
 ; CHECK-NEXT:    ldr w0, [x8]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, 257
-  %2 = inttoptr i64 %1 to i32*
-  %3 = load i32, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i32, ptr %2
   ret i32 %3
 }
 
@@ -241,8 +241,8 @@ define i32 @load_breg_immoff_5(i64 %a) {
 ; CHECK-NEXT:    ldr w0, [x0, #16380]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, 16380
-  %2 = inttoptr i64 %1 to i32*
-  %3 = load i32, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i32, ptr %2
   ret i32 %3
 }
 
@@ -260,8 +260,8 @@ define i32 @load_breg_immoff_6(i64 %a) {
 ; FAST-NEXT:    ldr w0, [x8]
 ; FAST-NEXT:    ret
   %1 = add i64 %a, 16384
-  %2 = inttoptr i64 %1 to i32*
-  %3 = load i32, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i32, ptr %2
   ret i32 %3
 }
 
@@ -272,8 +272,8 @@ define void @store_breg_immoff_1(i64 %a) {
 ; CHECK-NEXT:    stur wzr, [x0, #-256]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, -256
-  %2 = inttoptr i64 %1 to i32*
-  store i32 0, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  store i32 0, ptr %2
   ret void
 }
 
@@ -285,8 +285,8 @@ define void @store_breg_immoff_2(i64 %a) {
 ; CHECK-NEXT:    str wzr, [x8]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, -257
-  %2 = inttoptr i64 %1 to i32*
-  store i32 0, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  store i32 0, ptr %2
   ret void
 }
 
@@ -297,8 +297,8 @@ define void @store_breg_immoff_3(i64 %a) {
 ; CHECK-NEXT:    stur wzr, [x0, #255]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, 255
-  %2 = inttoptr i64 %1 to i32*
-  store i32 0, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  store i32 0, ptr %2
   ret void
 }
 
@@ -310,8 +310,8 @@ define void @store_breg_immoff_4(i64 %a) {
 ; CHECK-NEXT:    str wzr, [x8]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, 257
-  %2 = inttoptr i64 %1 to i32*
-  store i32 0, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  store i32 0, ptr %2
   ret void
 }
 
@@ -322,8 +322,8 @@ define void @store_breg_immoff_5(i64 %a) {
 ; CHECK-NEXT:    str wzr, [x0, #16380]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, 16380
-  %2 = inttoptr i64 %1 to i32*
-  store i32 0, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  store i32 0, ptr %2
   ret void
 }
 
@@ -341,8 +341,8 @@ define void @store_breg_immoff_6(i64 %a) {
 ; FAST-NEXT:    str wzr, [x8]
 ; FAST-NEXT:    ret
   %1 = add i64 %a, 16384
-  %2 = inttoptr i64 %1 to i32*
-  store i32 0, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  store i32 0, ptr %2
   ret void
 }
 
@@ -352,8 +352,8 @@ define i64 @load_breg_immoff_7(i64 %a) {
 ; CHECK-NEXT:    ldr x0, [x0, #48]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, 48
-  %2 = inttoptr i64 %1 to i64*
-  %3 = load i64, i64* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i64, ptr %2
   ret i64 %3
 }
 
@@ -364,8 +364,8 @@ define i64 @load_breg_immoff_8(i64 %a) {
 ; CHECK-NEXT:    ldr x0, [x0, #48]
 ; CHECK-NEXT:    ret
   %1 = add i64 48, %a
-  %2 = inttoptr i64 %1 to i64*
-  %3 = load i64, i64* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i64, ptr %2
   ret i64 %3
 }
 
@@ -376,8 +376,8 @@ define i64 @load_breg_offreg_1(i64 %a, i64 %b) {
 ; CHECK-NEXT:    ldr x0, [x0, x1]
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i64*
-  %3 = load i64, i64* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i64, ptr %2
   ret i64 %3
 }
 
@@ -388,8 +388,8 @@ define i64 @load_breg_offreg_2(i64 %a, i64 %b) {
 ; CHECK-NEXT:    ldr x0, [x1, x0]
 ; CHECK-NEXT:    ret
   %1 = add i64 %b, %a
-  %2 = inttoptr i64 %1 to i64*
-  %3 = load i64, i64* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i64, ptr %2
   ret i64 %3
 }
 
@@ -402,8 +402,8 @@ define i64 @load_breg_offreg_immoff_1(i64 %a, i64 %b) {
 ; CHECK-NEXT:    ret
   %1 = add i64 %a, %b
   %2 = add i64 %1, 48
-  %3 = inttoptr i64 %2 to i64*
-  %4 = load i64, i64* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i64, ptr %3
   ret i64 %4
 }
 
@@ -422,8 +422,8 @@ define i64 @load_breg_offreg_immoff_2(i64 %a, i64 %b) {
 ; FAST-NEXT:    ret
   %1 = add i64 %a, %b
   %2 = add i64 %1, 61440
-  %3 = inttoptr i64 %2 to i64*
-  %4 = load i64, i64* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i64, ptr %3
   ret i64 %4
 }
 
@@ -435,8 +435,8 @@ define i32 @load_shift_offreg_1(i64 %a) {
 ; CHECK-NEXT:    ldr w0, [x8]
 ; CHECK-NEXT:    ret
   %1 = shl i64 %a, 2
-  %2 = inttoptr i64 %1 to i32*
-  %3 = load i32, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i32, ptr %2
   ret i32 %3
 }
 
@@ -447,8 +447,8 @@ define i32 @load_mul_offreg_1(i64 %a) {
 ; CHECK-NEXT:    ldr w0, [x8]
 ; CHECK-NEXT:    ret
   %1 = mul i64 %a, 4
-  %2 = inttoptr i64 %1 to i32*
-  %3 = load i32, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i32, ptr %2
   ret i32 %3
 }
 
@@ -460,8 +460,8 @@ define i32 @load_breg_shift_offreg_1(i64 %a, i64 %b) {
 ; CHECK-NEXT:    ret
   %1 = shl i64 %a, 2
   %2 = add i64 %1, %b
-  %3 = inttoptr i64 %2 to i32*
-  %4 = load i32, i32* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i32, ptr %3
   ret i32 %4
 }
 
@@ -472,8 +472,8 @@ define i32 @load_breg_shift_offreg_2(i64 %a, i64 %b) {
 ; CHECK-NEXT:    ret
   %1 = shl i64 %a, 2
   %2 = add i64 %b, %1
-  %3 = inttoptr i64 %2 to i32*
-  %4 = load i32, i32* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i32, ptr %3
   ret i32 %4
 }
 
@@ -492,8 +492,8 @@ define i32 @load_breg_shift_offreg_3(i64 %a, i64 %b) {
   %1 = shl i64 %a, 2
   %2 = shl i64 %b, 2
   %3 = add i64 %1, %2
-  %4 = inttoptr i64 %3 to i32*
-  %5 = load i32, i32* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i32, ptr %4
   ret i32 %5
 }
 
@@ -512,8 +512,8 @@ define i32 @load_breg_shift_offreg_4(i64 %a, i64 %b) {
   %1 = shl i64 %a, 2
   %2 = shl i64 %b, 2
   %3 = add i64 %2, %1
-  %4 = inttoptr i64 %3 to i32*
-  %5 = load i32, i32* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i32, ptr %4
   ret i32 %5
 }
 
@@ -526,8 +526,8 @@ define i32 @load_breg_shift_offreg_5(i64 %a, i64 %b) {
   %1 = shl i64 %a, 2
   %2 = shl i64 %b, 3
   %3 = add i64 %1, %2
-  %4 = inttoptr i64 %3 to i32*
-  %5 = load i32, i32* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i32, ptr %4
   ret i32 %5
 }
 
@@ -538,8 +538,8 @@ define i32 @load_breg_mul_offreg_1(i64 %a, i64 %b) {
 ; CHECK-NEXT:    ret
   %1 = mul i64 %a, 4
   %2 = add i64 %1, %b
-  %3 = inttoptr i64 %2 to i32*
-  %4 = load i32, i32* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i32, ptr %3
   ret i32 %4
 }
 
@@ -556,8 +556,8 @@ define zeroext i8 @load_breg_and_offreg_1(i64 %a, i64 %b) {
 ; FAST-NEXT:    ret
   %1 = and i64 %a, 4294967295
   %2 = add i64 %1, %b
-  %3 = inttoptr i64 %2 to i8*
-  %4 = load i8, i8* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i8, ptr %3
   ret i8 %4
 }
 
@@ -575,8 +575,8 @@ define zeroext i16 @load_breg_and_offreg_2(i64 %a, i64 %b) {
   %1 = and i64 %a, 4294967295
   %2 = shl i64 %1, 1
   %3 = add i64 %2, %b
-  %4 = inttoptr i64 %3 to i16*
-  %5 = load i16, i16* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i16, ptr %4
   ret i16 %5
 }
 
@@ -588,8 +588,8 @@ define i32 @load_breg_and_offreg_3(i64 %a, i64 %b) {
   %1 = and i64 %a, 4294967295
   %2 = shl i64 %1, 2
   %3 = add i64 %2, %b
-  %4 = inttoptr i64 %3 to i32*
-  %5 = load i32, i32* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i32, ptr %4
   ret i32 %5
 }
 
@@ -601,8 +601,8 @@ define i64 @load_breg_and_offreg_4(i64 %a, i64 %b) {
   %1 = and i64 %a, 4294967295
   %2 = shl i64 %1, 3
   %3 = add i64 %2, %b
-  %4 = inttoptr i64 %3 to i64*
-  %5 = load i64, i64* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i64, ptr %4
   ret i64 %5
 }
 
@@ -615,8 +615,8 @@ define i64 @load_breg_and_offreg_5(i64 %a, i64 %b, i64 %c) {
 ; CHECK-NEXT:    ret
   %1 = and i64 %a, %c
   %2 = add i64 %1, %b
-  %3 = inttoptr i64 %2 to i64*
-  %4 = load i64, i64* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i64, ptr %3
   ret i64 %4
 }
 
@@ -629,8 +629,8 @@ define i64 @load_breg_and_offreg_6(i64 %a, i64 %b, i64 %c) {
   %1 = and i64 %a, %c
   %2 = shl i64 %1, 3
   %3 = add i64 %2, %b
-  %4 = inttoptr i64 %3 to i64*
-  %5 = load i64, i64* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i64, ptr %4
   ret i64 %5
 }
 
@@ -643,8 +643,8 @@ define i32 @load_breg_zext_shift_offreg_1(i32 %a, i64 %b) {
   %1 = zext i32 %a to i64
   %2 = shl i64 %1, 2
   %3 = add i64 %2, %b
-  %4 = inttoptr i64 %3 to i32*
-  %5 = load i32, i32* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i32, ptr %4
   ret i32 %5
 }
 
@@ -656,8 +656,8 @@ define i32 @load_breg_zext_shift_offreg_2(i32 %a, i64 %b) {
   %1 = zext i32 %a to i64
   %2 = shl i64 %1, 2
   %3 = add i64 %b, %2
-  %4 = inttoptr i64 %3 to i32*
-  %5 = load i32, i32* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i32, ptr %4
   ret i32 %5
 }
 
@@ -669,8 +669,8 @@ define i32 @load_breg_zext_mul_offreg_1(i32 %a, i64 %b) {
   %1 = zext i32 %a to i64
   %2 = mul i64 %1, 4
   %3 = add i64 %2, %b
-  %4 = inttoptr i64 %3 to i32*
-  %5 = load i32, i32* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i32, ptr %4
   ret i32 %5
 }
 
@@ -682,8 +682,8 @@ define i32 @load_breg_sext_shift_offreg_1(i32 %a, i64 %b) {
   %1 = sext i32 %a to i64
   %2 = shl i64 %1, 2
   %3 = add i64 %2, %b
-  %4 = inttoptr i64 %3 to i32*
-  %5 = load i32, i32* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i32, ptr %4
   ret i32 %5
 }
 
@@ -695,8 +695,8 @@ define i32 @load_breg_sext_shift_offreg_2(i32 %a, i64 %b) {
   %1 = sext i32 %a to i64
   %2 = shl i64 %1, 2
   %3 = add i64 %b, %2
-  %4 = inttoptr i64 %3 to i32*
-  %5 = load i32, i32* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i32, ptr %4
   ret i32 %5
 }
 
@@ -711,8 +711,8 @@ define i32 @load_breg_sext_shift_offreg_3(i32 %a, i64 %b) {
   %2 = sext i32 %1 to i64
   %3 = shl i64 %2, 2
   %4 = add i64 %b, %3
-  %5 = inttoptr i64 %4 to i32*
-  %6 = load i32, i32* %5
+  %5 = inttoptr i64 %4 to ptr
+  %6 = load i32, ptr %5
   ret i32 %6
 }
 
@@ -725,8 +725,8 @@ define i32 @load_breg_sext_mul_offreg_1(i32 %a, i64 %b) {
   %1 = sext i32 %a to i64
   %2 = mul i64 %1, 4
   %3 = add i64 %2, %b
-  %4 = inttoptr i64 %3 to i32*
-  %5 = load i32, i32* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i32, ptr %4
   ret i32 %5
 }
 
@@ -741,8 +741,8 @@ define i64 @load_sext_shift_offreg_imm1(i32 %a) {
   %1 = sext i32 %a to i64
   %2 = shl i64 %1, 3
   %3 = add i64 %2, 8
-  %4 = inttoptr i64 %3 to i64*
-  %5 = load i64, i64* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i64, ptr %4
   ret i64 %5
 }
 
@@ -757,8 +757,8 @@ define i64 @load_breg_sext_shift_offreg_imm1(i32 %a, i64 %b) {
   %2 = shl i64 %1, 3
   %3 = add i64 %b, %2
   %4 = add i64 %3, 8
-  %5 = inttoptr i64 %4 to i64*
-  %6 = load i64, i64* %5
+  %5 = inttoptr i64 %4 to ptr
+  %6 = load i64, ptr %5
   ret i64 %6
 }
 
@@ -779,8 +779,8 @@ define i64 @kill_reg(i64 %a) {
 ; FAST-NEXT:    ret
   %1 = sub i64 %a, 8
   %2 = add i64 %1, 96
-  %3 = inttoptr i64 %2 to i64*
-  %4 = load i64, i64* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i64, ptr %3
   %5 = add i64 %2, %4
   ret i64 %5
 }
@@ -806,11 +806,11 @@ define void @store_fi(i64 %i) {
 ; FAST-NEXT:    add sp, sp, #32
 ; FAST-NEXT:    ret
   %1 = alloca [8 x i32]
-  %2 = ptrtoint [8 x i32]* %1 to i64
+  %2 = ptrtoint ptr %1 to i64
   %3 = mul i64 %i, 4
   %4 = add i64 %2, %3
-  %5 = inttoptr i64 %4 to i32*
-  store i32 47, i32* %5, align 4
+  %5 = inttoptr i64 %4 to ptr
+  store i32 47, ptr %5, align 4
   ret void
 }
 
@@ -824,11 +824,11 @@ define i32 @load_fi(i64 %i) {
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %1 = alloca [8 x i32]
-  %2 = ptrtoint [8 x i32]* %1 to i64
+  %2 = ptrtoint ptr %1 to i64
   %3 = mul i64 %i, 4
   %4 = add i64 %2, %3
-  %5 = inttoptr i64 %4 to i32*
-  %6 = load i32, i32* %5, align 4
+  %5 = inttoptr i64 %4 to ptr
+  %6 = load i32, ptr %5, align 4
   ret i32 %6
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/fast-isel-atomic.ll b/llvm/test/CodeGen/AArch64/fast-isel-atomic.ll
index 7a841fd9da5ad..173308d44a1d1 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-atomic.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-atomic.ll
@@ -8,8 +8,8 @@
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT:  strb  w1, [x0]
 ; CHECK-NEXT:  ret
-define void @atomic_store_monotonic_8(i8* %p, i8 %val) #0 {
-  store atomic i8 %val, i8* %p monotonic, align 1
+define void @atomic_store_monotonic_8(ptr %p, i8 %val) #0 {
+  store atomic i8 %val, ptr %p monotonic, align 1
   ret void
 }
 
@@ -17,9 +17,9 @@ define void @atomic_store_monotonic_8(i8* %p, i8 %val) #0 {
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT:  strb w1, [x0, #1]
 ; CHECK-NEXT:  ret
-define void @atomic_store_monotonic_8_off(i8* %p, i8 %val) #0 {
-  %tmp0 = getelementptr i8, i8* %p, i32 1
-  store atomic i8 %val, i8* %tmp0 monotonic, align 1
+define void @atomic_store_monotonic_8_off(ptr %p, i8 %val) #0 {
+  %tmp0 = getelementptr i8, ptr %p, i32 1
+  store atomic i8 %val, ptr %tmp0 monotonic, align 1
   ret void
 }
 
@@ -27,8 +27,8 @@ define void @atomic_store_monotonic_8_off(i8* %p, i8 %val) #0 {
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT:  strh  w1, [x0]
 ; CHECK-NEXT:  ret
-define void @atomic_store_monotonic_16(i16* %p, i16 %val) #0 {
-  store atomic i16 %val, i16* %p monotonic, align 2
+define void @atomic_store_monotonic_16(ptr %p, i16 %val) #0 {
+  store atomic i16 %val, ptr %p monotonic, align 2
   ret void
 }
 
@@ -36,9 +36,9 @@ define void @atomic_store_monotonic_16(i16* %p, i16 %val) #0 {
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT:  strh w1, [x0, #2]
 ; CHECK-NEXT:  ret
-define void @atomic_store_monotonic_16_off(i16* %p, i16 %val) #0 {
-  %tmp0 = getelementptr i16, i16* %p, i32 1
-  store atomic i16 %val, i16* %tmp0 monotonic, align 2
+define void @atomic_store_monotonic_16_off(ptr %p, i16 %val) #0 {
+  %tmp0 = getelementptr i16, ptr %p, i32 1
+  store atomic i16 %val, ptr %tmp0 monotonic, align 2
   ret void
 }
 
@@ -46,8 +46,8 @@ define void @atomic_store_monotonic_16_off(i16* %p, i16 %val) #0 {
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT:  str  w1, [x0]
 ; CHECK-NEXT:  ret
-define void @atomic_store_monotonic_32(i32* %p, i32 %val) #0 {
-  store atomic i32 %val, i32* %p monotonic, align 4
+define void @atomic_store_monotonic_32(ptr %p, i32 %val) #0 {
+  store atomic i32 %val, ptr %p monotonic, align 4
   ret void
 }
 
@@ -55,9 +55,9 @@ define void @atomic_store_monotonic_32(i32* %p, i32 %val) #0 {
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT:  str w1, [x0, #4]
 ; CHECK-NEXT:  ret
-define void @atomic_store_monotonic_32_off(i32* %p, i32 %val) #0 {
-  %tmp0 = getelementptr i32, i32* %p, i32 1
-  store atomic i32 %val, i32* %tmp0 monotonic, align 4
+define void @atomic_store_monotonic_32_off(ptr %p, i32 %val) #0 {
+  %tmp0 = getelementptr i32, ptr %p, i32 1
+  store atomic i32 %val, ptr %tmp0 monotonic, align 4
   ret void
 }
 
@@ -65,8 +65,8 @@ define void @atomic_store_monotonic_32_off(i32* %p, i32 %val) #0 {
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT:  str  x1, [x0]
 ; CHECK-NEXT:  ret
-define void @atomic_store_monotonic_64(i64* %p, i64 %val) #0 {
-  store atomic i64 %val, i64* %p monotonic, align 8
+define void @atomic_store_monotonic_64(ptr %p, i64 %val) #0 {
+  store atomic i64 %val, ptr %p monotonic, align 8
   ret void
 }
 
@@ -74,9 +74,9 @@ define void @atomic_store_monotonic_64(i64* %p, i64 %val) #0 {
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT:  str x1, [x0, #8]
 ; CHECK-NEXT:  ret
-define void @atomic_store_monotonic_64_off(i64* %p, i64 %val) #0 {
-  %tmp0 = getelementptr i64, i64* %p, i32 1
-  store atomic i64 %val, i64* %tmp0 monotonic, align 8
+define void @atomic_store_monotonic_64_off(ptr %p, i64 %val) #0 {
+  %tmp0 = getelementptr i64, ptr %p, i32 1
+  store atomic i64 %val, ptr %tmp0 monotonic, align 8
   ret void
 }
 
@@ -84,8 +84,8 @@ define void @atomic_store_monotonic_64_off(i64* %p, i64 %val) #0 {
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT:  stlrb w1, [x0]
 ; CHECK-NEXT:  ret
-define void @atomic_store_release_8(i8* %p, i8 %val) #0 {
-  store atomic i8 %val, i8* %p release, align 1
+define void @atomic_store_release_8(ptr %p, i8 %val) #0 {
+  store atomic i8 %val, ptr %p release, align 1
   ret void
 }
 
@@ -94,9 +94,9 @@ define void @atomic_store_release_8(i8* %p, i8 %val) #0 {
 ; CHECK-NEXT:  add [[REG0:x[0-9]+]], x0, #1
 ; CHECK-NEXT:  stlrb w1, [[[REG0]]]
 ; CHECK-NEXT:  ret
-define void @atomic_store_release_8_off(i8* %p, i8 %val) #0 {
-  %tmp0 = getelementptr i8, i8* %p, i32 1
-  store atomic i8 %val, i8* %tmp0 release, align 1
+define void @atomic_store_release_8_off(ptr %p, i8 %val) #0 {
+  %tmp0 = getelementptr i8, ptr %p, i32 1
+  store atomic i8 %val, ptr %tmp0 release, align 1
   ret void
 }
 
@@ -104,8 +104,8 @@ define void @atomic_store_release_8_off(i8* %p, i8 %val) #0 {
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT:  stlrh w1, [x0]
 ; CHECK-NEXT:  ret
-define void @atomic_store_release_16(i16* %p, i16 %val) #0 {
-  store atomic i16 %val, i16* %p release, align 2
+define void @atomic_store_release_16(ptr %p, i16 %val) #0 {
+  store atomic i16 %val, ptr %p release, align 2
   ret void
 }
 
@@ -114,9 +114,9 @@ define void @atomic_store_release_16(i16* %p, i16 %val) #0 {
 ; CHECK-NEXT:  add [[REG0:x[0-9]+]], x0, #2
 ; CHECK-NEXT:  stlrh w1, [[[REG0]]]
 ; CHECK-NEXT:  ret
-define void @atomic_store_release_16_off(i16* %p, i16 %val) #0 {
-  %tmp0 = getelementptr i16, i16* %p, i32 1
-  store atomic i16 %val, i16* %tmp0 release, align 2
+define void @atomic_store_release_16_off(ptr %p, i16 %val) #0 {
+  %tmp0 = getelementptr i16, ptr %p, i32 1
+  store atomic i16 %val, ptr %tmp0 release, align 2
   ret void
 }
 
@@ -124,8 +124,8 @@ define void @atomic_store_release_16_off(i16* %p, i16 %val) #0 {
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT:  stlr w1, [x0]
 ; CHECK-NEXT:  ret
-define void @atomic_store_release_32(i32* %p, i32 %val) #0 {
-  store atomic i32 %val, i32* %p release, align 4
+define void @atomic_store_release_32(ptr %p, i32 %val) #0 {
+  store atomic i32 %val, ptr %p release, align 4
   ret void
 }
 
@@ -134,9 +134,9 @@ define void @atomic_store_release_32(i32* %p, i32 %val) #0 {
 ; CHECK-NEXT:  add [[REG0:x[0-9]+]], x0, #4
 ; CHECK-NEXT:  stlr w1, [[[REG0]]]
 ; CHECK-NEXT:  ret
-define void @atomic_store_release_32_off(i32* %p, i32 %val) #0 {
-  %tmp0 = getelementptr i32, i32* %p, i32 1
-  store atomic i32 %val, i32* %tmp0 release, align 4
+define void @atomic_store_release_32_off(ptr %p, i32 %val) #0 {
+  %tmp0 = getelementptr i32, ptr %p, i32 1
+  store atomic i32 %val, ptr %tmp0 release, align 4
   ret void
 }
 
@@ -144,8 +144,8 @@ define void @atomic_store_release_32_off(i32* %p, i32 %val) #0 {
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT:  stlr x1, [x0]
 ; CHECK-NEXT:  ret
-define void @atomic_store_release_64(i64* %p, i64 %val) #0 {
-  store atomic i64 %val, i64* %p release, align 8
+define void @atomic_store_release_64(ptr %p, i64 %val) #0 {
+  store atomic i64 %val, ptr %p release, align 8
   ret void
 }
 
@@ -154,9 +154,9 @@ define void @atomic_store_release_64(i64* %p, i64 %val) #0 {
 ; CHECK-NEXT:  add [[REG0:x[0-9]+]], x0, #8
 ; CHECK-NEXT:  stlr x1, [[[REG0]]]
 ; CHECK-NEXT:  ret
-define void @atomic_store_release_64_off(i64* %p, i64 %val) #0 {
-  %tmp0 = getelementptr i64, i64* %p, i32 1
-  store atomic i64 %val, i64* %tmp0 release, align 8
+define void @atomic_store_release_64_off(ptr %p, i64 %val) #0 {
+  %tmp0 = getelementptr i64, ptr %p, i32 1
+  store atomic i64 %val, ptr %tmp0 release, align 8
   ret void
 }
 
@@ -165,8 +165,8 @@ define void @atomic_store_release_64_off(i64* %p, i64 %val) #0 {
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT:  stlrb w1, [x0]
 ; CHECK-NEXT:  ret
-define void @atomic_store_seq_cst_8(i8* %p, i8 %val) #0 {
-  store atomic i8 %val, i8* %p seq_cst, align 1
+define void @atomic_store_seq_cst_8(ptr %p, i8 %val) #0 {
+  store atomic i8 %val, ptr %p seq_cst, align 1
   ret void
 }
 
@@ -175,9 +175,9 @@ define void @atomic_store_seq_cst_8(i8* %p, i8 %val) #0 {
 ; CHECK-NEXT:  add [[REG0:x[0-9]+]], x0, #1
 ; CHECK-NEXT:  stlrb w1, [[[REG0]]]
 ; CHECK-NEXT:  ret
-define void @atomic_store_seq_cst_8_off(i8* %p, i8 %val) #0 {
-  %tmp0 = getelementptr i8, i8* %p, i32 1
-  store atomic i8 %val, i8* %tmp0 seq_cst, align 1
+define void @atomic_store_seq_cst_8_off(ptr %p, i8 %val) #0 {
+  %tmp0 = getelementptr i8, ptr %p, i32 1
+  store atomic i8 %val, ptr %tmp0 seq_cst, align 1
   ret void
 }
 
@@ -185,8 +185,8 @@ define void @atomic_store_seq_cst_8_off(i8* %p, i8 %val) #0 {
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT:  stlrh w1, [x0]
 ; CHECK-NEXT:  ret
-define void @atomic_store_seq_cst_16(i16* %p, i16 %val) #0 {
-  store atomic i16 %val, i16* %p seq_cst, align 2
+define void @atomic_store_seq_cst_16(ptr %p, i16 %val) #0 {
+  store atomic i16 %val, ptr %p seq_cst, align 2
   ret void
 }
 
@@ -195,9 +195,9 @@ define void @atomic_store_seq_cst_16(i16* %p, i16 %val) #0 {
 ; CHECK-NEXT:  add [[REG0:x[0-9]+]], x0, #2
 ; CHECK-NEXT:  stlrh w1, [[[REG0]]]
 ; CHECK-NEXT:  ret
-define void @atomic_store_seq_cst_16_off(i16* %p, i16 %val) #0 {
-  %tmp0 = getelementptr i16, i16* %p, i32 1
-  store atomic i16 %val, i16* %tmp0 seq_cst, align 2
+define void @atomic_store_seq_cst_16_off(ptr %p, i16 %val) #0 {
+  %tmp0 = getelementptr i16, ptr %p, i32 1
+  store atomic i16 %val, ptr %tmp0 seq_cst, align 2
   ret void
 }
 
@@ -205,8 +205,8 @@ define void @atomic_store_seq_cst_16_off(i16* %p, i16 %val) #0 {
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT:  stlr w1, [x0]
 ; CHECK-NEXT:  ret
-define void @atomic_store_seq_cst_32(i32* %p, i32 %val) #0 {
-  store atomic i32 %val, i32* %p seq_cst, align 4
+define void @atomic_store_seq_cst_32(ptr %p, i32 %val) #0 {
+  store atomic i32 %val, ptr %p seq_cst, align 4
   ret void
 }
 
@@ -215,9 +215,9 @@ define void @atomic_store_seq_cst_32(i32* %p, i32 %val) #0 {
 ; CHECK-NEXT:  add [[REG0:x[0-9]+]], x0, #4
 ; CHECK-NEXT:  stlr w1, [[[REG0]]]
 ; CHECK-NEXT:  ret
-define void @atomic_store_seq_cst_32_off(i32* %p, i32 %val) #0 {
-  %tmp0 = getelementptr i32, i32* %p, i32 1
-  store atomic i32 %val, i32* %tmp0 seq_cst, align 4
+define void @atomic_store_seq_cst_32_off(ptr %p, i32 %val) #0 {
+  %tmp0 = getelementptr i32, ptr %p, i32 1
+  store atomic i32 %val, ptr %tmp0 seq_cst, align 4
   ret void
 }
 
@@ -225,8 +225,8 @@ define void @atomic_store_seq_cst_32_off(i32* %p, i32 %val) #0 {
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT:  stlr x1, [x0]
 ; CHECK-NEXT:  ret
-define void @atomic_store_seq_cst_64(i64* %p, i64 %val) #0 {
-  store atomic i64 %val, i64* %p seq_cst, align 8
+define void @atomic_store_seq_cst_64(ptr %p, i64 %val) #0 {
+  store atomic i64 %val, ptr %p seq_cst, align 8
   ret void
 }
 
@@ -235,9 +235,9 @@ define void @atomic_store_seq_cst_64(i64* %p, i64 %val) #0 {
 ; CHECK-NEXT:  add [[REG0:x[0-9]+]], x0, #8
 ; CHECK-NEXT:  stlr x1, [[[REG0]]]
 ; CHECK-NEXT:  ret
-define void @atomic_store_seq_cst_64_off(i64* %p, i64 %val) #0 {
-  %tmp0 = getelementptr i64, i64* %p, i32 1
-  store atomic i64 %val, i64* %tmp0 seq_cst, align 8
+define void @atomic_store_seq_cst_64_off(ptr %p, i64 %val) #0 {
+  %tmp0 = getelementptr i64, ptr %p, i32 1
+  store atomic i64 %val, ptr %tmp0 seq_cst, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-mask.ll b/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-mask.ll
index 63b2937a8a583..e58e233a0c551 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-mask.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-mask.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=aarch64-apple-darwin -O0 -fast-isel -fast-isel-abort=0 -verify-machineinstrs < %s | FileCheck %s
 
-define void @test(i64 %a, i64 %b, i2* %c) {
+define void @test(i64 %a, i64 %b, ptr %c) {
 ; CHECK-LABEL: test
 ; CHECK:       and [[REG1:w[0-9]+]], {{w[0-9]+}}, #0x3
 ; CHECK-NEXT:  strb [[REG1]], [x2]
@@ -8,7 +8,7 @@ define void @test(i64 %a, i64 %b, i2* %c) {
  %1 = trunc i64 %a to i2
  %2 = trunc i64 %b to i1
 ; Force fast-isel to fall back to SDAG.
- store i2 %1, i2* %c, align 8
+ store i2 %1, ptr %c, align 8
  br i1 %2, label %bb1, label %bb2
 
 bb1:

diff  --git a/llvm/test/CodeGen/AArch64/fast-isel-branch-uncond-debug.ll b/llvm/test/CodeGen/AArch64/fast-isel-branch-uncond-debug.ll
index 902ec7a9c095c..d0f3c82433923 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-branch-uncond-debug.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-branch-uncond-debug.ll
@@ -12,7 +12,7 @@ entry:
 for.cond:
   br label %for.cond, !dbg !15, !llvm.loop !18
 }
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
 
 declare void @llvm.dbg.value(metadata, metadata, metadata) #2
 

diff  --git a/llvm/test/CodeGen/AArch64/fast-isel-call-return.ll b/llvm/test/CodeGen/AArch64/fast-isel-call-return.ll
index a03b12e8d3ea5..c87038a7098f5 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-call-return.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-call-return.ll
@@ -2,11 +2,11 @@
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-linux-gnu"
 
-define i8* @test_call_return_type(i64 %size) {
+define ptr @test_call_return_type(i64 %size) {
 entry:
 ; CHECK: bl xmalloc
-  %0 = call noalias i8* @xmalloc(i64 undef)
-  ret i8* %0
+  %0 = call noalias ptr @xmalloc(i64 undef)
+  ret ptr %0
 }
 
-declare noalias i8* @xmalloc(i64)
+declare noalias ptr @xmalloc(i64)

diff  --git a/llvm/test/CodeGen/AArch64/fast-isel-cbz.ll b/llvm/test/CodeGen/AArch64/fast-isel-cbz.ll
index 45cc678a0a15f..52398315da703 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-cbz.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-cbz.ll
@@ -57,10 +57,10 @@ bb1:
   ret i32 0
 }
 
-define i32 @icmp_eq_ptr(i8* %a) {
+define i32 @icmp_eq_ptr(ptr %a) {
 ; CHECK-LABEL: icmp_eq_ptr
 ; CHECK:       cbz x0, {{LBB.+_2}}
-  %1 = icmp eq i8* %a, null
+  %1 = icmp eq ptr %a, null
   br i1 %1, label %bb1, label %bb2
 bb2:
   ret i32 1

diff  --git a/llvm/test/CodeGen/AArch64/fast-isel-cmpxchg.ll b/llvm/test/CodeGen/AArch64/fast-isel-cmpxchg.ll
index 46909e0b25918..0adc103be27f7 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-cmpxchg.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-cmpxchg.ll
@@ -14,12 +14,12 @@
 ; CHECK-NEXT:     cset [[STATUS]], eq
 ; CHECK-NEXT:     and [[STATUS32:w[0-9]+]], [[STATUS]], #0x1
 ; CHECK-NEXT:     str [[STATUS32]], [x3]
-define i32 @cmpxchg_monotonic_32(i32* %p, i32 %cmp, i32 %new, i32* %ps) #0 {
-  %tmp0 = cmpxchg i32* %p, i32 %cmp, i32 %new monotonic monotonic
+define i32 @cmpxchg_monotonic_32(ptr %p, i32 %cmp, i32 %new, ptr %ps) #0 {
+  %tmp0 = cmpxchg ptr %p, i32 %cmp, i32 %new monotonic monotonic
   %tmp1 = extractvalue { i32, i1 } %tmp0, 0
   %tmp2 = extractvalue { i32, i1 } %tmp0, 1
   %tmp3 = zext i1 %tmp2 to i32
-  store i32 %tmp3, i32* %ps
+  store i32 %tmp3, ptr %ps
   ret i32 %tmp1
 }
 
@@ -39,13 +39,13 @@ define i32 @cmpxchg_monotonic_32(i32* %p, i32 %cmp, i32 %new, i32* %ps) #0 {
 ; CHECK-NEXT:     cset [[STATUS]], eq
 ; CHECK-NEXT:     and [[STATUS32:w[0-9]+]], [[STATUS]], #0x1
 ; CHECK-NEXT:     str [[STATUS32]], [x3]
-define i32 @cmpxchg_acq_rel_32_load(i32* %p, i32 %cmp, i32* %pnew, i32* %ps) #0 {
-  %new = load i32, i32* %pnew
-  %tmp0 = cmpxchg i32* %p, i32 %cmp, i32 %new acq_rel acquire
+define i32 @cmpxchg_acq_rel_32_load(ptr %p, i32 %cmp, ptr %pnew, ptr %ps) #0 {
+  %new = load i32, ptr %pnew
+  %tmp0 = cmpxchg ptr %p, i32 %cmp, i32 %new acq_rel acquire
   %tmp1 = extractvalue { i32, i1 } %tmp0, 0
   %tmp2 = extractvalue { i32, i1 } %tmp0, 1
   %tmp3 = zext i1 %tmp2 to i32
-  store i32 %tmp3, i32* %ps
+  store i32 %tmp3, ptr %ps
   ret i32 %tmp1
 }
 
@@ -63,12 +63,12 @@ define i32 @cmpxchg_acq_rel_32_load(i32* %p, i32 %cmp, i32* %pnew, i32* %ps) #0
 ; CHECK-NEXT:     cset [[STATUS:w[0-9]+]], eq
 ; CHECK-NEXT:     and [[STATUS32:w[0-9]+]], [[STATUS]], #0x1
 ; CHECK-NEXT:     str [[STATUS32]], [x3]
-define i64 @cmpxchg_seq_cst_64(i64* %p, i64 %cmp, i64 %new, i32* %ps) #0 {
-  %tmp0 = cmpxchg i64* %p, i64 %cmp, i64 %new seq_cst seq_cst
+define i64 @cmpxchg_seq_cst_64(ptr %p, i64 %cmp, i64 %new, ptr %ps) #0 {
+  %tmp0 = cmpxchg ptr %p, i64 %cmp, i64 %new seq_cst seq_cst
   %tmp1 = extractvalue { i64, i1 } %tmp0, 0
   %tmp2 = extractvalue { i64, i1 } %tmp0, 1
   %tmp3 = zext i1 %tmp2 to i32
-  store i32 %tmp3, i32* %ps
+  store i32 %tmp3, ptr %ps
   ret i64 %tmp1
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/fast-isel-erase.ll b/llvm/test/CodeGen/AArch64/fast-isel-erase.ll
index e8265bce9fec3..768bbcfef2beb 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-erase.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-erase.ll
@@ -9,8 +9,8 @@ define i32 @test() {
 ; CHECK-NOT: uxth
 
 entry:
-  store i32 undef, i32* undef, align 4
-  %t81 = load i16, i16* undef, align 2
+  store i32 undef, ptr undef, align 4
+  %t81 = load i16, ptr undef, align 2
   call void @callee()
   %t82 = zext i16 %t81 to i32
   %t83 = shl i32 %t82, 16

diff  --git a/llvm/test/CodeGen/AArch64/fast-isel-gep.ll b/llvm/test/CodeGen/AArch64/fast-isel-gep.ll
index 7b1c68beb46e2..9df826a2482e7 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-gep.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-gep.ll
@@ -3,54 +3,54 @@
 
 %struct.foo = type { i32, i64, float, double }
 
-define double* @test_struct(%struct.foo* %f) {
+define ptr @test_struct(ptr %f) {
 ; CHECK-LABEL: test_struct:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    add x0, x0, #24
 ; CHECK-NEXT:    ret
-  %1 = getelementptr inbounds %struct.foo, %struct.foo* %f, i64 0, i32 3
-  ret double* %1
+  %1 = getelementptr inbounds %struct.foo, ptr %f, i64 0, i32 3
+  ret ptr %1
 }
 
-define i32* @test_array1(i32* %a, i64 %i) {
+define ptr @test_array1(ptr %a, i64 %i) {
 ; CHECK-LABEL: test_array1:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov x8, #4
 ; CHECK-NEXT:    madd x0, x1, x8, x0
 ; CHECK-NEXT:    ret
-  %1 = getelementptr inbounds i32, i32* %a, i64 %i
-  ret i32* %1
+  %1 = getelementptr inbounds i32, ptr %a, i64 %i
+  ret ptr %1
 }
 
-define i32* @test_array2(i32* %a) {
+define ptr @test_array2(ptr %a) {
 ; CHECK-LABEL: test_array2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    add x0, x0, #16
 ; CHECK-NEXT:    ret
-  %1 = getelementptr inbounds i32, i32* %a, i64 4
-  ret i32* %1
+  %1 = getelementptr inbounds i32, ptr %a, i64 4
+  ret ptr %1
 }
 
-define i32* @test_array3(i32* %a) {
+define ptr @test_array3(ptr %a) {
 ; CHECK-LABEL: test_array3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    add x0, x0, #1, lsl #12 ; =4096
 ; CHECK-NEXT:    ret
-  %1 = getelementptr inbounds i32, i32* %a, i64 1024
-  ret i32* %1
+  %1 = getelementptr inbounds i32, ptr %a, i64 1024
+  ret ptr %1
 }
 
-define i32* @test_array4(i32* %a) {
+define ptr @test_array4(ptr %a) {
 ; CHECK-LABEL: test_array4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov x8, #4104
 ; CHECK-NEXT:    add x0, x0, x8
 ; CHECK-NEXT:    ret
-  %1 = getelementptr inbounds i32, i32* %a, i64 1026
-  ret i32* %1
+  %1 = getelementptr inbounds i32, ptr %a, i64 1026
+  ret ptr %1
 }
 
-define i32* @test_array5(i32* %a, i32 %i) {
+define ptr @test_array5(ptr %a, i32 %i) {
 ; CHECK-LABEL: test_array5:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $w1 killed $w1 def $x1
@@ -58,6 +58,6 @@ define i32* @test_array5(i32* %a, i32 %i) {
 ; CHECK-NEXT:    sxtw x9, w1
 ; CHECK-NEXT:    madd x0, x9, x8, x0
 ; CHECK-NEXT:    ret
-  %1 = getelementptr inbounds i32, i32* %a, i32 %i
-  ret i32* %1
+  %1 = getelementptr inbounds i32, ptr %a, i32 %i
+  ret ptr %1
 }

diff  --git a/llvm/test/CodeGen/AArch64/fast-isel-int-ext.ll b/llvm/test/CodeGen/AArch64/fast-isel-int-ext.ll
index 4b2cab5fa7289..4eea79a25d7c6 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-int-ext.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-int-ext.ll
@@ -12,8 +12,8 @@ define i64 @load_addr_shift_zext1(i32 %a, i64 %b) {
   %1 = zext i32 %a to i64
   %2 = shl i64 %1, 3
   %3 = add i64 %b, %2
-  %4 = inttoptr i64 %3 to i64*
-  %5 = load i64, i64* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i64, ptr %4
   ret i64 %5
 }
 
@@ -23,8 +23,8 @@ define i64 @load_addr_shift_zext2(i32 zeroext %a, i64 %b) {
   %1 = zext i32 %a to i64
   %2 = shl i64 %1, 3
   %3 = add i64 %b, %2
-  %4 = inttoptr i64 %3 to i64*
-  %5 = load i64, i64* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i64, ptr %4
   ret i64 %5
 }
 
@@ -34,8 +34,8 @@ define i64 @load_addr_shift_zext3(i32 signext %a, i64 %b) {
   %1 = zext i32 %a to i64
   %2 = shl i64 %1, 3
   %3 = add i64 %b, %2
-  %4 = inttoptr i64 %3 to i64*
-  %5 = load i64, i64* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i64, ptr %4
   ret i64 %5
 }
 
@@ -45,8 +45,8 @@ define i64 @load_addr_shift_sext1(i32 %a, i64 %b) {
   %1 = sext i32 %a to i64
   %2 = shl i64 %1, 3
   %3 = add i64 %b, %2
-  %4 = inttoptr i64 %3 to i64*
-  %5 = load i64, i64* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i64, ptr %4
   ret i64 %5
 }
 
@@ -56,8 +56,8 @@ define i64 @load_addr_shift_sext2(i32 zeroext %a, i64 %b) {
   %1 = sext i32 %a to i64
   %2 = shl i64 %1, 3
   %3 = add i64 %b, %2
-  %4 = inttoptr i64 %3 to i64*
-  %5 = load i64, i64* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i64, ptr %4
   ret i64 %5
 }
 
@@ -67,8 +67,8 @@ define i64 @load_addr_shift_sext3(i32 signext %a, i64 %b) {
   %1 = sext i32 %a to i64
   %2 = shl i64 %1, 3
   %3 = add i64 %b, %2
-  %4 = inttoptr i64 %3 to i64*
-  %5 = load i64, i64* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i64, ptr %4
   ret i64 %5
 }
 
@@ -81,8 +81,8 @@ define i64 @load_addr_mul_zext1(i32 %a, i64 %b) {
   %1 = zext i32 %a to i64
   %2 = mul i64 %1, 8
   %3 = add i64 %b, %2
-  %4 = inttoptr i64 %3 to i64*
-  %5 = load i64, i64* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i64, ptr %4
   ret i64 %5
 }
 
@@ -92,8 +92,8 @@ define i64 @load_addr_mul_zext2(i32 zeroext %a, i64 %b) {
   %1 = zext i32 %a to i64
   %2 = mul i64 %1, 8
   %3 = add i64 %b, %2
-  %4 = inttoptr i64 %3 to i64*
-  %5 = load i64, i64* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i64, ptr %4
   ret i64 %5
 }
 
@@ -103,8 +103,8 @@ define i64 @load_addr_mul_zext3(i32 signext %a, i64 %b) {
   %1 = zext i32 %a to i64
   %2 = mul i64 %1, 8
   %3 = add i64 %b, %2
-  %4 = inttoptr i64 %3 to i64*
-  %5 = load i64, i64* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i64, ptr %4
   ret i64 %5
 }
 
@@ -114,8 +114,8 @@ define i64 @load_addr_mul_sext1(i32 %a, i64 %b) {
   %1 = sext i32 %a to i64
   %2 = mul i64 %1, 8
   %3 = add i64 %b, %2
-  %4 = inttoptr i64 %3 to i64*
-  %5 = load i64, i64* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i64, ptr %4
   ret i64 %5
 }
 
@@ -125,8 +125,8 @@ define i64 @load_addr_mul_sext2(i32 zeroext %a, i64 %b) {
   %1 = sext i32 %a to i64
   %2 = mul i64 %1, 8
   %3 = add i64 %b, %2
-  %4 = inttoptr i64 %3 to i64*
-  %5 = load i64, i64* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i64, ptr %4
   ret i64 %5
 }
 
@@ -136,8 +136,8 @@ define i64 @load_addr_mul_sext3(i32 signext %a, i64 %b) {
   %1 = sext i32 %a to i64
   %2 = mul i64 %1, 8
   %3 = add i64 %b, %2
-  %4 = inttoptr i64 %3 to i64*
-  %5 = load i64, i64* %4
+  %4 = inttoptr i64 %3 to ptr
+  %5 = load i64, ptr %4
   ret i64 %5
 }
 
@@ -152,8 +152,8 @@ define i32 @load_unscaled_zext_i8_to_i32(i64 %a) {
 ; CHECK:       ldurb w0, [x0, #-8]
 ; CHECK-NOT:   uxtb
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i8*
-  %3 = load i8, i8* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i8, ptr %2
   %4 = zext i8 %3 to i32
   ret i32 %4
 }
@@ -163,8 +163,8 @@ define i32 @load_unscaled_zext_i16_to_i32(i64 %a) {
 ; CHECK:       ldurh w0, [x0, #-8]
 ; CHECK-NOT:   uxth
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i16*
-  %3 = load i16, i16* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i16, ptr %2
   %4 = zext i16 %3 to i32
   ret i32 %4
 }
@@ -174,8 +174,8 @@ define i64 @load_unscaled_zext_i8_to_i64(i64 %a) {
 ; CHECK:       ldurb w0, [x0, #-8]
 ; CHECK-NOT:   uxtb
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i8*
-  %3 = load i8, i8* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i8, ptr %2
   %4 = zext i8 %3 to i64
   ret i64 %4
 }
@@ -185,8 +185,8 @@ define i64 @load_unscaled_zext_i16_to_i64(i64 %a) {
 ; CHECK:       ldurh w0, [x0, #-8]
 ; CHECK-NOT:   uxth
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i16*
-  %3 = load i16, i16* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i16, ptr %2
   %4 = zext i16 %3 to i64
   ret i64 %4
 }
@@ -196,8 +196,8 @@ define i64 @load_unscaled_zext_i32_to_i64(i64 %a) {
 ; CHECK:       ldur w0, [x0, #-8]
 ; CHECK-NOT:   uxtw
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i32*
-  %3 = load i32, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i32, ptr %2
   %4 = zext i32 %3 to i64
   ret i64 %4
 }
@@ -207,8 +207,8 @@ define i32 @load_unscaled_sext_i8_to_i32(i64 %a) {
 ; CHECK:       ldursb w0, [x0, #-8]
 ; CHECK-NOT:   sxtb
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i8*
-  %3 = load i8, i8* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i8, ptr %2
   %4 = sext i8 %3 to i32
   ret i32 %4
 }
@@ -218,8 +218,8 @@ define i32 @load_unscaled_sext_i16_to_i32(i64 %a) {
 ; CHECK:       ldursh w0, [x0, #-8]
 ; CHECK-NOT:   sxth
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i16*
-  %3 = load i16, i16* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i16, ptr %2
   %4 = sext i16 %3 to i32
   ret i32 %4
 }
@@ -229,8 +229,8 @@ define i64 @load_unscaled_sext_i8_to_i64(i64 %a) {
 ; CHECK:       ldursb x0, [x0, #-8]
 ; CHECK-NOT:   sxtb
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i8*
-  %3 = load i8, i8* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i8, ptr %2
   %4 = sext i8 %3 to i64
   ret i64 %4
 }
@@ -240,8 +240,8 @@ define i64 @load_unscaled_sext_i16_to_i64(i64 %a) {
 ; CHECK:       ldursh x0, [x0, #-8]
 ; CHECK-NOT:   sxth
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i16*
-  %3 = load i16, i16* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i16, ptr %2
   %4 = sext i16 %3 to i64
   ret i64 %4
 }
@@ -251,8 +251,8 @@ define i64 @load_unscaled_sext_i32_to_i64(i64 %a) {
 ; CHECK:       ldursw x0, [x0, #-8]
 ; CHECK-NOT:   sxtw
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i32*
-  %3 = load i32, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i32, ptr %2
   %4 = sext i32 %3 to i64
   ret i64 %4
 }
@@ -263,8 +263,8 @@ define i32 @load_register_zext_i8_to_i32(i64 %a, i64 %b) {
 ; CHECK:       ldrb w0, [x0, x1]
 ; CHECK-NOT:   uxtb
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i8*
-  %3 = load i8, i8* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i8, ptr %2
   %4 = zext i8 %3 to i32
   ret i32 %4
 }
@@ -274,8 +274,8 @@ define i32 @load_register_zext_i16_to_i32(i64 %a, i64 %b) {
 ; CHECK:       ldrh w0, [x0, x1]
 ; CHECK-NOT:   uxth
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i16*
-  %3 = load i16, i16* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i16, ptr %2
   %4 = zext i16 %3 to i32
   ret i32 %4
 }
@@ -285,8 +285,8 @@ define i64 @load_register_zext_i8_to_i64(i64 %a, i64 %b) {
 ; CHECK:       ldrb w0, [x0, x1]
 ; CHECK-NOT:   uxtb
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i8*
-  %3 = load i8, i8* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i8, ptr %2
   %4 = zext i8 %3 to i64
   ret i64 %4
 }
@@ -296,8 +296,8 @@ define i64 @load_register_zext_i16_to_i64(i64 %a, i64 %b) {
 ; CHECK:       ldrh w0, [x0, x1]
 ; CHECK-NOT:   uxth
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i16*
-  %3 = load i16, i16* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i16, ptr %2
   %4 = zext i16 %3 to i64
   ret i64 %4
 }
@@ -307,8 +307,8 @@ define i64 @load_register_zext_i32_to_i64(i64 %a, i64 %b) {
 ; CHECK:       ldr w0, [x0, x1]
 ; CHECK-NOT:   uxtw
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i32*
-  %3 = load i32, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i32, ptr %2
   %4 = zext i32 %3 to i64
   ret i64 %4
 }
@@ -318,8 +318,8 @@ define i32 @load_register_sext_i8_to_i32(i64 %a, i64 %b) {
 ; CHECK:       ldrsb w0, [x0, x1]
 ; CHECK-NOT:   sxtb
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i8*
-  %3 = load i8, i8* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i8, ptr %2
   %4 = sext i8 %3 to i32
   ret i32 %4
 }
@@ -329,8 +329,8 @@ define i32 @load_register_sext_i16_to_i32(i64 %a, i64 %b) {
 ; CHECK:       ldrsh w0, [x0, x1]
 ; CHECK-NOT:   sxth
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i16*
-  %3 = load i16, i16* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i16, ptr %2
   %4 = sext i16 %3 to i32
   ret i32 %4
 }
@@ -340,8 +340,8 @@ define i64 @load_register_sext_i8_to_i64(i64 %a, i64 %b) {
 ; CHECK:       ldrsb x0, [x0, x1]
 ; CHECK-NOT:   sxtb
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i8*
-  %3 = load i8, i8* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i8, ptr %2
   %4 = sext i8 %3 to i64
   ret i64 %4
 }
@@ -351,8 +351,8 @@ define i64 @load_register_sext_i16_to_i64(i64 %a, i64 %b) {
 ; CHECK:       ldrsh x0, [x0, x1]
 ; CHECK-NOT:   sxth
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i16*
-  %3 = load i16, i16* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i16, ptr %2
   %4 = sext i16 %3 to i64
   ret i64 %4
 }
@@ -362,8 +362,8 @@ define i64 @load_register_sext_i32_to_i64(i64 %a, i64 %b) {
 ; CHECK:       ldrsw x0, [x0, x1]
 ; CHECK-NOT:   sxtw
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i32*
-  %3 = load i32, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i32, ptr %2
   %4 = sext i32 %3 to i64
   ret i64 %4
 }
@@ -375,8 +375,8 @@ define i32 @load_extend_zext_i8_to_i32(i64 %a, i32 %b) {
 ; CHECK-NOT:   uxtb
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i8*
-  %4 = load i8, i8* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i8, ptr %3
   %5 = zext i8 %4 to i32
   ret i32 %5
 }
@@ -387,8 +387,8 @@ define i32 @load_extend_zext_i16_to_i32(i64 %a, i32 %b) {
 ; CHECK-NOT:   uxth
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i16*
-  %4 = load i16, i16* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i16, ptr %3
   %5 = zext i16 %4 to i32
   ret i32 %5
 }
@@ -399,8 +399,8 @@ define i64 @load_extend_zext_i8_to_i64(i64 %a, i32 %b) {
 ; CHECK-NOT:   uxtb
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i8*
-  %4 = load i8, i8* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i8, ptr %3
   %5 = zext i8 %4 to i64
   ret i64 %5
 }
@@ -411,8 +411,8 @@ define i64 @load_extend_zext_i16_to_i64(i64 %a, i32 %b) {
 ; CHECK-NOT:   uxth
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i16*
-  %4 = load i16, i16* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i16, ptr %3
   %5 = zext i16 %4 to i64
   ret i64 %5
 }
@@ -423,8 +423,8 @@ define i64 @load_extend_zext_i32_to_i64(i64 %a, i32 %b) {
 ; CHECK-NOT:   uxtw
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i32*
-  %4 = load i32, i32* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i32, ptr %3
   %5 = zext i32 %4 to i64
   ret i64 %5
 }
@@ -435,8 +435,8 @@ define i32 @load_extend_sext_i8_to_i32(i64 %a, i32 %b) {
 ; CHECK-NOT:   sxtb
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i8*
-  %4 = load i8, i8* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i8, ptr %3
   %5 = sext i8 %4 to i32
   ret i32 %5
 }
@@ -447,8 +447,8 @@ define i32 @load_extend_sext_i16_to_i32(i64 %a, i32 %b) {
 ; CHECK-NOT:   sxth
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i16*
-  %4 = load i16, i16* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i16, ptr %3
   %5 = sext i16 %4 to i32
   ret i32 %5
 }
@@ -459,8 +459,8 @@ define i64 @load_extend_sext_i8_to_i64(i64 %a, i32 %b) {
 ; CHECK-NOT:   sxtb
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i8*
-  %4 = load i8, i8* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i8, ptr %3
   %5 = sext i8 %4 to i64
   ret i64 %5
 }
@@ -471,8 +471,8 @@ define i64 @load_extend_sext_i16_to_i64(i64 %a, i32 %b) {
 ; CHECK-NOT:   sxth
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i16*
-  %4 = load i16, i16* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i16, ptr %3
   %5 = sext i16 %4 to i64
   ret i64 %5
 }
@@ -483,8 +483,8 @@ define i64 @load_extend_sext_i32_to_i64(i64 %a, i32 %b) {
 ; CHECK-NOT:   sxtw
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i32*
-  %4 = load i32, i32* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i32, ptr %3
   %5 = sext i32 %4 to i64
   ret i64 %5
 }

diff  --git a/llvm/test/CodeGen/AArch64/fast-isel-int-ext2.ll b/llvm/test/CodeGen/AArch64/fast-isel-int-ext2.ll
index b974f412d8491..98e020e01da60 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-int-ext2.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-int-ext2.ll
@@ -10,8 +10,8 @@ define i32 @load_unscaled_zext_i8_to_i32(i64 %a) {
 ; CHECK:       ldurb w0, [x0, #-8]
 ; CHECK-NOT:   uxtb
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i8*
-  %3 = load i8, i8* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i8, ptr %2
   br label %bb2
 
 bb2:
@@ -24,8 +24,8 @@ define i32 @load_unscaled_zext_i16_to_i32(i64 %a) {
 ; CHECK:       ldurh w0, [x0, #-8]
 ; CHECK-NOT:   uxth
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i16*
-  %3 = load i16, i16* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i16, ptr %2
   br label %bb2
 
 bb2:
@@ -38,8 +38,8 @@ define i64 @load_unscaled_zext_i8_to_i64(i64 %a) {
 ; CHECK:       ldurb w0, [x0, #-8]
 ; CHECK-NOT:   uxtb
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i8*
-  %3 = load i8, i8* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i8, ptr %2
   br label %bb2
 
 bb2:
@@ -52,8 +52,8 @@ define i64 @load_unscaled_zext_i16_to_i64(i64 %a) {
 ; CHECK:       ldurh w0, [x0, #-8]
 ; CHECK-NOT:   uxth
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i16*
-  %3 = load i16, i16* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i16, ptr %2
   br label %bb2
 
 bb2:
@@ -66,8 +66,8 @@ define i64 @load_unscaled_zext_i32_to_i64(i64 %a) {
 ; CHECK:       ldur w0, [x0, #-8]
 ; CHECK-NOT:   uxtw
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i32*
-  %3 = load i32, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i32, ptr %2
   br label %bb2
 
 bb2:
@@ -80,8 +80,8 @@ define i32 @load_unscaled_sext_i8_to_i32(i64 %a) {
 ; CHECK:       ldursb w0, [x0, #-8]
 ; CHECK-NOT:   sxtb
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i8*
-  %3 = load i8, i8* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i8, ptr %2
   br label %bb2
 
 bb2:
@@ -94,8 +94,8 @@ define i32 @load_unscaled_sext_i16_to_i32(i64 %a) {
 ; CHECK:       ldursh w0, [x0, #-8]
 ; CHECK-NOT:   sxth
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i16*
-  %3 = load i16, i16* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i16, ptr %2
   br label %bb2
 
 bb2:
@@ -108,8 +108,8 @@ define i64 @load_unscaled_sext_i8_to_i64(i64 %a) {
 ; CHECK:       ldursb x0, [x0, #-8]
 ; CHECK-NOT:   sxtb
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i8*
-  %3 = load i8, i8* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i8, ptr %2
   br label %bb2
 
 bb2:
@@ -122,8 +122,8 @@ define i64 @load_unscaled_sext_i16_to_i64(i64 %a) {
 ; CHECK:       ldursh x0, [x0, #-8]
 ; CHECK-NOT:   sxth
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i16*
-  %3 = load i16, i16* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i16, ptr %2
   br label %bb2
 
 bb2:
@@ -136,8 +136,8 @@ define i64 @load_unscaled_sext_i32_to_i64(i64 %a) {
 ; CHECK:       ldursw x0, [x0, #-8]
 ; CHECK-NOT:   sxtw
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i32*
-  %3 = load i32, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i32, ptr %2
   br label %bb2
 
 bb2:
@@ -151,8 +151,8 @@ define i32 @load_register_zext_i8_to_i32(i64 %a, i64 %b) {
 ; CHECK:       ldrb w0, [x0, x1]
 ; CHECK-NOT:   uxtb
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i8*
-  %3 = load i8, i8* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i8, ptr %2
   br label %bb2
 
 bb2:
@@ -165,8 +165,8 @@ define i32 @load_register_zext_i16_to_i32(i64 %a, i64 %b) {
 ; CHECK:       ldrh w0, [x0, x1]
 ; CHECK-NOT:   uxth
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i16*
-  %3 = load i16, i16* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i16, ptr %2
   br label %bb2
 
 bb2:
@@ -179,8 +179,8 @@ define i64 @load_register_zext_i8_to_i64(i64 %a, i64 %b) {
 ; CHECK:       ldrb w0, [x0, x1]
 ; CHECK-NOT:   uxtb
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i8*
-  %3 = load i8, i8* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i8, ptr %2
   br label %bb2
 
 bb2:
@@ -193,8 +193,8 @@ define i64 @load_register_zext_i16_to_i64(i64 %a, i64 %b) {
 ; CHECK:       ldrh w0, [x0, x1]
 ; CHECK-NOT:   uxth
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i16*
-  %3 = load i16, i16* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i16, ptr %2
   br label %bb2
 
 bb2:
@@ -207,8 +207,8 @@ define i64 @load_register_zext_i32_to_i64(i64 %a, i64 %b) {
 ; CHECK:       ldr w0, [x0, x1]
 ; CHECK-NOT:   uxtw
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i32*
-  %3 = load i32, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i32, ptr %2
   br label %bb2
 
 bb2:
@@ -221,8 +221,8 @@ define i32 @load_register_sext_i8_to_i32(i64 %a, i64 %b) {
 ; CHECK:       ldrsb w0, [x0, x1]
 ; CHECK-NOT:   sxtb
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i8*
-  %3 = load i8, i8* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i8, ptr %2
   br label %bb2
 
 bb2:
@@ -235,8 +235,8 @@ define i32 @load_register_sext_i16_to_i32(i64 %a, i64 %b) {
 ; CHECK:       ldrsh w0, [x0, x1]
 ; CHECK-NOT:   sxth
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i16*
-  %3 = load i16, i16* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i16, ptr %2
   br label %bb2
 
 bb2:
@@ -249,8 +249,8 @@ define i64 @load_register_sext_i8_to_i64(i64 %a, i64 %b) {
 ; CHECK:       ldrsb x0, [x0, x1]
 ; CHECK-NOT:   sxtb
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i8*
-  %3 = load i8, i8* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i8, ptr %2
   br label %bb2
 
 bb2:
@@ -263,8 +263,8 @@ define i64 @load_register_sext_i16_to_i64(i64 %a, i64 %b) {
 ; CHECK:       ldrsh x0, [x0, x1]
 ; CHECK-NOT:   sxth
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i16*
-  %3 = load i16, i16* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i16, ptr %2
   br label %bb2
 
 bb2:
@@ -277,8 +277,8 @@ define i64 @load_register_sext_i32_to_i64(i64 %a, i64 %b) {
 ; CHECK:       ldrsw x0, [x0, x1]
 ; CHECK-NOT:   sxtw
   %1 = add i64 %a, %b
-  %2 = inttoptr i64 %1 to i32*
-  %3 = load i32, i32* %2
+  %2 = inttoptr i64 %1 to ptr
+  %3 = load i32, ptr %2
   br label %bb2
 
 bb2:
@@ -293,8 +293,8 @@ define i32 @load_extend_zext_i8_to_i32(i64 %a, i32 %b) {
 ; CHECK-NOT:   uxtb
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i8*
-  %4 = load i8, i8* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i8, ptr %3
   br label %bb2
 
 bb2:
@@ -308,8 +308,8 @@ define i32 @load_extend_zext_i16_to_i32(i64 %a, i32 %b) {
 ; CHECK-NOT:   uxth
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i16*
-  %4 = load i16, i16* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i16, ptr %3
   br label %bb2
 
 bb2:
@@ -323,8 +323,8 @@ define i64 @load_extend_zext_i8_to_i64(i64 %a, i32 %b) {
 ; CHECK-NOT:   uxtb
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i8*
-  %4 = load i8, i8* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i8, ptr %3
   br label %bb2
 
 bb2:
@@ -338,8 +338,8 @@ define i64 @load_extend_zext_i16_to_i64(i64 %a, i32 %b) {
 ; CHECK-NOT:   uxth
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i16*
-  %4 = load i16, i16* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i16, ptr %3
   br label %bb2
 
 bb2:
@@ -353,8 +353,8 @@ define i64 @load_extend_zext_i32_to_i64(i64 %a, i32 %b) {
 ; CHECK-NOT:   uxtw
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i32*
-  %4 = load i32, i32* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i32, ptr %3
   br label %bb2
 
 bb2:
@@ -368,8 +368,8 @@ define i32 @load_extend_sext_i8_to_i32(i64 %a, i32 %b) {
 ; CHECK-NOT:   sxtb
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i8*
-  %4 = load i8, i8* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i8, ptr %3
   br label %bb2
 
 bb2:
@@ -383,8 +383,8 @@ define i32 @load_extend_sext_i16_to_i32(i64 %a, i32 %b) {
 ; CHECK-NOT:   sxth
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i16*
-  %4 = load i16, i16* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i16, ptr %3
   br label %bb2
 
 bb2:
@@ -398,8 +398,8 @@ define i64 @load_extend_sext_i8_to_i64(i64 %a, i32 %b) {
 ; CHECK-NOT:   sxtb
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i8*
-  %4 = load i8, i8* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i8, ptr %3
   br label %bb2
 
 bb2:
@@ -413,8 +413,8 @@ define i64 @load_extend_sext_i16_to_i64(i64 %a, i32 %b) {
 ; CHECK-NOT:   sxth
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i16*
-  %4 = load i16, i16* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i16, ptr %3
   br label %bb2
 
 bb2:
@@ -428,8 +428,8 @@ define i64 @load_extend_sext_i32_to_i64(i64 %a, i32 %b) {
 ; CHECK-NOT:   sxtw
   %1 = sext i32 %b to i64
   %2 = add i64 %a, %1
-  %3 = inttoptr i64 %2 to i32*
-  %4 = load i32, i32* %3
+  %3 = inttoptr i64 %2 to ptr
+  %4 = load i32, ptr %3
   br label %bb2
 
 bb2:

diff  --git a/llvm/test/CodeGen/AArch64/fast-isel-int-ext3.ll b/llvm/test/CodeGen/AArch64/fast-isel-int-ext3.ll
index 83740c8af5f27..7602ce3fb1c7f 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-int-ext3.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-int-ext3.ll
@@ -10,8 +10,8 @@ define i32 @load_unscaled_zext_i8_to_i32(i64 %a) {
 ; CHECK:       ldurb [[REG:w[0-9]+]], [x0, #-8]
 ; CHECK:       uxtb w0, [[REG]]
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i8 addrspace(256)*
-  %3 = load i8, i8 addrspace(256)* %2
+  %2 = inttoptr i64 %1 to ptr addrspace(256)
+  %3 = load i8, ptr addrspace(256) %2
   %4 = zext i8 %3 to i32
   ret i32 %4
 }
@@ -21,8 +21,8 @@ define i32 @load_unscaled_zext_i16_to_i32(i64 %a) {
 ; CHECK:       ldurh [[REG:w[0-9]+]], [x0, #-8]
 ; CHECK:       uxth w0, [[REG]]
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i16 addrspace(256)*
-  %3 = load i16, i16 addrspace(256)* %2
+  %2 = inttoptr i64 %1 to ptr addrspace(256)
+  %3 = load i16, ptr addrspace(256) %2
   %4 = zext i16 %3 to i32
   ret i32 %4
 }
@@ -32,8 +32,8 @@ define i64 @load_unscaled_zext_i8_to_i64(i64 %a) {
 ; CHECK:       ldurb w[[REG:[0-9]+]], [x0, #-8]
 ; CHECK:       ubfx x0, x[[REG]], #0, #8
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i8 addrspace(256)*
-  %3 = load i8, i8 addrspace(256)* %2
+  %2 = inttoptr i64 %1 to ptr addrspace(256)
+  %3 = load i8, ptr addrspace(256) %2
   %4 = zext i8 %3 to i64
   ret i64 %4
 }
@@ -43,8 +43,8 @@ define i64 @load_unscaled_zext_i16_to_i64(i64 %a) {
 ; CHECK:       ldurh w[[REG:[0-9]+]], [x0, #-8]
 ; CHECK:       ubfx x0, x[[REG]], #0, #16
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i16 addrspace(256)*
-  %3 = load i16, i16 addrspace(256)* %2
+  %2 = inttoptr i64 %1 to ptr addrspace(256)
+  %3 = load i16, ptr addrspace(256) %2
   %4 = zext i16 %3 to i64
   ret i64 %4
 }
@@ -54,8 +54,8 @@ define i64 @load_unscaled_zext_i32_to_i64(i64 %a) {
 ; CHECK:       ldur w[[REG:[0-9]+]], [x0, #-8]
 ; CHECK:       ubfx x0, x[[REG]], #0, #32
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i32 addrspace(256)*
-  %3 = load i32, i32 addrspace(256)* %2
+  %2 = inttoptr i64 %1 to ptr addrspace(256)
+  %3 = load i32, ptr addrspace(256) %2
   %4 = zext i32 %3 to i64
   ret i64 %4
 }
@@ -65,8 +65,8 @@ define i32 @load_unscaled_sext_i8_to_i32(i64 %a) {
 ; CHECK:       ldurb [[REG:w[0-9]+]], [x0, #-8]
 ; CHECK:       sxtb w0, [[REG]]
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i8 addrspace(256)*
-  %3 = load i8, i8 addrspace(256)* %2
+  %2 = inttoptr i64 %1 to ptr addrspace(256)
+  %3 = load i8, ptr addrspace(256) %2
   %4 = sext i8 %3 to i32
   ret i32 %4
 }
@@ -76,8 +76,8 @@ define i32 @load_unscaled_sext_i16_to_i32(i64 %a) {
 ; CHECK:       ldurh [[REG:w[0-9]+]], [x0, #-8]
 ; CHECK:       sxth w0, [[REG]]
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i16 addrspace(256)*
-  %3 = load i16, i16 addrspace(256)* %2
+  %2 = inttoptr i64 %1 to ptr addrspace(256)
+  %3 = load i16, ptr addrspace(256) %2
   %4 = sext i16 %3 to i32
   ret i32 %4
 }
@@ -87,8 +87,8 @@ define i64 @load_unscaled_sext_i8_to_i64(i64 %a) {
 ; CHECK:       ldurb [[REG:w[0-9]+]], [x0, #-8]
 ; CHECK:       sxtb x0, [[REG]]
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i8 addrspace(256)*
-  %3 = load i8, i8 addrspace(256)* %2
+  %2 = inttoptr i64 %1 to ptr addrspace(256)
+  %3 = load i8, ptr addrspace(256) %2
   %4 = sext i8 %3 to i64
   ret i64 %4
 }
@@ -98,8 +98,8 @@ define i64 @load_unscaled_sext_i16_to_i64(i64 %a) {
 ; CHECK:       ldurh [[REG:w[0-9]+]], [x0, #-8]
 ; CHECK:       sxth x0, [[REG]]
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i16 addrspace(256)*
-  %3 = load i16, i16 addrspace(256)* %2
+  %2 = inttoptr i64 %1 to ptr addrspace(256)
+  %3 = load i16, ptr addrspace(256) %2
   %4 = sext i16 %3 to i64
   ret i64 %4
 }
@@ -109,8 +109,8 @@ define i64 @load_unscaled_sext_i32_to_i64(i64 %a) {
 ; CHECK:       ldur [[REG:w[0-9]+]], [x0, #-8]
 ; CHECK:       sxtw x0, [[REG]]
   %1 = sub i64 %a, 8
-  %2 = inttoptr i64 %1 to i32 addrspace(256)*
-  %3 = load i32, i32 addrspace(256)* %2
+  %2 = inttoptr i64 %1 to ptr addrspace(256)
+  %3 = load i32, ptr addrspace(256) %2
   %4 = sext i32 %3 to i64
   ret i64 %4
 }

diff  --git a/llvm/test/CodeGen/AArch64/fast-isel-int-ext5.ll b/llvm/test/CodeGen/AArch64/fast-isel-int-ext5.ll
index 0f9ec62811df7..569874ac5cdc6 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-int-ext5.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-int-ext5.ll
@@ -1,9 +1,9 @@
 ; RUN: llc -mtriple=aarch64-apple-darwin -O0 -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s
 
 ; CHECK-LABEL: int_ext_opt
-define i64 @int_ext_opt(i8* %addr, i1 %c1, i1 %c2) {
+define i64 @int_ext_opt(ptr %addr, i1 %c1, i1 %c2) {
 entry:
-  %0 = load i8, i8* %addr
+  %0 = load i8, ptr %addr
   br i1 %c1, label %bb1, label %bb2
 
 bb1:

diff  --git a/llvm/test/CodeGen/AArch64/fast-isel-memcpy.ll b/llvm/test/CodeGen/AArch64/fast-isel-memcpy.ll
index cb084a39b3e3b..b7971af9906c2 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-memcpy.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-memcpy.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s
 
 ; Test that we don't segfault.
-define void @test(i64 %a, i8* %b) {
+define void @test(i64 %a, ptr %b) {
 ; CHECK-LABEL: test:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    and x8, x0, #0x7fffffffffffffff
@@ -10,9 +10,9 @@ define void @test(i64 %a, i8* %b) {
 ; CHECK-NEXT:    str x9, [x8]
 ; CHECK-NEXT:    ret
   %1 = and i64 %a, 9223372036854775807
-  %2 = inttoptr i64 %1 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %2, i8* align 8 %b, i64 8, i1 false)
+  %2 = inttoptr i64 %1 to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %2, ptr align 8 %b, i64 8, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)

diff  --git a/llvm/test/CodeGen/AArch64/fastisel-debugvalue-undef.ll b/llvm/test/CodeGen/AArch64/fastisel-debugvalue-undef.ll
index aceb85f4dd7f0..cccede6a50459 100644
--- a/llvm/test/CodeGen/AArch64/fastisel-debugvalue-undef.ll
+++ b/llvm/test/CodeGen/AArch64/fastisel-debugvalue-undef.ll
@@ -5,7 +5,7 @@
 target triple = "arm64-apple-ios13.4.0"
 define void @foo() !dbg !6 {
   ; CHECK: DBG_VALUE $noreg, $noreg, !"1", !DIExpression()
-  call void @llvm.dbg.value(metadata i32* undef, metadata !9, metadata !DIExpression()), !dbg !11
+  call void @llvm.dbg.value(metadata ptr undef, metadata !9, metadata !DIExpression()), !dbg !11
   ret void, !dbg !12
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/fcopysign.ll b/llvm/test/CodeGen/AArch64/fcopysign.ll
index 74d1818b6a4ab..6d5389389db5c 100644
--- a/llvm/test/CodeGen/AArch64/fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/fcopysign.ll
@@ -46,7 +46,7 @@ define fp128 @copysign0() {
 ; CHECK-NONEON-NEXT:    ldr q0, [sp], #16
 ; CHECK-NONEON-NEXT:    ret
 entry:
-  %v = load double, double* @val_double, align 8
+  %v = load double, ptr @val_double, align 8
   %conv = fpext double %v to fp128
   %call = tail call fp128 @llvm.copysign.f128(fp128 0xL00000000000000007FFF000000000000, fp128 %conv) #2
   ret fp128 %call
@@ -85,8 +85,8 @@ define fp128@copysign1() {
 ; CHECK-NONEON-NEXT:    ldr q0, [sp], #16
 ; CHECK-NONEON-NEXT:    ret
 entry:
-  %v0 = load fp128, fp128* @val_fp128, align 16
-  %v1 = load float, float* @val_float, align 4
+  %v0 = load fp128, ptr @val_fp128, align 16
+  %v1 = load float, ptr @val_float, align 4
   %conv = fpext float %v1 to fp128
   %call = tail call fp128 @llvm.copysign.f128(fp128 %v0, fp128 %conv)
   ret fp128 %call

diff  --git a/llvm/test/CodeGen/AArch64/flags-multiuse.ll b/llvm/test/CodeGen/AArch64/flags-multiuse.ll
index a13f7e1e34acc..62aaa9e0e8250 100644
--- a/llvm/test/CodeGen/AArch64/flags-multiuse.ll
+++ b/llvm/test/CodeGen/AArch64/flags-multiuse.ll
@@ -20,7 +20,7 @@ define i32 @test_multiflag(i32 %n, i32 %m, i32 %o) {
 ; CHECK: mov [[RHSCOPY:w[0-9]+]], [[RHS]]
 ; CHECK: mov [[LHSCOPY:w[0-9]+]], [[LHS]]
 
-  store i32 %val, i32* @var
+  store i32 %val, ptr @var
 
   call void @bar()
 ; CHECK: bl bar

diff  --git a/llvm/test/CodeGen/AArch64/floatdp_2source.ll b/llvm/test/CodeGen/AArch64/floatdp_2source.ll
index 30e2856a4f5b5..c2f977ce53ed7 100644
--- a/llvm/test/CodeGen/AArch64/floatdp_2source.ll
+++ b/llvm/test/CodeGen/AArch64/floatdp_2source.ll
@@ -5,7 +5,7 @@
 
 define void @testfloat() {
 ; CHECK-LABEL: testfloat:
-  %val1 = load float, float* @varfloat
+  %val1 = load float, ptr @varfloat
 
   %val2 = fadd float %val1, %val1
 ; CHECK: fadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
@@ -19,21 +19,21 @@ define void @testfloat() {
   %val5 = fsub float %val4, %val2
 ; CHECK: fsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
 
-  store volatile float %val5, float* @varfloat
+  store volatile float %val5, ptr @varfloat
 
 ; These will be enabled with the implementation of floating-point litpool entries.
   %val6 = fmul float %val1, %val2
   %val7 = fsub float -0.0, %val6
 ; CHECK: fnmul {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
 
-  store volatile float %val7, float* @varfloat
+  store volatile float %val7, ptr @varfloat
 
   ret void
 }
 
 define void @testdouble() {
 ; CHECK-LABEL: testdouble:
-  %val1 = load double, double* @vardouble
+  %val1 = load double, ptr @vardouble
 
   %val2 = fadd double %val1, %val1
 ; CHECK: fadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
@@ -47,14 +47,14 @@ define void @testdouble() {
   %val5 = fsub double %val4, %val2
 ; CHECK: fsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
 
-  store volatile double %val5, double* @vardouble
+  store volatile double %val5, ptr @vardouble
 
 ; These will be enabled with the implementation of doubleing-point litpool entries.
    %val6 = fmul double %val1, %val2
    %val7 = fsub double -0.0, %val6
 ; CHECK: fnmul {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
 
-   store volatile double %val7, double* @vardouble
+   store volatile double %val7, ptr @vardouble
 
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/fold-global-offsets.ll b/llvm/test/CodeGen/AArch64/fold-global-offsets.ll
index e469adf5484a7..34b13faad69b7 100644
--- a/llvm/test/CodeGen/AArch64/fold-global-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/fold-global-offsets.ll
@@ -4,7 +4,7 @@
 
 @x1 = external hidden global [2 x i64]
 @x2 = external hidden global [16777216 x i64]
-@x3 = external hidden global { [9 x i8*], [8 x i8*] }
+@x3 = external hidden global { [9 x ptr], [8 x ptr] }
 
 define i64 @f1() {
 ; CHECK-LABEL: f1:
@@ -18,7 +18,7 @@ define i64 @f1() {
 ; GISEL-NEXT:    adrp x8, x1+16
 ; GISEL-NEXT:    ldr x0, [x8, :lo12:x1+16]
 ; GISEL-NEXT:    ret
-  %l = load i64, i64* getelementptr ([2 x i64], [2 x i64]* @x1, i64 0, i64 2)
+  %l = load i64, ptr getelementptr ([2 x i64], ptr @x1, i64 0, i64 2)
   ret i64 %l
 }
 
@@ -37,7 +37,7 @@ define i64 @f2() {
 ; GISEL-NEXT:    ldr x0, [x8, #24]
 ; GISEL-NEXT:    ret
 
-  %l = load i64, i64* getelementptr ([2 x i64], [2 x i64]* @x1, i64 0, i64 3)
+  %l = load i64, ptr getelementptr ([2 x i64], ptr @x1, i64 0, i64 3)
   ret i64 %l
 }
 
@@ -55,7 +55,7 @@ define i64 @f3() {
 ; GISEL-NEXT:    add x8, x8, :lo12:x1+1
 ; GISEL-NEXT:    ldr x0, [x8]
 ; GISEL-NEXT:    ret
-  %l = load i64, i64* bitcast (i8* getelementptr (i8, i8* bitcast ([2 x i64]* @x1 to i8*), i64 1) to i64*)
+  %l = load i64, ptr getelementptr (i8, ptr @x1, i64 1)
   ret i64 %l
 }
 
@@ -77,7 +77,7 @@ define [2 x i64] @f4() {
 ; GISEL-NEXT:    ldr x0, [x8, :lo12:x2+8]
 ; GISEL-NEXT:    ldr x1, [x9, #8]
 ; GISEL-NEXT:    ret
-  %l = load [2 x i64], [2 x i64]* bitcast (i8* getelementptr (i8, i8* bitcast ([16777216 x i64]* @x2 to i8*), i64 8) to [2 x i64]*)
+  %l = load [2 x i64], ptr getelementptr (i8, ptr @x2, i64 8)
   ret [2 x i64] %l
 }
 
@@ -93,7 +93,7 @@ define i64 @f5() {
 ; GISEL-NEXT:    adrp x8, x2+1048568
 ; GISEL-NEXT:    ldr x0, [x8, :lo12:x2+1048568]
 ; GISEL-NEXT:    ret
-  %l = load i64, i64* getelementptr ([16777216 x i64], [16777216 x i64]* @x2, i64 0, i64 131071)
+  %l = load i64, ptr getelementptr ([16777216 x i64], ptr @x2, i64 0, i64 131071)
   ret i64 %l
 }
 
@@ -113,7 +113,7 @@ define i64 @f6() {
 ; GISEL-NEXT:    add x9, x9, :lo12:x2
 ; GISEL-NEXT:    ldr x0, [x9, x8]
 ; GISEL-NEXT:    ret
-  %l = load i64, i64* getelementptr ([16777216 x i64], [16777216 x i64]* @x2, i64 0, i64 131072)
+  %l = load i64, ptr getelementptr ([16777216 x i64], ptr @x2, i64 0, i64 131072)
   ret i64 %l
 }
 
@@ -131,6 +131,6 @@ define i32 @f7() {
 ; GISEL-NEXT:    ret
 
 entry:
-  %l = load i32, i32* getelementptr (i32, i32* inttoptr (i64 trunc (i128 lshr (i128 bitcast (<2 x i64> <i64 undef, i64 ptrtoint (i8** getelementptr inbounds ({ [9 x i8*], [8 x i8*] }, { [9 x i8*], [8 x i8*] }* @x3, i64 0, inrange i32 1, i64 2) to i64)> to i128), i128 64) to i64) to i32*), i64 5)
+  %l = load i32, ptr getelementptr (i32, ptr inttoptr (i64 trunc (i128 lshr (i128 bitcast (<2 x i64> <i64 undef, i64 ptrtoint (ptr getelementptr inbounds ({ [9 x ptr], [8 x ptr] }, ptr @x3, i64 0, inrange i32 1, i64 2) to i64)> to i128), i128 64) to i64) to ptr), i64 5)
   ret i32 %l
 }

diff  --git a/llvm/test/CodeGen/AArch64/fp-cond-sel.ll b/llvm/test/CodeGen/AArch64/fp-cond-sel.ll
index 570088385d0d8..4f039e56a99d7 100644
--- a/llvm/test/CodeGen/AArch64/fp-cond-sel.ll
+++ b/llvm/test/CodeGen/AArch64/fp-cond-sel.ll
@@ -11,7 +11,7 @@ define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
 
   %tst1 = icmp ugt i32 %lhs32, %rhs32
   %val1 = select i1 %tst1, float 0.0, float 1.0
-  store float %val1, float* @varfloat
+  store float %val1, ptr @varfloat
 ; CHECK-DAG: fmov s[[FLT0:[0-9]+]], wzr
 ; CHECK-DAG: fmov s[[FLT1:[0-9]+]], #1.0
 ; CHECK: fcsel {{s[0-9]+}}, s[[FLT0]], s[[FLT1]], hi
@@ -19,7 +19,7 @@ define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
   %rhs64 = sext i32 %rhs32 to i64
   %tst2 = icmp sle i64 %lhs64, %rhs64
   %val2 = select i1 %tst2, double 1.0, double 0.0
-  store double %val2, double* @vardouble
+  store double %val2, ptr @vardouble
 ; CHECK-DAG: fmov d[[FLT0:[0-9]+]], xzr
 ; CHECK-DAG: fmov d[[FLT1:[0-9]+]], #1.0
 ; CHECK: fcsel {{d[0-9]+}}, d[[FLT1]], d[[FLT0]], le

diff  --git a/llvm/test/CodeGen/AArch64/fp-const-fold.ll b/llvm/test/CodeGen/AArch64/fp-const-fold.ll
index dc3f71001d610..c18c9a3dea1a3 100644
--- a/llvm/test/CodeGen/AArch64/fp-const-fold.ll
+++ b/llvm/test/CodeGen/AArch64/fp-const-fold.ll
@@ -3,7 +3,7 @@
 
 ; https://bugs.llvm.org/show_bug.cgi?id=41668
 
-define double @constant_fold_fdiv_by_zero(double* %p) {
+define double @constant_fold_fdiv_by_zero(ptr %p) {
 ; CHECK-LABEL: constant_fold_fdiv_by_zero:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #9218868437227405312
@@ -15,7 +15,7 @@ define double @constant_fold_fdiv_by_zero(double* %p) {
 
 ; frem by 0.0 --> NaN
 
-define double @constant_fold_frem_by_zero(double* %p) {
+define double @constant_fold_frem_by_zero(ptr %p) {
 ; CHECK-LABEL: constant_fold_frem_by_zero:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #9221120237041090560
@@ -27,7 +27,7 @@ define double @constant_fold_frem_by_zero(double* %p) {
 
 ; Inf * 0.0 --> NaN
 
-define double @constant_fold_fmul_nan(double* %p) {
+define double @constant_fold_fmul_nan(ptr %p) {
 ; CHECK-LABEL: constant_fold_fmul_nan:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #9221120237041090560
@@ -39,7 +39,7 @@ define double @constant_fold_fmul_nan(double* %p) {
 
 ; Inf + -Inf --> NaN
 
-define double @constant_fold_fadd_nan(double* %p) {
+define double @constant_fold_fadd_nan(ptr %p) {
 ; CHECK-LABEL: constant_fold_fadd_nan:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #9221120237041090560
@@ -51,7 +51,7 @@ define double @constant_fold_fadd_nan(double* %p) {
 
 ; Inf - Inf --> NaN
 
-define double @constant_fold_fsub_nan(double* %p) {
+define double @constant_fold_fsub_nan(ptr %p) {
 ; CHECK-LABEL: constant_fold_fsub_nan:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #9221120237041090560
@@ -63,7 +63,7 @@ define double @constant_fold_fsub_nan(double* %p) {
 
 ; Inf * 0.0 + ? --> NaN
 
-define double @constant_fold_fma_nan(double* %p) {
+define double @constant_fold_fma_nan(ptr %p) {
 ; CHECK-LABEL: constant_fold_fma_nan:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #9221120237041090560

diff  --git a/llvm/test/CodeGen/AArch64/fp128-folding.ll b/llvm/test/CodeGen/AArch64/fp128-folding.ll
index 5027e83c292a7..43e00e30d310c 100644
--- a/llvm/test/CodeGen/AArch64/fp128-folding.ll
+++ b/llvm/test/CodeGen/AArch64/fp128-folding.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s
-declare void @bar(i8*, i8*, i32*)
+declare void @bar(ptr, ptr, ptr)
 
 ; SelectionDAG used to try to fold some fp128 operations using the ppc128 type,
 ; which is not supported.
@@ -7,8 +7,8 @@ declare void @bar(i8*, i8*, i32*)
 define fp128 @test_folding() {
 ; CHECK-LABEL: test_folding:
   %l = alloca i32
-  store i32 42, i32* %l
-  %val = load i32, i32* %l
+  store i32 42, ptr %l
+  %val = load i32, ptr %l
   %fpval = sitofp i32 %val to fp128
   ; If the value is loaded from a constant pool into an fp128, it's been folded
   ; successfully.

diff  --git a/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll b/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll
index e77a4a4542e48..99e173d289d5e 100644
--- a/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll
@@ -74,22 +74,22 @@ entry:
 }
 
 
-define <4 x half> @load_h(<4 x half>* %a) {
+define <4 x half> @load_h(ptr %a) {
 entry:
 ; CHECK-COMMON-LABEL: load_h:
 ; CHECK-COMMON:       ldr d0, [x0]
 ; CHECK-COMMON-NEXT:  ret
-  %0 = load <4 x half>, <4 x half>* %a, align 4
+  %0 = load <4 x half>, ptr %a, align 4
   ret <4 x half> %0
 }
 
 
-define void @store_h(<4 x half>* %a, <4 x half> %b) {
+define void @store_h(ptr %a, <4 x half> %b) {
 entry:
 ; CHECK-COMMON-LABEL: store_h:
 ; CHECK-COMMON:       str d0, [x0]
 ; CHECK-COMMON-NEXT:  ret
-  store <4 x half> %b, <4 x half>* %a, align 4
+  store <4 x half> %b, ptr %a, align 4
   ret void
 }
 
@@ -246,12 +246,12 @@ define <4 x half> @uitofp_i64(<4 x i64> %a) #0 {
   ret <4 x half> %1
 }
 
-define void @test_insert_at_zero(half %a, <4 x half>* %b) #0 {
+define void @test_insert_at_zero(half %a, ptr %b) #0 {
 ; CHECK-COMMON-LABEL: test_insert_at_zero:
 ; CHECK-COMMON-NEXT:  str d0, [x0]
 ; CHECK-COMMON-NEXT:  ret
   %1 = insertelement <4 x half> undef, half %a, i64 0
-  store <4 x half> %1, <4 x half>* %b, align 4
+  store <4 x half> %1, ptr %b, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll b/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll
index 952f48f88dc79..92d99ead70168 100644
--- a/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll
@@ -278,24 +278,24 @@ entry:
 }
 
 
-define <8 x half> @load_h(<8 x half>* %a) {
+define <8 x half> @load_h(ptr %a) {
 ; CHECK-LABEL: load_h:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load <8 x half>, <8 x half>* %a, align 4
+  %0 = load <8 x half>, ptr %a, align 4
   ret <8 x half> %0
 }
 
 
-define void @store_h(<8 x half>* %a, <8 x half> %b) {
+define void @store_h(ptr %a, <8 x half> %b) {
 ; CHECK-LABEL: store_h:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  store <8 x half> %b, <8 x half>* %a, align 4
+  store <8 x half> %b, ptr %a, align 4
   ret void
 }
 
@@ -647,14 +647,14 @@ define <8 x half> @uitofp_i64(<8 x i64> %a) #0 {
   ret <8 x half> %1
 }
 
-define void @test_insert_at_zero(half %a, <8 x half>* %b) #0 {
+define void @test_insert_at_zero(half %a, ptr %b) #0 {
 ; CHECK-LABEL: test_insert_at_zero:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $h0 killed $h0 def $q0
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %1 = insertelement <8 x half> undef, half %a, i64 0
-  store <8 x half> %1, <8 x half>* %b, align 4
+  store <8 x half> %1, ptr %b, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/fp16-vector-load-store.ll b/llvm/test/CodeGen/AArch64/fp16-vector-load-store.ll
index 1d1794abc306f..f0edfde6cf567 100644
--- a/llvm/test/CodeGen/AArch64/fp16-vector-load-store.ll
+++ b/llvm/test/CodeGen/AArch64/fp16-vector-load-store.ll
@@ -1,606 +1,606 @@
 ; RUN: llc < %s -mtriple=aarch64-none-eabi | FileCheck %s
 
 ; Simple load of v4i16
-define <4 x half> @load_64(<4 x half>* nocapture readonly %a) #0 {
+define <4 x half> @load_64(ptr nocapture readonly %a) #0 {
 ; CHECK-LABEL: load_64:
 ; CHECK: ldr d0, [x0]
 entry:
-  %0 = load <4 x half>, <4 x half>* %a, align 8
+  %0 = load <4 x half>, ptr %a, align 8
   ret <4 x half> %0
 }
 
 ; Simple load of v8i16
-define <8 x half> @load_128(<8 x half>* nocapture readonly %a) #0 {
+define <8 x half> @load_128(ptr nocapture readonly %a) #0 {
 ; CHECK-LABEL: load_128:
 ; CHECK: ldr q0, [x0]
 entry:
-  %0 = load <8 x half>, <8 x half>* %a, align 16
+  %0 = load <8 x half>, ptr %a, align 16
   ret <8 x half> %0
 }
 
 ; Duplicating load to v4i16
-define <4 x half> @load_dup_64(half* nocapture readonly %a) #0 {
+define <4 x half> @load_dup_64(ptr nocapture readonly %a) #0 {
 ; CHECK-LABEL: load_dup_64:
 ; CHECK: ld1r { v0.4h }, [x0]
 entry:
-  %0 = load half, half* %a, align 2
+  %0 = load half, ptr %a, align 2
   %1 = insertelement <4 x half> undef, half %0, i32 0
   %2 = shufflevector <4 x half> %1, <4 x half> undef, <4 x i32> zeroinitializer
   ret <4 x half> %2
 }
 
 ; Duplicating load to v8i16
-define <8 x half> @load_dup_128(half* nocapture readonly %a) #0 {
+define <8 x half> @load_dup_128(ptr nocapture readonly %a) #0 {
 ; CHECK-LABEL: load_dup_128:
 ; CHECK: ld1r { v0.8h }, [x0]
 entry:
-  %0 = load half, half* %a, align 2
+  %0 = load half, ptr %a, align 2
   %1 = insertelement <8 x half> undef, half %0, i32 0
   %2 = shufflevector <8 x half> %1, <8 x half> undef, <8 x i32> zeroinitializer
   ret <8 x half> %2
 }
 
 ; Load to one lane of v4f16
-define <4 x half> @load_lane_64(half* nocapture readonly %a, <4 x half> %b) #0 {
+define <4 x half> @load_lane_64(ptr nocapture readonly %a, <4 x half> %b) #0 {
 ; CHECK-LABEL: load_lane_64:
 ; CHECK: ld1 { v0.h }[2], [x0]
 entry:
-  %0 = load half, half* %a, align 2
+  %0 = load half, ptr %a, align 2
   %1 = insertelement <4 x half> %b, half %0, i32 2
   ret <4 x half> %1
 }
 
 ; Load to one lane of v8f16
-define <8 x half> @load_lane_128(half* nocapture readonly %a, <8 x half> %b) #0 {
+define <8 x half> @load_lane_128(ptr nocapture readonly %a, <8 x half> %b) #0 {
 ; CHECK-LABEL: load_lane_128:
 ; CHECK: ld1 { v0.h }[5], [x0]
 entry:
-  %0 = load half, half* %a, align 2
+  %0 = load half, ptr %a, align 2
   %1 = insertelement <8 x half> %b, half %0, i32 5
   ret <8 x half> %1
 }
 
 ; Simple store of v4f16
-define void @store_64(<4 x half>* nocapture %a, <4 x half> %b) #1 {
+define void @store_64(ptr nocapture %a, <4 x half> %b) #1 {
 ; CHECK-LABEL: store_64:
 ; CHECK: str d0, [x0]
 entry:
-  store <4 x half> %b, <4 x half>* %a, align 8
+  store <4 x half> %b, ptr %a, align 8
   ret void
 }
 
 ; Simple store of v8f16
-define void @store_128(<8 x half>* nocapture %a, <8 x half> %b) #1 {
+define void @store_128(ptr nocapture %a, <8 x half> %b) #1 {
 ; CHECK-LABEL: store_128:
 ; CHECK: str q0, [x0]
 entry:
-  store <8 x half> %b, <8 x half>* %a, align 16
+  store <8 x half> %b, ptr %a, align 16
   ret void
 }
 
 ; Store from one lane of v4f16
-define void @store_lane_64(half* nocapture %a, <4 x half> %b) #1 {
+define void @store_lane_64(ptr nocapture %a, <4 x half> %b) #1 {
 ; CHECK-LABEL: store_lane_64:
 ; CHECK: st1 { v0.h }[2], [x0]
 entry:
   %0 = extractelement <4 x half> %b, i32 2
-  store half %0, half* %a, align 2
+  store half %0, ptr %a, align 2
   ret void
 }
 
-define void @store_lane0_64(half* nocapture %a, <4 x half> %b) #1 {
+define void @store_lane0_64(ptr nocapture %a, <4 x half> %b) #1 {
 ; CHECK-LABEL: store_lane0_64:
 ; CHECK: str h0, [x0]
 entry:
   %0 = extractelement <4 x half> %b, i32 0
-  store half %0, half* %a, align 2
+  store half %0, ptr %a, align 2
   ret void
 }
 
-define void @storeu_lane0_64(half* nocapture %a, <4 x half> %b) #1 {
+define void @storeu_lane0_64(ptr nocapture %a, <4 x half> %b) #1 {
 ; CHECK-LABEL: storeu_lane0_64:
 ; CHECK: stur h0, [x{{[0-9]+}}, #-2]
 entry:
-  %0 = getelementptr half, half* %a, i64 -1
+  %0 = getelementptr half, ptr %a, i64 -1
   %1 = extractelement <4 x half> %b, i32 0
-  store half %1, half* %0, align 2
+  store half %1, ptr %0, align 2
   ret void
 }
 
-define void @storero_lane_64(half* nocapture %a, <4 x half> %b, i64 %c) #1 {
+define void @storero_lane_64(ptr nocapture %a, <4 x half> %b, i64 %c) #1 {
 ; CHECK-LABEL: storero_lane_64:
 ; CHECK: st1 { v0.h }[2], [x{{[0-9]+}}]
 entry:
-  %0 = getelementptr half, half* %a, i64 %c
+  %0 = getelementptr half, ptr %a, i64 %c
   %1 = extractelement <4 x half> %b, i32 2
-  store half %1, half* %0, align 2
+  store half %1, ptr %0, align 2
   ret void
 }
 
-define void @storero_lane0_64(half* nocapture %a, <4 x half> %b, i64 %c) #1 {
+define void @storero_lane0_64(ptr nocapture %a, <4 x half> %b, i64 %c) #1 {
 ; CHECK-LABEL: storero_lane0_64:
 ; CHECK: str h0, [x0, x1, lsl #1]
 entry:
-  %0 = getelementptr half, half* %a, i64 %c
+  %0 = getelementptr half, ptr %a, i64 %c
   %1 = extractelement <4 x half> %b, i32 0
-  store half %1, half* %0, align 2
+  store half %1, ptr %0, align 2
   ret void
 }
 
 ; Store from one lane of v8f16
-define void @store_lane_128(half* nocapture %a, <8 x half> %b) #1 {
+define void @store_lane_128(ptr nocapture %a, <8 x half> %b) #1 {
 ; CHECK-LABEL: store_lane_128:
 ; CHECK: st1 { v0.h }[5], [x0]
 entry:
   %0 = extractelement <8 x half> %b, i32 5
-  store half %0, half* %a, align 2
+  store half %0, ptr %a, align 2
   ret void
 }
 
-define void @store_lane0_128(half* nocapture %a, <8 x half> %b) #1 {
+define void @store_lane0_128(ptr nocapture %a, <8 x half> %b) #1 {
 ; CHECK-LABEL: store_lane0_128:
 ; CHECK: str h0, [x0]
 entry:
   %0 = extractelement <8 x half> %b, i32 0
-  store half %0, half* %a, align 2
+  store half %0, ptr %a, align 2
   ret void
 }
 
-define void @storeu_lane0_128(half* nocapture %a, <8 x half> %b) #1 {
+define void @storeu_lane0_128(ptr nocapture %a, <8 x half> %b) #1 {
 ; CHECK-LABEL: storeu_lane0_128:
 ; CHECK: stur h0, [x{{[0-9]+}}, #-2]
 entry:
-  %0 = getelementptr half, half* %a, i64 -1
+  %0 = getelementptr half, ptr %a, i64 -1
   %1 = extractelement <8 x half> %b, i32 0
-  store half %1, half* %0, align 2
+  store half %1, ptr %0, align 2
   ret void
 }
 
-define void @storero_lane_128(half* nocapture %a, <8 x half> %b, i64 %c) #1 {
+define void @storero_lane_128(ptr nocapture %a, <8 x half> %b, i64 %c) #1 {
 ; CHECK-LABEL: storero_lane_128:
 ; CHECK: st1 { v0.h }[4], [x{{[0-9]+}}]
 entry:
-  %0 = getelementptr half, half* %a, i64 %c
+  %0 = getelementptr half, ptr %a, i64 %c
   %1 = extractelement <8 x half> %b, i32 4
-  store half %1, half* %0, align 2
+  store half %1, ptr %0, align 2
   ret void
 }
 
-define void @storero_lane0_128(half* nocapture %a, <8 x half> %b, i64 %c) #1 {
+define void @storero_lane0_128(ptr nocapture %a, <8 x half> %b, i64 %c) #1 {
 ; CHECK-LABEL: storero_lane0_128:
 ; CHECK: str h0, [x0, x1, lsl #1]
 entry:
-  %0 = getelementptr half, half* %a, i64 %c
+  %0 = getelementptr half, ptr %a, i64 %c
   %1 = extractelement <8 x half> %b, i32 0
-  store half %1, half* %0, align 2
+  store half %1, ptr %0, align 2
   ret void
 }
 
 ; NEON intrinsics - (de-)interleaving loads and stores
-declare { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2.v4f16.p0v4f16(<4 x half>*)
-declare { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3.v4f16.p0v4f16(<4 x half>*)
-declare { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4.v4f16.p0v4f16(<4 x half>*)
-declare void @llvm.aarch64.neon.st2.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>*)
-declare void @llvm.aarch64.neon.st3.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>*)
-declare void @llvm.aarch64.neon.st4.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>, <4 x half>*)
-declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0v8f16(<8 x half>*)
-declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3.v8f16.p0v8f16(<8 x half>*)
-declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4.v8f16.p0v8f16(<8 x half>*)
-declare void @llvm.aarch64.neon.st2.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>*)
-declare void @llvm.aarch64.neon.st3.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>*)
-declare void @llvm.aarch64.neon.st4.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>, <8 x half>*)
+declare { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2.v4f16.p0(ptr)
+declare { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3.v4f16.p0(ptr)
+declare { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4.v4f16.p0(ptr)
+declare void @llvm.aarch64.neon.st2.v4f16.p0(<4 x half>, <4 x half>, ptr)
+declare void @llvm.aarch64.neon.st3.v4f16.p0(<4 x half>, <4 x half>, <4 x half>, ptr)
+declare void @llvm.aarch64.neon.st4.v4f16.p0(<4 x half>, <4 x half>, <4 x half>, <4 x half>, ptr)
+declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0(ptr)
+declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3.v8f16.p0(ptr)
+declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4.v8f16.p0(ptr)
+declare void @llvm.aarch64.neon.st2.v8f16.p0(<8 x half>, <8 x half>, ptr)
+declare void @llvm.aarch64.neon.st3.v8f16.p0(<8 x half>, <8 x half>, <8 x half>, ptr)
+declare void @llvm.aarch64.neon.st4.v8f16.p0(<8 x half>, <8 x half>, <8 x half>, <8 x half>, ptr)
 
 ; Load 2 x v4f16 with de-interleaving
-define { <4 x half>, <4 x half> } @load_interleave_64_2(<4 x half>* %a) #0 {
+define { <4 x half>, <4 x half> } @load_interleave_64_2(ptr %a) #0 {
 ; CHECK-LABEL: load_interleave_64_2:
 ; CHECK: ld2 { v0.4h, v1.4h }, [x0]
 entry:
-  %0 = tail call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2.v4f16.p0v4f16(<4 x half>* %a)
+  %0 = tail call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2.v4f16.p0(ptr %a)
   ret { <4 x half>, <4 x half> } %0
 }
 
 ; Load 3 x v4f16 with de-interleaving
-define { <4 x half>, <4 x half>, <4 x half> } @load_interleave_64_3(<4 x half>* %a) #0 {
+define { <4 x half>, <4 x half>, <4 x half> } @load_interleave_64_3(ptr %a) #0 {
 ; CHECK-LABEL: load_interleave_64_3:
 ; CHECK: ld3 { v0.4h, v1.4h, v2.4h }, [x0]
 entry:
-  %0 = tail call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3.v4f16.p0v4f16(<4 x half>* %a)
+  %0 = tail call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3.v4f16.p0(ptr %a)
   ret { <4 x half>, <4 x half>, <4 x half> } %0
 }
 
 ; Load 4 x v4f16 with de-interleaving
-define { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @load_interleave_64_4(<4 x half>* %a) #0 {
+define { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @load_interleave_64_4(ptr %a) #0 {
 ; CHECK-LABEL: load_interleave_64_4:
 ; CHECK: ld4 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
 entry:
-  %0 = tail call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4.v4f16.p0v4f16(<4 x half>* %a)
+  %0 = tail call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4.v4f16.p0(ptr %a)
   ret { <4 x half>, <4 x half>, <4 x half>, <4 x half> } %0
 }
 
 ; Store 2 x v4f16 with interleaving
-define void @store_interleave_64_2(<4 x half>* %a, <4 x half> %b, <4 x half> %c) #0 {
+define void @store_interleave_64_2(ptr %a, <4 x half> %b, <4 x half> %c) #0 {
 ; CHECK-LABEL: store_interleave_64_2:
 ; CHECK: st2 { v0.4h, v1.4h }, [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st2.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half>* %a)
+  tail call void @llvm.aarch64.neon.st2.v4f16.p0(<4 x half> %b, <4 x half> %c, ptr %a)
   ret void
 }
 
 ; Store 3 x v4f16 with interleaving
-define void @store_interleave_64_3(<4 x half>* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d) #0 {
+define void @store_interleave_64_3(ptr %a, <4 x half> %b, <4 x half> %c, <4 x half> %d) #0 {
 ; CHECK-LABEL: store_interleave_64_3:
 ; CHECK: st3 { v0.4h, v1.4h, v2.4h }, [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st3.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half>* %a)
+  tail call void @llvm.aarch64.neon.st3.v4f16.p0(<4 x half> %b, <4 x half> %c, <4 x half> %d, ptr %a)
   ret void
 }
 
 ; Store 4 x v4f16 with interleaving
-define void @store_interleave_64_4(<4 x half>* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e) #0 {
+define void @store_interleave_64_4(ptr %a, <4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e) #0 {
 ; CHECK-LABEL: store_interleave_64_4:
 ; CHECK: st4 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st4.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e, <4 x half>* %a)
+  tail call void @llvm.aarch64.neon.st4.v4f16.p0(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e, ptr %a)
   ret void
 }
 
 ; Load 2 x v8f16 with de-interleaving
-define { <8 x half>, <8 x half> } @load_interleave_128_2(<8 x half>* %a) #0 {
+define { <8 x half>, <8 x half> } @load_interleave_128_2(ptr %a) #0 {
 ; CHECK-LABEL: load_interleave_128_2:
 ; CHECK: ld2 { v0.8h, v1.8h }, [x0]
 entry:
-  %0 = tail call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0v8f16(<8 x half>* %a)
+  %0 = tail call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0(ptr %a)
   ret { <8 x half>, <8 x half> } %0
 }
 
 ; Load 3 x v8f16 with de-interleaving
-define { <8 x half>, <8 x half>, <8 x half> } @load_interleave_128_3(<8 x half>* %a) #0 {
+define { <8 x half>, <8 x half>, <8 x half> } @load_interleave_128_3(ptr %a) #0 {
 ; CHECK-LABEL: load_interleave_128_3:
 ; CHECK: ld3 { v0.8h, v1.8h, v2.8h }, [x0]
 entry:
-  %0 = tail call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3.v8f16.p0v8f16(<8 x half>* %a)
+  %0 = tail call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3.v8f16.p0(ptr %a)
   ret { <8 x half>, <8 x half>, <8 x half> } %0
 }
 
 ; Load 8 x v8f16 with de-interleaving
-define { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @load_interleave_128_4(<8 x half>* %a) #0 {
+define { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @load_interleave_128_4(ptr %a) #0 {
 ; CHECK-LABEL: load_interleave_128_4:
 ; CHECK: ld4 { v0.8h, v1.8h, v2.8h, v3.8h }, [x0]
 entry:
-  %0 = tail call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4.v8f16.p0v8f16(<8 x half>* %a)
+  %0 = tail call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4.v8f16.p0(ptr %a)
   ret { <8 x half>, <8 x half>, <8 x half>, <8 x half> } %0
 }
 
 ; Store 2 x v8f16 with interleaving
-define void @store_interleave_128_2(<8 x half>* %a, <8 x half> %b, <8 x half> %c) #0 {
+define void @store_interleave_128_2(ptr %a, <8 x half> %b, <8 x half> %c) #0 {
 ; CHECK-LABEL: store_interleave_128_2:
 ; CHECK: st2 { v0.8h, v1.8h }, [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st2.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half>* %a)
+  tail call void @llvm.aarch64.neon.st2.v8f16.p0(<8 x half> %b, <8 x half> %c, ptr %a)
   ret void
 }
 
 ; Store 3 x v8f16 with interleaving
-define void @store_interleave_128_3(<8 x half>* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d) #0 {
+define void @store_interleave_128_3(ptr %a, <8 x half> %b, <8 x half> %c, <8 x half> %d) #0 {
 ; CHECK-LABEL: store_interleave_128_3:
 ; CHECK: st3 { v0.8h, v1.8h, v2.8h }, [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st3.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half>* %a)
+  tail call void @llvm.aarch64.neon.st3.v8f16.p0(<8 x half> %b, <8 x half> %c, <8 x half> %d, ptr %a)
   ret void
 }
 
 ; Store 8 x v8f16 with interleaving
-define void @store_interleave_128_4(<8 x half>* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e) #0 {
+define void @store_interleave_128_4(ptr %a, <8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e) #0 {
 ; CHECK-LABEL: store_interleave_128_4:
 ; CHECK: st4 { v0.8h, v1.8h, v2.8h, v3.8h }, [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st4.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e, <8 x half>* %a)
+  tail call void @llvm.aarch64.neon.st4.v8f16.p0(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e, ptr %a)
   ret void
 }
 
 ; NEON intrinsics - duplicating loads
-declare { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2r.v4f16.p0f16(half*)
-declare { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3r.v4f16.p0f16(half*)
-declare { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4r.v4f16.p0f16(half*)
-declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2r.v8f16.p0f16(half*)
-declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3r.v8f16.p0f16(half*)
-declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4r.v8f16.p0f16(half*)
+declare { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2r.v4f16.p0(ptr)
+declare { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3r.v4f16.p0(ptr)
+declare { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4r.v4f16.p0(ptr)
+declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2r.v8f16.p0(ptr)
+declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3r.v8f16.p0(ptr)
+declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4r.v8f16.p0(ptr)
 
 ; Load 2 x v4f16 with duplication
-define { <4 x half>, <4 x half> } @load_dup_64_2(half* %a) #0 {
+define { <4 x half>, <4 x half> } @load_dup_64_2(ptr %a) #0 {
 ; CHECK-LABEL: load_dup_64_2:
 ; CHECK: ld2r { v0.4h, v1.4h }, [x0]
 entry:
-  %0 = tail call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2r.v4f16.p0f16(half* %a)
+  %0 = tail call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2r.v4f16.p0(ptr %a)
   ret { <4 x half>, <4 x half> } %0
 }
 
 ; Load 3 x v4f16 with duplication
-define { <4 x half>, <4 x half>, <4 x half> } @load_dup_64_3(half* %a) #0 {
+define { <4 x half>, <4 x half>, <4 x half> } @load_dup_64_3(ptr %a) #0 {
 ; CHECK-LABEL: load_dup_64_3:
 ; CHECK: ld3r { v0.4h, v1.4h, v2.4h }, [x0]
 entry:
-  %0 = tail call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3r.v4f16.p0f16(half* %a)
+  %0 = tail call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3r.v4f16.p0(ptr %a)
   ret { <4 x half>, <4 x half>, <4 x half> } %0
 }
 
 ; Load 4 x v4f16 with duplication
-define { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @load_dup_64_4(half* %a) #0 {
+define { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @load_dup_64_4(ptr %a) #0 {
 ; CHECK-LABEL: load_dup_64_4:
 ; CHECK: ld4r { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
 entry:
-  %0 = tail call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4r.v4f16.p0f16(half* %a)
+  %0 = tail call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4r.v4f16.p0(ptr %a)
   ret { <4 x half>, <4 x half>, <4 x half>, <4 x half> } %0
 }
 
 ; Load 2 x v8f16 with duplication
-define { <8 x half>, <8 x half> } @load_dup_128_2(half* %a) #0 {
+define { <8 x half>, <8 x half> } @load_dup_128_2(ptr %a) #0 {
 ; CHECK-LABEL: load_dup_128_2:
 ; CHECK: ld2r { v0.8h, v1.8h }, [x0]
 entry:
-  %0 = tail call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2r.v8f16.p0f16(half* %a)
+  %0 = tail call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2r.v8f16.p0(ptr %a)
   ret { <8 x half>, <8 x half> } %0
 }
 
 ; Load 3 x v8f16 with duplication
-define { <8 x half>, <8 x half>, <8 x half> } @load_dup_128_3(half* %a) #0 {
+define { <8 x half>, <8 x half>, <8 x half> } @load_dup_128_3(ptr %a) #0 {
 ; CHECK-LABEL: load_dup_128_3:
 ; CHECK: ld3r { v0.8h, v1.8h, v2.8h }, [x0]
 entry:
-  %0 = tail call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3r.v8f16.p0f16(half* %a)
+  %0 = tail call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3r.v8f16.p0(ptr %a)
   ret { <8 x half>, <8 x half>, <8 x half> } %0
 }
 
 ; Load 8 x v8f16 with duplication
-define { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @load_dup_128_4(half* %a) #0 {
+define { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @load_dup_128_4(ptr %a) #0 {
 ; CHECK-LABEL: load_dup_128_4:
 ; CHECK: ld4r { v0.8h, v1.8h, v2.8h, v3.8h }, [x0]
 entry:
-  %0 = tail call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4r.v8f16.p0f16(half* %a)
+  %0 = tail call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4r.v8f16.p0(ptr %a)
   ret { <8 x half>, <8 x half>, <8 x half>, <8 x half> } %0
 }
 
 
 ; NEON intrinsics - loads and stores to/from one lane
-declare { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2lane.v4f16.p0f16(<4 x half>, <4 x half>, i64, half*)
-declare { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3lane.v4f16.p0f16(<4 x half>, <4 x half>, <4 x half>, i64, half*)
-declare { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4lane.v4f16.p0f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>, i64, half*)
-declare void @llvm.aarch64.neon.st2lane.v4f16.p0f16(<4 x half>, <4 x half>, i64, half*)
-declare void @llvm.aarch64.neon.st3lane.v4f16.p0f16(<4 x half>, <4 x half>, <4 x half>, i64, half*)
-declare void @llvm.aarch64.neon.st4lane.v4f16.p0f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>, i64, half*)
-declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2lane.v8f16.p0f16(<8 x half>, <8 x half>, i64, half*)
-declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3lane.v8f16.p0f16(<8 x half>, <8 x half>, <8 x half>, i64, half*)
-declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4lane.v8f16.p0f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>, i64, half*)
-declare void @llvm.aarch64.neon.st2lane.v8f16.p0f16(<8 x half>, <8 x half>, i64, half*)
-declare void @llvm.aarch64.neon.st3lane.v8f16.p0f16(<8 x half>, <8 x half>, <8 x half>, i64, half*)
-declare void @llvm.aarch64.neon.st4lane.v8f16.p0f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>, i64, half*)
+declare { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2lane.v4f16.p0(<4 x half>, <4 x half>, i64, ptr)
+declare { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3lane.v4f16.p0(<4 x half>, <4 x half>, <4 x half>, i64, ptr)
+declare { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4lane.v4f16.p0(<4 x half>, <4 x half>, <4 x half>, <4 x half>, i64, ptr)
+declare void @llvm.aarch64.neon.st2lane.v4f16.p0(<4 x half>, <4 x half>, i64, ptr)
+declare void @llvm.aarch64.neon.st3lane.v4f16.p0(<4 x half>, <4 x half>, <4 x half>, i64, ptr)
+declare void @llvm.aarch64.neon.st4lane.v4f16.p0(<4 x half>, <4 x half>, <4 x half>, <4 x half>, i64, ptr)
+declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2lane.v8f16.p0(<8 x half>, <8 x half>, i64, ptr)
+declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3lane.v8f16.p0(<8 x half>, <8 x half>, <8 x half>, i64, ptr)
+declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4lane.v8f16.p0(<8 x half>, <8 x half>, <8 x half>, <8 x half>, i64, ptr)
+declare void @llvm.aarch64.neon.st2lane.v8f16.p0(<8 x half>, <8 x half>, i64, ptr)
+declare void @llvm.aarch64.neon.st3lane.v8f16.p0(<8 x half>, <8 x half>, <8 x half>, i64, ptr)
+declare void @llvm.aarch64.neon.st4lane.v8f16.p0(<8 x half>, <8 x half>, <8 x half>, <8 x half>, i64, ptr)
 
 ; Load one lane of 2 x v4f16
-define { <4 x half>, <4 x half> } @load_lane_64_2(half* %a, <4 x half> %b, <4 x half> %c) #0 {
+define { <4 x half>, <4 x half> } @load_lane_64_2(ptr %a, <4 x half> %b, <4 x half> %c) #0 {
 ; CHECK-LABEL: load_lane_64_2:
 ; CHECK: ld2 { v0.h, v1.h }[2], [x0]
 entry:
-  %0 = tail call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, i64 2, half* %a)
+  %0 = tail call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2lane.v4f16.p0(<4 x half> %b, <4 x half> %c, i64 2, ptr %a)
   ret { <4 x half>, <4 x half> } %0
 }
 
 ; Load one lane of 3 x v4f16
-define { <4 x half>, <4 x half>, <4 x half> } @load_lane_64_3(half* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d) #0 {
+define { <4 x half>, <4 x half>, <4 x half> } @load_lane_64_3(ptr %a, <4 x half> %b, <4 x half> %c, <4 x half> %d) #0 {
 ; CHECK-LABEL: load_lane_64_3:
 ; CHECK: ld3 { v0.h, v1.h, v2.h }[2], [x0]
 entry:
-  %0 = tail call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, i64 2, half* %a)
+  %0 = tail call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3lane.v4f16.p0(<4 x half> %b, <4 x half> %c, <4 x half> %d, i64 2, ptr %a)
   ret { <4 x half>, <4 x half>, <4 x half> } %0
 }
 
 ; Load one lane of 4 x v4f16
-define { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @load_lane_64_4(half* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e) #0 {
+define { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @load_lane_64_4(ptr %a, <4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e) #0 {
 ; CHECK-LABEL: load_lane_64_4:
 ; CHECK: ld4 { v0.h, v1.h, v2.h, v3.h }[2], [x0]
 entry:
-  %0 = tail call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e, i64 2, half* %a)
+  %0 = tail call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4lane.v4f16.p0(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e, i64 2, ptr %a)
   ret { <4 x half>, <4 x half>, <4 x half>, <4 x half> } %0
 }
 
 ; Store one lane of 2 x v4f16
-define void @store_lane_64_2(half* %a, <4 x half> %b, <4 x half> %c) #0 {
+define void @store_lane_64_2(ptr %a, <4 x half> %b, <4 x half> %c) #0 {
 ; CHECK-LABEL: store_lane_64_2:
 ; CHECK: st2 { v0.h, v1.h }[2], [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st2lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, i64 2, half* %a)
+  tail call void @llvm.aarch64.neon.st2lane.v4f16.p0(<4 x half> %b, <4 x half> %c, i64 2, ptr %a)
   ret void
 }
 
 ; Store one lane of 3 x v4f16
-define void @store_lane_64_3(half* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d) #0 {
+define void @store_lane_64_3(ptr %a, <4 x half> %b, <4 x half> %c, <4 x half> %d) #0 {
 ; CHECK-LABEL: store_lane_64_3:
 ; CHECK: st3 { v0.h, v1.h, v2.h }[2], [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st3lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, i64 2, half* %a)
+  tail call void @llvm.aarch64.neon.st3lane.v4f16.p0(<4 x half> %b, <4 x half> %c, <4 x half> %d, i64 2, ptr %a)
   ret void
 }
 
 ; Store one lane of 4 x v4f16
-define void @store_lane_64_4(half* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e) #0 {
+define void @store_lane_64_4(ptr %a, <4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e) #0 {
 ; CHECK-LABEL: store_lane_64_4:
 ; CHECK: st4 { v0.h, v1.h, v2.h, v3.h }[2], [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st4lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e, i64 2, half* %a)
+  tail call void @llvm.aarch64.neon.st4lane.v4f16.p0(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e, i64 2, ptr %a)
   ret void
 }
 
 ; Load one lane of 2 x v8f16
-define { <8 x half>, <8 x half> } @load_lane_128_2(half* %a, <8 x half> %b, <8 x half> %c) #0 {
+define { <8 x half>, <8 x half> } @load_lane_128_2(ptr %a, <8 x half> %b, <8 x half> %c) #0 {
 ; CHECK-LABEL: load_lane_128_2:
 ; CHECK: ld2 { v0.h, v1.h }[2], [x0]
 entry:
-  %0 = tail call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, i64 2, half* %a)
+  %0 = tail call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2lane.v8f16.p0(<8 x half> %b, <8 x half> %c, i64 2, ptr %a)
   ret { <8 x half>, <8 x half> } %0
 }
 
 ; Load one lane of 3 x v8f16
-define { <8 x half>, <8 x half>, <8 x half> } @load_lane_128_3(half* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d) #0 {
+define { <8 x half>, <8 x half>, <8 x half> } @load_lane_128_3(ptr %a, <8 x half> %b, <8 x half> %c, <8 x half> %d) #0 {
 ; CHECK-LABEL: load_lane_128_3:
 ; CHECK: ld3 { v0.h, v1.h, v2.h }[2], [x0]
 entry:
-  %0 = tail call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, i64 2, half* %a)
+  %0 = tail call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3lane.v8f16.p0(<8 x half> %b, <8 x half> %c, <8 x half> %d, i64 2, ptr %a)
   ret { <8 x half>, <8 x half>, <8 x half> } %0
 }
 
 ; Load one lane of 8 x v8f16
-define { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @load_lane_128_4(half* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e) #0 {
+define { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @load_lane_128_4(ptr %a, <8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e) #0 {
 ; CHECK-LABEL: load_lane_128_4:
 ; CHECK: ld4 { v0.h, v1.h, v2.h, v3.h }[2], [x0]
 entry:
-  %0 = tail call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e, i64 2, half* %a)
+  %0 = tail call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4lane.v8f16.p0(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e, i64 2, ptr %a)
   ret { <8 x half>, <8 x half>, <8 x half>, <8 x half> } %0
 }
 
 ; Store one lane of 2 x v8f16
-define void @store_lane_128_2(half* %a, <8 x half> %b, <8 x half> %c) #0 {
+define void @store_lane_128_2(ptr %a, <8 x half> %b, <8 x half> %c) #0 {
 ; CHECK-LABEL: store_lane_128_2:
 ; CHECK: st2 { v0.h, v1.h }[2], [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st2lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, i64 2, half* %a)
+  tail call void @llvm.aarch64.neon.st2lane.v8f16.p0(<8 x half> %b, <8 x half> %c, i64 2, ptr %a)
   ret void
 }
 
 ; Store one lane of 3 x v8f16
-define void @store_lane_128_3(half* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d) #0 {
+define void @store_lane_128_3(ptr %a, <8 x half> %b, <8 x half> %c, <8 x half> %d) #0 {
 ; CHECK-LABEL: store_lane_128_3:
 ; CHECK: st3 { v0.h, v1.h, v2.h }[2], [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st3lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, i64 2, half* %a)
+  tail call void @llvm.aarch64.neon.st3lane.v8f16.p0(<8 x half> %b, <8 x half> %c, <8 x half> %d, i64 2, ptr %a)
   ret void
 }
 
 ; Store one lane of 8 x v8f16
-define void @store_lane_128_4(half* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e) #0 {
+define void @store_lane_128_4(ptr %a, <8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e) #0 {
 ; CHECK-LABEL: store_lane_128_4:
 ; CHECK: st4 { v0.h, v1.h, v2.h, v3.h }[2], [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st4lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e, i64 2, half* %a)
+  tail call void @llvm.aarch64.neon.st4lane.v8f16.p0(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e, i64 2, ptr %a)
   ret void
 }
 
 ; NEON intrinsics - load/store without interleaving
-declare { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x2.v4f16.p0v4f16(<4 x half>*)
-declare { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x3.v4f16.p0v4f16(<4 x half>*)
-declare { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x4.v4f16.p0v4f16(<4 x half>*)
-declare void @llvm.aarch64.neon.st1x2.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>*)
-declare void @llvm.aarch64.neon.st1x3.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>*)
-declare void @llvm.aarch64.neon.st1x4.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>, <4 x half>*)
-declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x2.v8f16.p0v8f16(<8 x half>*)
-declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x3.v8f16.p0v8f16(<8 x half>*)
-declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x4.v8f16.p0v8f16(<8 x half>*)
-declare void @llvm.aarch64.neon.st1x2.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>*)
-declare void @llvm.aarch64.neon.st1x3.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>*)
-declare void @llvm.aarch64.neon.st1x4.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>, <8 x half>*)
+declare { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x2.v4f16.p0(ptr)
+declare { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x3.v4f16.p0(ptr)
+declare { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x4.v4f16.p0(ptr)
+declare void @llvm.aarch64.neon.st1x2.v4f16.p0(<4 x half>, <4 x half>, ptr)
+declare void @llvm.aarch64.neon.st1x3.v4f16.p0(<4 x half>, <4 x half>, <4 x half>, ptr)
+declare void @llvm.aarch64.neon.st1x4.v4f16.p0(<4 x half>, <4 x half>, <4 x half>, <4 x half>, ptr)
+declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x2.v8f16.p0(ptr)
+declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x3.v8f16.p0(ptr)
+declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x4.v8f16.p0(ptr)
+declare void @llvm.aarch64.neon.st1x2.v8f16.p0(<8 x half>, <8 x half>, ptr)
+declare void @llvm.aarch64.neon.st1x3.v8f16.p0(<8 x half>, <8 x half>, <8 x half>, ptr)
+declare void @llvm.aarch64.neon.st1x4.v8f16.p0(<8 x half>, <8 x half>, <8 x half>, <8 x half>, ptr)
 
 ; Load 2 x v4f16 without de-interleaving
-define { <4 x half>, <4 x half> } @load_64_2(<4 x half>* %a) #0 {
+define { <4 x half>, <4 x half> } @load_64_2(ptr %a) #0 {
 ; CHECK-LABEL: load_64_2:
 ; CHECK: ld1 { v0.4h, v1.4h }, [x0]
 entry:
-  %0 = tail call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x2.v4f16.p0v4f16(<4 x half>* %a)
+  %0 = tail call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x2.v4f16.p0(ptr %a)
   ret { <4 x half>, <4 x half> } %0
 }
 
 ; Load 3 x v4f16 without de-interleaving
-define { <4 x half>, <4 x half>, <4 x half> } @load_64_3(<4 x half>* %a) #0 {
+define { <4 x half>, <4 x half>, <4 x half> } @load_64_3(ptr %a) #0 {
 ; CHECK-LABEL: load_64_3:
 ; CHECK: ld1 { v0.4h, v1.4h, v2.4h }, [x0]
 entry:
-  %0 = tail call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x3.v4f16.p0v4f16(<4 x half>* %a)
+  %0 = tail call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x3.v4f16.p0(ptr %a)
   ret { <4 x half>, <4 x half>, <4 x half> } %0
 }
 
 ; Load 4 x v4f16 without de-interleaving
-define { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @load_64_4(<4 x half>* %a) #0 {
+define { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @load_64_4(ptr %a) #0 {
 ; CHECK-LABEL: load_64_4:
 ; CHECK: ld1 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
 entry:
-  %0 = tail call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x4.v4f16.p0v4f16(<4 x half>* %a)
+  %0 = tail call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x4.v4f16.p0(ptr %a)
   ret { <4 x half>, <4 x half>, <4 x half>, <4 x half> } %0
 }
 
 ; Store 2 x v4f16 without interleaving
-define void @store_64_2(<4 x half>* %a, <4 x half> %b, <4 x half> %c) #0 {
+define void @store_64_2(ptr %a, <4 x half> %b, <4 x half> %c) #0 {
 ; CHECK-LABEL: store_64_2:
 ; CHECK: st1 { v0.4h, v1.4h }, [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st1x2.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half>* %a)
+  tail call void @llvm.aarch64.neon.st1x2.v4f16.p0(<4 x half> %b, <4 x half> %c, ptr %a)
   ret void
 }
 
 ; Store 3 x v4f16 without interleaving
-define void @store_64_3(<4 x half>* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d) #0 {
+define void @store_64_3(ptr %a, <4 x half> %b, <4 x half> %c, <4 x half> %d) #0 {
 ; CHECK-LABEL: store_64_3:
 ; CHECK: st1 { v0.4h, v1.4h, v2.4h }, [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st1x3.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half>* %a)
+  tail call void @llvm.aarch64.neon.st1x3.v4f16.p0(<4 x half> %b, <4 x half> %c, <4 x half> %d, ptr %a)
   ret void
 }
 
 ; Store 4 x v4f16 without interleaving
-define void @store_64_4(<4 x half>* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e) #0 {
+define void @store_64_4(ptr %a, <4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e) #0 {
 ; CHECK-LABEL: store_64_4:
 ; CHECK: st1 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st1x4.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e, <4 x half>* %a)
+  tail call void @llvm.aarch64.neon.st1x4.v4f16.p0(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e, ptr %a)
   ret void
 }
 
 ; Load 2 x v8f16 without de-interleaving
-define { <8 x half>, <8 x half> } @load_128_2(<8 x half>* %a) #0 {
+define { <8 x half>, <8 x half> } @load_128_2(ptr %a) #0 {
 ; CHECK-LABEL: load_128_2:
 ; CHECK: ld1 { v0.8h, v1.8h }, [x0]
 entry:
-  %0 = tail call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x2.v8f16.p0v8f16(<8 x half>* %a)
+  %0 = tail call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x2.v8f16.p0(ptr %a)
   ret { <8 x half>, <8 x half> } %0
 }
 
 ; Load 3 x v8f16 without de-interleaving
-define { <8 x half>, <8 x half>, <8 x half> } @load_128_3(<8 x half>* %a) #0 {
+define { <8 x half>, <8 x half>, <8 x half> } @load_128_3(ptr %a) #0 {
 ; CHECK-LABEL: load_128_3:
 ; CHECK: ld1 { v0.8h, v1.8h, v2.8h }, [x0]
 entry:
-  %0 = tail call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x3.v8f16.p0v8f16(<8 x half>* %a)
+  %0 = tail call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x3.v8f16.p0(ptr %a)
   ret { <8 x half>, <8 x half>, <8 x half> } %0
 }
 
 ; Load 8 x v8f16 without de-interleaving
-define { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @load_128_4(<8 x half>* %a) #0 {
+define { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @load_128_4(ptr %a) #0 {
 ; CHECK-LABEL: load_128_4:
 ; CHECK: ld1 { v0.8h, v1.8h, v2.8h, v3.8h }, [x0]
 entry:
-  %0 = tail call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x4.v8f16.p0v8f16(<8 x half>* %a)
+  %0 = tail call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x4.v8f16.p0(ptr %a)
   ret { <8 x half>, <8 x half>, <8 x half>, <8 x half> } %0
 }
 
 ; Store 2 x v8f16 without interleaving
-define void @store_128_2(<8 x half>* %a, <8 x half> %b, <8 x half> %c) #0 {
+define void @store_128_2(ptr %a, <8 x half> %b, <8 x half> %c) #0 {
 ; CHECK-LABEL: store_128_2:
 ; CHECK: st1 { v0.8h, v1.8h }, [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st1x2.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half>* %a)
+  tail call void @llvm.aarch64.neon.st1x2.v8f16.p0(<8 x half> %b, <8 x half> %c, ptr %a)
   ret void
 }
 
 ; Store 3 x v8f16 without interleaving
-define void @store_128_3(<8 x half>* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d) #0 {
+define void @store_128_3(ptr %a, <8 x half> %b, <8 x half> %c, <8 x half> %d) #0 {
 ; CHECK-LABEL: store_128_3:
 ; CHECK: st1 { v0.8h, v1.8h, v2.8h }, [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st1x3.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half>* %a)
+  tail call void @llvm.aarch64.neon.st1x3.v8f16.p0(<8 x half> %b, <8 x half> %c, <8 x half> %d, ptr %a)
   ret void
 }
 
 ; Store 8 x v8f16 without interleaving
-define void @store_128_4(<8 x half>* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e) #0 {
+define void @store_128_4(ptr %a, <8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e) #0 {
 ; CHECK-LABEL: store_128_4:
 ; CHECK: st1 { v0.8h, v1.8h, v2.8h, v3.8h }, [x0]
 entry:
-  tail call void @llvm.aarch64.neon.st1x4.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e, <8 x half>* %a)
+  tail call void @llvm.aarch64.neon.st1x4.v8f16.p0(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e, ptr %a)
   ret void
 }
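
(Editorial note: the renaming in the hunks above follows the usual opaque-pointer rule for overloaded intrinsics: the pointer operand is mangled only by its address space, ".p0", no longer by its pointee type such as ".p0v4f16" or ".p0f16". A minimal sketch of the converted form, using a hypothetical standalone function @example_st2 that is not part of this commit:

  declare void @llvm.aarch64.neon.st2.v4f16.p0(<4 x half>, <4 x half>, ptr)

  define void @example_st2(ptr %dst, <4 x half> %lo, <4 x half> %hi) {
  entry:
    ; The pointer argument is an untyped ptr, so only the address space
    ; appears in the intrinsic name suffix.
    tail call void @llvm.aarch64.neon.st2.v4f16.p0(<4 x half> %lo, <4 x half> %hi, ptr %dst)
    ret void
  })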

diff  --git a/llvm/test/CodeGen/AArch64/fp16-vector-nvcast.ll b/llvm/test/CodeGen/AArch64/fp16-vector-nvcast.ll
index 018c88c5f3ebb..3101c16f1e373 100644
--- a/llvm/test/CodeGen/AArch64/fp16-vector-nvcast.ll
+++ b/llvm/test/CodeGen/AArch64/fp16-vector-nvcast.ll
@@ -1,88 +1,88 @@
 ; RUN: llc < %s -asm-verbose=false -mtriple=aarch64-none-eabi | FileCheck %s
 
 ; Test pattern (v4f16 (AArch64NvCast (v2i32 FPR64:$src)))
-define void @nvcast_v2i32(<4 x half>* %a) #0 {
+define void @nvcast_v2i32(ptr %a) #0 {
 ; CHECK-LABEL: nvcast_v2i32:
 ; CHECK-NEXT: movi v[[REG:[0-9]+]].2s, #171, lsl #16
 ; CHECK-NEXT: str d[[REG]], [x0]
 ; CHECK-NEXT: ret
-  store volatile <4 x half> <half 0xH0000, half 0xH00AB, half 0xH0000, half 0xH00AB>, <4 x half>* %a
+  store volatile <4 x half> <half 0xH0000, half 0xH00AB, half 0xH0000, half 0xH00AB>, ptr %a
   ret void
 }
 
 
 ; Test pattern (v4f16 (AArch64NvCast (v4i16 FPR64:$src)))
-define void @nvcast_v4i16(<4 x half>* %a) #0 {
+define void @nvcast_v4i16(ptr %a) #0 {
 ; CHECK-LABEL: nvcast_v4i16:
 ; CHECK-NEXT: movi v[[REG:[0-9]+]].4h, #171
 ; CHECK-NEXT: str d[[REG]], [x0]
 ; CHECK-NEXT: ret
-  store volatile <4 x half> <half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB>, <4 x half>* %a
+  store volatile <4 x half> <half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB>, ptr %a
   ret void
 }
 
 
 ; Test pattern (v4f16 (AArch64NvCast (v8i8 FPR64:$src)))
-define void @nvcast_v8i8(<4 x half>* %a) #0 {
+define void @nvcast_v8i8(ptr %a) #0 {
 ; CHECK-LABEL: nvcast_v8i8:
 ; CHECK-NEXT: movi v[[REG:[0-9]+]].8b, #171
 ; CHECK-NEXT: str d[[REG]], [x0]
 ; CHECK-NEXT: ret
-  store volatile <4 x half> <half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB>, <4 x half>* %a
+  store volatile <4 x half> <half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB>, ptr %a
   ret void
 }
 
 
 ; Test pattern (v4f16 (AArch64NvCast (f64 FPR64:$src)))
-define void @nvcast_f64(<4 x half>* %a) #0 {
+define void @nvcast_f64(ptr %a) #0 {
 ; CHECK-LABEL: nvcast_f64:
 ; CHECK-NEXT: movi d[[REG:[0-9]+]], #0000000000000000
 ; CHECK-NEXT: str d[[REG]], [x0]
 ; CHECK-NEXT: ret
-  store volatile <4 x half> zeroinitializer, <4 x half>* %a
+  store volatile <4 x half> zeroinitializer, ptr %a
   ret void
 }
 
 ; Test pattern (v8f16 (AArch64NvCast (v4i32 FPR128:$src)))
-define void @nvcast_v4i32(<8 x half>* %a) #0 {
+define void @nvcast_v4i32(ptr %a) #0 {
 ; CHECK-LABEL: nvcast_v4i32:
 ; CHECK-NEXT: movi v[[REG:[0-9]+]].4s, #171, lsl #16
 ; CHECK-NEXT: str q[[REG]], [x0]
 ; CHECK-NEXT: ret
-  store volatile <8 x half> <half 0xH0000, half 0xH00AB, half 0xH0000, half 0xH00AB, half 0xH0000, half 0xH00AB, half 0xH0000, half 0xH00AB>, <8 x half>* %a
+  store volatile <8 x half> <half 0xH0000, half 0xH00AB, half 0xH0000, half 0xH00AB, half 0xH0000, half 0xH00AB, half 0xH0000, half 0xH00AB>, ptr %a
   ret void
 }
 
 
 ; Test pattern (v8f16 (AArch64NvCast (v8i16 FPR128:$src)))
-define void @nvcast_v8i16(<8 x half>* %a) #0 {
+define void @nvcast_v8i16(ptr %a) #0 {
 ; CHECK-LABEL: nvcast_v8i16:
 ; CHECK-NEXT: movi v[[REG:[0-9]+]].8h, #171
 ; CHECK-NEXT: str q[[REG]], [x0]
 ; CHECK-NEXT: ret
-  store volatile <8 x half> <half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB>, <8 x half>* %a
+  store volatile <8 x half> <half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB, half 0xH00AB>, ptr %a
   ret void
 }
 
 
 ; Test pattern (v8f16 (AArch64NvCast (v16i8 FPR128:$src)))
-define void @nvcast_v16i8(<8 x half>* %a) #0 {
+define void @nvcast_v16i8(ptr %a) #0 {
 ; CHECK-LABEL: nvcast_v16i8:
 ; CHECK-NEXT: movi v[[REG:[0-9]+]].16b, #171
 ; CHECK-NEXT: str q[[REG]], [x0]
 ; CHECK-NEXT: ret
-  store volatile <8 x half> <half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB>, <8 x half>* %a
+  store volatile <8 x half> <half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB, half 0xHABAB>, ptr %a
   ret void
 }
 
 
 ; Test pattern (v8f16 (AArch64NvCast (v2i64 FPR128:$src)))
-define void @nvcast_v2i64(<8 x half>* %a) #0 {
+define void @nvcast_v2i64(ptr %a) #0 {
 ; CHECK-LABEL: nvcast_v2i64:
 ; CHECK-NEXT: movi v[[REG:[0-9]+]].2d, #0000000000000000
 ; CHECK-NEXT: str q[[REG]], [x0]
 ; CHECK-NEXT: ret
-  store volatile <8 x half> zeroinitializer, <8 x half>* %a
+  store volatile <8 x half> zeroinitializer, ptr %a
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/fpimm.ll b/llvm/test/CodeGen/AArch64/fpimm.ll
index 10233ded32362..58d79d3e5998e 100644
--- a/llvm/test/CodeGen/AArch64/fpimm.ll
+++ b/llvm/test/CodeGen/AArch64/fpimm.ll
@@ -8,13 +8,13 @@
 define void @check_float() {
 ; CHECK-LABEL: check_float:
 
-  %val = load float, float* @varf32
+  %val = load float, ptr @varf32
   %newval1 = fadd float %val, 8.5
-  store volatile float %newval1, float* @varf32
+  store volatile float %newval1, ptr @varf32
 ; CHECK-DAG: fmov {{s[0-9]+}}, #8.5
 
   %newval2 = fadd float %val, 128.0
-  store volatile float %newval2, float* @varf32
+  store volatile float %newval2, ptr @varf32
 ; CHECK-DAG: movi [[REG:v[0-9s]+]].2s, #67, lsl #24
 
 ; CHECK: ret
@@ -24,13 +24,13 @@ define void @check_float() {
 define void @check_double() {
 ; CHECK-LABEL: check_double:
 
-  %val = load double, double* @varf64
+  %val = load double, ptr @varf64
   %newval1 = fadd double %val, 8.5
-  store volatile double %newval1, double* @varf64
+  store volatile double %newval1, ptr @varf64
 ; CHECK-DAG: fmov {{d[0-9]+}}, #8.5
 
   %newval2 = fadd double %val, 128.0
-  store volatile double %newval2, double* @varf64
+  store volatile double %newval2, ptr @varf64
 ; CHECK-DAG: mov [[X128:x[0-9]+]], #4638707616191610880
 ; CHECK-DAG: fmov {{d[0-9]+}}, [[X128]]
 
@@ -39,7 +39,7 @@ define void @check_double() {
 ; CHECK-DAG: movk [[XFP0]], #64764, lsl #16
 ; CHECk-DAG: fmov {{d[0-9]+}}, [[XFP0]]
   %newval3 = fadd double %val, 0xFCFCFC00FC
-  store volatile double %newval3, double* @varf64
+  store volatile double %newval3, ptr @varf64
 
 ; CHECK: ret
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/frameaddr.ll b/llvm/test/CodeGen/AArch64/frameaddr.ll
index d965809d875e4..69eec1ec2b405 100644
--- a/llvm/test/CodeGen/AArch64/frameaddr.ll
+++ b/llvm/test/CodeGen/AArch64/frameaddr.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -mtriple=aarch64-apple-darwin                             -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s
 
-define i8* @test_frameaddress0() nounwind {
+define ptr @test_frameaddress0() nounwind {
 entry:
 ; CHECK-LABEL: test_frameaddress0:
 ; CHECK: stp x29, x30, [sp, #-16]!
@@ -9,11 +9,11 @@ entry:
 ; CHECK: mov x0, x29
 ; CHECK: ldp x29, x30, [sp], #16
 ; CHECK: ret
-  %0 = call i8* @llvm.frameaddress(i32 0)
-  ret i8* %0
+  %0 = call ptr @llvm.frameaddress(i32 0)
+  ret ptr %0
 }
 
-define i8* @test_frameaddress2() nounwind {
+define ptr @test_frameaddress2() nounwind {
 entry:
 ; CHECK-LABEL: test_frameaddress2:
 ; CHECK: stp x29, x30, [sp, #-16]!
@@ -22,8 +22,8 @@ entry:
 ; CHECK: ldr x0, [x[[reg]]]
 ; CHECK: ldp x29, x30, [sp], #16
 ; CHECK: ret
-  %0 = call i8* @llvm.frameaddress(i32 2)
-  ret i8* %0
+  %0 = call ptr @llvm.frameaddress(i32 2)
+  ret ptr %0
 }
 
-declare i8* @llvm.frameaddress(i32) nounwind readnone
+declare ptr @llvm.frameaddress(i32) nounwind readnone

diff  --git a/llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll b/llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll
index e68710652a509..c5cc10838ebe3 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll
+++ b/llvm/test/CodeGen/AArch64/framelayout-unaligned-fp.ll
@@ -14,12 +14,12 @@ target triple = "aarch64-unknown-linux-gnu"
 define i64 @b() uwtable {
 entry:
   %call = tail call i64 @d()
-  %0 = alloca i8, i64 ptrtoint (i64 ()* @d to i64), align 16
-  %1 = ptrtoint i8* %0 to i64
-  store i64 %1, i64* @a, align 4
+  %0 = alloca i8, i64 ptrtoint (ptr @d to i64), align 16
+  %1 = ptrtoint ptr %0 to i64
+  store i64 %1, ptr @a, align 4
   %call1 = call i64 @e()
   %conv = sitofp i64 %call1 to float
-  %2 = load i64, i64* @a, align 4
+  %2 = load i64, ptr @a, align 4
   %call2 = call i64 @f(i64 %2)
   %conv3 = fptosi float %conv to i64
   ret i64 %conv3

diff  --git a/llvm/test/CodeGen/AArch64/free-zext.ll b/llvm/test/CodeGen/AArch64/free-zext.ll
index ea4f1f4e10f3e..d919ec245addf 100644
--- a/llvm/test/CodeGen/AArch64/free-zext.ll
+++ b/llvm/test/CodeGen/AArch64/free-zext.ll
@@ -1,46 +1,46 @@
 ; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
 
-define i64 @test_free_zext(i8* %a, i16* %b) {
+define i64 @test_free_zext(ptr %a, ptr %b) {
 ; CHECK-LABEL: test_free_zext:
 ; CHECK-DAG: ldrb w[[A:[0-9]+]], [x0]
 ; CHECK: ldrh w[[B:[0-9]+]], [x1]
 ; CHECK: add x0, x[[B]], x[[A]]
-  %1 = load i8, i8* %a, align 1
+  %1 = load i8, ptr %a, align 1
   %conv = zext i8 %1 to i64
-  %2 = load i16, i16* %b, align 2
+  %2 = load i16, ptr %b, align 2
   %conv1 = zext i16 %2 to i64
   %add = add nsw i64 %conv1, %conv
   ret i64 %add
 }
 
-define void @test_free_zext2(i32* %ptr, i32* %dst1, i64* %dst2) {
+define void @test_free_zext2(ptr %ptr, ptr %dst1, ptr %dst2) {
 ; CHECK-LABEL: test_free_zext2:
 ; CHECK: ldrh w[[A:[0-9]+]], [x0]
 ; CHECK-NOT: and x
 ; CHECK: str w[[A]], [x1]
 ; CHECK: str x[[A]], [x2]
-  %load = load i32, i32* %ptr, align 8
+  %load = load i32, ptr %ptr, align 8
   %load16 = and i32 %load, 65535
   %load64 = zext i32 %load16 to i64
-  store i32 %load16, i32* %dst1, align 4
-  store i64 %load64, i64* %dst2, align 8
+  store i32 %load16, ptr %dst1, align 4
+  store i64 %load64, ptr %dst2, align 8
   ret void
 }
 
 ; Test for CodeGenPrepare::optimizeLoadExt(): simple case: two loads
 ; feeding a phi that zext's each loaded value.
-define i32 @test_free_zext3(i32* %ptr, i32* %ptr2, i32* %dst, i32 %c) {
+define i32 @test_free_zext3(ptr %ptr, ptr %ptr2, ptr %dst, i32 %c) {
 ; CHECK-LABEL: test_free_zext3:
 bb1:
 ; CHECK: ldrh [[REG:w[0-9]+]]
 ; CHECK-NOT: and {{w[0-9]+}}, [[REG]], #0xffff
-  %tmp1 = load i32, i32* %ptr, align 4
+  %tmp1 = load i32, ptr %ptr, align 4
   %cmp = icmp ne i32 %c, 0
   br i1 %cmp, label %bb2, label %bb3
 bb2:
 ; CHECK: ldrh [[REG2:w[0-9]+]]
 ; CHECK-NOT: and {{w[0-9]+}}, [[REG2]], #0xffff
-  %tmp2 = load i32, i32* %ptr2, align 4
+  %tmp2 = load i32, ptr %ptr2, align 4
   br label %bb3
 bb3:
   %tmp3 = phi i32 [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
@@ -51,19 +51,19 @@ bb3:
 
 ; Test for CodeGenPrepare::optimizeLoadExt(): check case of zext-able
 ; load feeding a phi in the same block.
-define void @test_free_zext4(i32* %ptr, i32* %ptr2, i32* %dst) {
+define void @test_free_zext4(ptr %ptr, ptr %ptr2, ptr %dst) {
 ; CHECK-LABEL: test_free_zext4:
 ; CHECK: ldrh [[REG:w[0-9]+]]
 ; TODO: fix isel to remove final and XCHECK-NOT: and {{w[0-9]+}}, {{w[0-9]+}}, #0xffff
 ; CHECK: ldrh [[REG:w[0-9]+]]
 bb1:
-  %load1 = load i32, i32* %ptr, align 4
+  %load1 = load i32, ptr %ptr, align 4
   br label %loop
 loop:
   %phi = phi i32 [ %load1, %bb1 ], [ %load2, %loop ]
   %and = and i32 %phi, 65535
-  store i32 %and, i32* %dst, align 4
-  %load2 = load i32, i32* %ptr2, align 4
+  store i32 %and, ptr %dst, align 4
+  %load2 = load i32, ptr %ptr2, align 4
   %cmp = icmp ne i32 %and, 0
   br i1 %cmp, label %loop, label %end
 end:

diff  --git a/llvm/test/CodeGen/AArch64/func-argpassing.ll b/llvm/test/CodeGen/AArch64/func-argpassing.ll
index 0f9342adb32fa..9d10f6cb23583 100644
--- a/llvm/test/CodeGen/AArch64/func-argpassing.ll
+++ b/llvm/test/CodeGen/AArch64/func-argpassing.ll
@@ -13,7 +13,7 @@
 
 define dso_local void @take_i8s(i8 %val1, i8 %val2) {
 ; CHECK-LABEL: take_i8s:
-    store i8 %val2, i8* @var8
+    store i8 %val2, ptr @var8
     ; Not using w1 may be technically allowed, but it would indicate a
     ; problem in itself.
 ;  CHECK: strb w1, [{{x[0-9]+}}, {{#?}}:lo12:var8]
@@ -25,48 +25,46 @@ define dso_local void @add_floats(float %val1, float %val2) {
     %newval = fadd float %val1, %val2
 ; CHECK: fadd [[ADDRES:s[0-9]+]], s0, s1
 ; CHECK-NOFP-NOT: fadd
-    store float %newval, float* @varfloat
+    store float %newval, ptr @varfloat
 ; CHECK: str [[ADDRES]], [{{x[0-9]+}}, {{#?}}:lo12:varfloat]
     ret void
 }
 
 ; byval pointers should be allocated to the stack and copied as if
 ; with memcpy.
-define dso_local void @take_struct(%myStruct* byval(%myStruct) %structval) {
+define dso_local void @take_struct(ptr byval(%myStruct) %structval) {
 ; CHECK-LABEL: take_struct:
-    %addr0 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 2
-    %addr1 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 0
+    %addr0 = getelementptr %myStruct, ptr %structval, i64 0, i32 2
 
-    %val0 = load volatile i32, i32* %addr0
+    %val0 = load volatile i32, ptr %addr0
     ; Some weird move means x0 is used for one access
 ; CHECK: ldr [[REG32:w[0-9]+]], [{{x[0-9]+|sp}}, #12]
-    store volatile i32 %val0, i32* @var32
+    store volatile i32 %val0, ptr @var32
 ; CHECK: str [[REG32]], [{{x[0-9]+}}, {{#?}}:lo12:var32]
 
-    %val1 = load volatile i64, i64* %addr1
+    %val1 = load volatile i64, ptr %structval
 ; CHECK: ldr [[REG64:x[0-9]+]], [{{x[0-9]+|sp}}]
-    store volatile i64 %val1, i64* @var64
+    store volatile i64 %val1, ptr @var64
 ; CHECK: str [[REG64]], [{{x[0-9]+}}, {{#?}}:lo12:var64]
 
     ret void
 }
 
 ; %structval should be at sp + 16
-define dso_local void @check_byval_align(i32* byval(i32) %ignore, %myStruct* byval(%myStruct) align 16 %structval) {
+define dso_local void @check_byval_align(ptr byval(i32) %ignore, ptr byval(%myStruct) align 16 %structval) {
 ; CHECK-LABEL: check_byval_align:
 
-    %addr0 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 2
-    %addr1 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 0
+    %addr0 = getelementptr %myStruct, ptr %structval, i64 0, i32 2
 
-    %val0 = load volatile i32, i32* %addr0
+    %val0 = load volatile i32, ptr %addr0
     ; Some weird move means x0 is used for one access
 ; CHECK: ldr [[REG32:w[0-9]+]], [sp, #28]
-    store i32 %val0, i32* @var32
+    store i32 %val0, ptr @var32
 ; CHECK: str [[REG32]], [{{x[0-9]+}}, {{#?}}:lo12:var32]
 
-    %val1 = load volatile i64, i64* %addr1
+    %val1 = load volatile i64, ptr %structval
 ; CHECK: ldr [[REG64:x[0-9]+]], [sp, #16]
-    store i64 %val1, i64* @var64
+    store i64 %val1, ptr @var64
 ; CHECK: str [[REG64]], [{{x[0-9]+}}, {{#?}}:lo12:var64]
 
     ret void
@@ -74,7 +72,7 @@ define dso_local void @check_byval_align(i32* byval(i32) %ignore, %myStruct* byv
 
 define dso_local i32 @return_int() {
 ; CHECK-LABEL: return_int:
-    %val = load i32, i32* @var32
+    %val = load i32, ptr @var32
     ret i32 %val
 ; CHECK: ldr w0, [{{x[0-9]+}}, {{#?}}:lo12:var32]
     ; Make sure epilogue follows
@@ -93,8 +91,7 @@ define dso_local double @return_double() {
 ; works.
 define [2 x i64] @return_struct() {
 ; CHECK-LABEL: return_struct:
-    %addr = bitcast %myStruct* @varstruct to [2 x i64]*
-    %val = load [2 x i64], [2 x i64]* %addr
+    %val = load [2 x i64], ptr @varstruct
     ret [2 x i64] %val
 ; CHECK: add x[[VARSTRUCT:[0-9]+]], {{x[0-9]+}}, :lo12:varstruct
 ; CHECK: ldp x0, x1, [x[[VARSTRUCT]]]
@@ -106,15 +103,14 @@ define [2 x i64] @return_struct() {
 ; to preserve value semantics) in x8. Strictly this only applies to
 ; structs larger than 16 bytes, but C semantics can still be provided
 ; if LLVM does it to %myStruct too. So this is the simplest check
-define dso_local void @return_large_struct(%myStruct* sret(%myStruct) %retval) {
+define dso_local void @return_large_struct(ptr sret(%myStruct) %retval) {
 ; CHECK-LABEL: return_large_struct:
-    %addr0 = getelementptr %myStruct, %myStruct* %retval, i64 0, i32 0
-    %addr1 = getelementptr %myStruct, %myStruct* %retval, i64 0, i32 1
-    %addr2 = getelementptr %myStruct, %myStruct* %retval, i64 0, i32 2
+    %addr1 = getelementptr %myStruct, ptr %retval, i64 0, i32 1
+    %addr2 = getelementptr %myStruct, ptr %retval, i64 0, i32 2
 
-    store i64 42, i64* %addr0
-    store i8 2, i8* %addr1
-    store i32 9, i32* %addr2
+    store i64 42, ptr %retval
+    store i8 2, ptr %addr1
+    store i32 9, ptr %addr2
 ; CHECK: str {{x[0-9]+}}, [x8]
 ; CHECK: strb {{w[0-9]+}}, [x8, #8]
 ; CHECK: str {{w[0-9]+}}, [x8, #12]
@@ -126,22 +122,21 @@ define dso_local void @return_large_struct(%myStruct* sret(%myStruct) %retval) {
 ; available, but it needs two). Also make sure that %stacked doesn't
 ; sneak into x7 behind.
 define dso_local i32 @struct_on_stack(i8 %var0, i16 %var1, i32 %var2, i64 %var3, i128 %var45,
-                          i32* %var6, %myStruct* byval(%myStruct) %struct, i32* byval(i32) %stacked,
+                          ptr %var6, ptr byval(%myStruct) %struct, ptr byval(i32) %stacked,
                           double %notstacked) {
 ; CHECK-LABEL: struct_on_stack:
-    %addr = getelementptr %myStruct, %myStruct* %struct, i64 0, i32 0
-    %val64 = load volatile i64, i64* %addr
-    store volatile i64 %val64, i64* @var64
+    %val64 = load volatile i64, ptr %struct
+    store volatile i64 %val64, ptr @var64
     ; Currently nothing on local stack, so struct should be at sp
 ; CHECK: ldr [[VAL64:x[0-9]+]], [sp]
 ; CHECK: str [[VAL64]], [{{x[0-9]+}}, {{#?}}:lo12:var64]
 
-    store volatile double %notstacked, double* @vardouble
+    store volatile double %notstacked, ptr @vardouble
 ; CHECK-NOT: ldr d0
 ; CHECK: str d0, [{{x[0-9]+}}, {{#?}}:lo12:vardouble
 ; CHECK-NOFP-NOT: str d0,
 
-    %retval = load volatile i32, i32* %stacked
+    %retval = load volatile i32, ptr %stacked
     ret i32 %retval
 ; CHECK-LE: ldr w0, [sp, #16]
 }
@@ -150,7 +145,7 @@ define dso_local void @stacked_fpu(float %var0, double %var1, float %var2, float
                          float %var4, float %var5, float %var6, float %var7,
                          float %var8) {
 ; CHECK-LABEL: stacked_fpu:
-    store float %var8, float* @varfloat
+    store float %var8, ptr @varfloat
     ; Beware as above: the offset would be different on big-endian
     ; machines if the first ldr were changed to use s-registers.
 ; CHECK: ldr {{[ds]}}[[VALFLOAT:[0-9]+]], [sp]
@@ -163,7 +158,7 @@ define dso_local void @stacked_fpu(float %var0, double %var1, float %var2, float
 ; the reverse. In this case x2 and x3. Nothing should use x1.
 define dso_local i64 @check_i128_regalign(i32 %val0, i128 %val1, i64 %val2) {
 ; CHECK-LABEL: check_i128_regalign
-    store i128 %val1, i128* @var128
+    store i128 %val1, ptr @var128
 ; CHECK-DAG: add x[[VAR128:[0-9]+]], {{x[0-9]+}}, :lo12:var128
 ; CHECK-DAG: stp x2, x3, [x[[VAR128]]]
 
@@ -175,7 +170,7 @@ define dso_local void @check_i128_stackalign(i32 %val0, i32 %val1, i32 %val2, i3
                                    i32 %val4, i32 %val5, i32 %val6, i32 %val7,
                                    i32 %stack1, i128 %stack2) {
 ; CHECK-LABEL: check_i128_stackalign
-    store i128 %stack2, i128* @var128
+    store i128 %stack2, ptr @var128
     ; Nothing local on stack in current codegen, so first stack is 16 away
 ; CHECK-LE: add     x[[REG:[0-9]+]], sp, #16
 ; CHECK-LE: ldr {{x[0-9]+}}, [x[[REG]], #8]
@@ -186,11 +181,11 @@ define dso_local void @check_i128_stackalign(i32 %val0, i32 %val1, i32 %val2, i3
     ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i1)
+declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)
 
 define dso_local i32 @test_extern() {
 ; CHECK-LABEL: test_extern:
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 undef, i8* align 4 undef, i32 undef, i1 0)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 undef, ptr align 4 undef, i32 undef, i1 0)
 ; CHECK: bl memcpy
   ret i32 0
 }
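
(Editorial note: several hunks in this file also delete getelementptr-to-field-0 and bitcast instructions outright, because with opaque pointers the incoming ptr is already usable as the address of the first member. A rough sketch of that pattern, using a hypothetical %pair type and functions not taken from the test:

  %pair = type { i64, i32 }

  define i64 @first_field(ptr %p) {
    ; Field 0 lives at offset 0, so the struct pointer is loaded from directly;
    ; no 'getelementptr %pair, ptr %p, i64 0, i32 0' is needed.
    %val = load i64, ptr %p
    ret i64 %val
  }

  define i32 @second_field(ptr %p) {
    ; Non-zero fields still go through getelementptr with an explicit source type.
    %addr = getelementptr inbounds %pair, ptr %p, i64 0, i32 1
    %val = load i32, ptr %addr
    ret i32 %val
  })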

diff  --git a/llvm/test/CodeGen/AArch64/func-calls.ll b/llvm/test/CodeGen/AArch64/func-calls.ll
index 4403f934d9943..ec7bcdd7a7ba4 100644
--- a/llvm/test/CodeGen/AArch64/func-calls.ll
+++ b/llvm/test/CodeGen/AArch64/func-calls.ll
@@ -21,15 +21,15 @@ declare void @take_floats(float %val1, float %val2)
 
 define dso_local void @simple_args() {
 ; CHECK-LABEL: simple_args:
-  %char1 = load i8, i8* @var8
-  %char2 = load i8, i8* @var8_2
+  %char1 = load i8, ptr @var8
+  %char2 = load i8, ptr @var8_2
   call void @take_i8s(i8 %char1, i8 %char2)
 ; CHECK-DAG: ldrb w0, [{{x[0-9]+}}, {{#?}}:lo12:var8]
 ; CHECK-DAG: ldrb w1, [{{x[0-9]+}}, {{#?}}:lo12:var8_2]
 ; CHECK: bl take_i8s
 
-  %float1 = load float, float* @varfloat
-  %float2 = load float, float* @varfloat_2
+  %float1 = load float, ptr @varfloat
+  %float2 = load float, ptr @varfloat_2
   call void @take_floats(float %float1, float %float2)
 ; CHECK-DAG: ldr s1, [{{x[0-9]+}}, {{#?}}:lo12:varfloat_2]
 ; CHECK-DAG: ldr s0, [{{x[0-9]+}}, {{#?}}:lo12:varfloat]
@@ -43,29 +43,29 @@ define dso_local void @simple_args() {
 declare i32 @return_int()
 declare double @return_double()
 declare [2 x i64] @return_smallstruct()
-declare void @return_large_struct(%myStruct* sret(%myStruct) %retval)
+declare void @return_large_struct(ptr sret(%myStruct) %retval)
 
 define dso_local void @simple_rets() {
 ; CHECK-LABEL: simple_rets:
 
   %int = call i32 @return_int()
-  store i32 %int, i32* @var32
+  store i32 %int, ptr @var32
 ; CHECK: bl return_int
 ; CHECK: str w0, [{{x[0-9]+}}, {{#?}}:lo12:var32]
 
   %dbl = call double @return_double()
-  store double %dbl, double* @vardouble
+  store double %dbl, ptr @vardouble
 ; CHECK: bl return_double
 ; CHECK: str d0, [{{x[0-9]+}}, {{#?}}:lo12:vardouble]
 ; CHECK-NOFP-NOT: str d0,
 
   %arr = call [2 x i64] @return_smallstruct()
-  store [2 x i64] %arr, [2 x i64]* @varsmallstruct
+  store [2 x i64] %arr, ptr @varsmallstruct
 ; CHECK: bl return_smallstruct
 ; CHECK: add x[[VARSMALLSTRUCT:[0-9]+]], {{x[0-9]+}}, :lo12:varsmallstruct
 ; CHECK: stp x0, x1, [x[[VARSMALLSTRUCT]]]
 
-  call void @return_large_struct(%myStruct* sret(%myStruct) @varstruct)
+  call void @return_large_struct(ptr sret(%myStruct) @varstruct)
 ; CHECK: add x8, {{x[0-9]+}}, {{#?}}:lo12:varstruct
 ; CHECK: bl return_large_struct
 
@@ -74,7 +74,7 @@ define dso_local void @simple_rets() {
 
 
 declare i32 @struct_on_stack(i8 %var0, i16 %var1, i32 %var2, i64 %var3, i128 %var45,
-                             i32* %var6, %myStruct* byval(%myStruct) %struct, i32 %stacked,
+                             ptr %var6, ptr byval(%myStruct) %struct, i32 %stacked,
                              double %notstacked)
 declare void @stacked_fpu(float %var0, double %var1, float %var2, float %var3,
                           float %var4, float %var5, float %var6, float %var7,
@@ -83,7 +83,7 @@ declare void @stacked_fpu(float %var0, double %var1, float %var2, float %var3,
 define dso_local void @check_stack_args() {
 ; CHECK-LABEL: check_stack_args:
   call i32 @struct_on_stack(i8 0, i16 12, i32 42, i64 99, i128 1,
-                            i32* @var32, %myStruct* byval(%myStruct) @varstruct,
+                            ptr @var32, ptr byval(%myStruct) @varstruct,
                             i32 999, double 1.0)
   ; Want to check that the final double is passed in registers and
   ; that varstruct is passed on the stack. Rather dependent on how a
@@ -122,7 +122,7 @@ declare void @check_i128_regalign(i32 %val0, i128 %val1)
 
 define dso_local void @check_i128_align() {
 ; CHECK-LABEL: check_i128_align:
-  %val = load i128, i128* @var128
+  %val = load i128, ptr @var128
   call void @check_i128_stackalign(i32 0, i32 1, i32 2, i32 3,
                                    i32 4, i32 5, i32 6, i32 7,
                                    i32 42, i128 %val)
@@ -146,11 +146,11 @@ define dso_local void @check_i128_align() {
   ret void
 }
 
-@fptr = dso_local global void()* null
+@fptr = dso_local global ptr null
 
 define dso_local void @check_indirect_call() {
 ; CHECK-LABEL: check_indirect_call:
-  %func = load void()*, void()** @fptr
+  %func = load ptr, ptr @fptr
   call void %func()
 ; CHECK: ldr [[FPTR:x[0-9]+]], [{{x[0-9]+}}, {{#?}}:lo12:fptr]
 ; CHECK: blr [[FPTR]]

diff  --git a/llvm/test/CodeGen/AArch64/funclet-local-stack-size.ll b/llvm/test/CodeGen/AArch64/funclet-local-stack-size.ll
index 3b6ca6a94c19e..3e6eefc303ad5 100644
--- a/llvm/test/CodeGen/AArch64/funclet-local-stack-size.ll
+++ b/llvm/test/CodeGen/AArch64/funclet-local-stack-size.ll
@@ -5,42 +5,40 @@
 target datalayout = "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-unknown-windows-msvc19.11.0"
 
-%rtti.TypeDescriptor2 = type { i8**, i8*, [3 x i8] }
+%rtti.TypeDescriptor2 = type { ptr, ptr, [3 x i8] }
 
 $"??_R0H at 8" = comdat any
 
-@"??_7type_info@@6B@" = external constant i8*
-@"??_R0H at 8" = linkonce_odr global %rtti.TypeDescriptor2 { i8** @"??_7type_info@@6B@", i8* null, [3 x i8] c".H\00" }, comdat
+@"??_7type_info@@6B@" = external constant ptr
+@"??_R0H at 8" = linkonce_odr global %rtti.TypeDescriptor2 { ptr @"??_7type_info@@6B@", ptr null, [3 x i8] c".H\00" }, comdat
 
 ; CHECK-LABEL: ?catch$2@?0??func@@YAHHHZZ@4HA
 ; CHECK: stp x29, x30, [sp, #-16]!
 ; CHECK: ldp x29, x30, [sp], #16
 ; Function Attrs: uwtable
-define dso_local i32 @"?func@@YAHHHZZ"(i32 %a, i32, ...) local_unnamed_addr #0 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define dso_local i32 @"?func@@YAHHHZZ"(i32 %a, i32, ...) local_unnamed_addr #0 personality ptr @__CxxFrameHandler3 {
 entry:
   %arr = alloca [10 x i32], align 4
   %a2 = alloca i32, align 4
-  %1 = bitcast [10 x i32]* %arr to i8*
-  %arraydecay = getelementptr inbounds [10 x i32], [10 x i32]* %arr, i64 0, i64 0
-  %call = call i32 @"?init@@YAHPEAH at Z"(i32* nonnull %arraydecay)
+  %call = call i32 @"?init@@YAHPEAH at Z"(ptr nonnull %arr)
   %call1 = invoke i32 @"?func2@@YAHXZ"()
           to label %cleanup unwind label %catch.dispatch
 
 catch.dispatch:                                   ; preds = %entry
-  %2 = catchswitch within none [label %catch] unwind to caller
+  %1 = catchswitch within none [label %catch] unwind to caller
 
 catch:                                            ; preds = %catch.dispatch
-  %3 = catchpad within %2 [%rtti.TypeDescriptor2* @"??_R0H@8", i32 0, i32* %a2]
-  %4 = load i32, i32* %a2, align 4
-  %add = add nsw i32 %4, 1
-  catchret from %3 to label %cleanup
+  %2 = catchpad within %1 [ptr @"??_R0H@8", i32 0, ptr %a2]
+  %3 = load i32, ptr %a2, align 4
+  %add = add nsw i32 %3, 1
+  catchret from %2 to label %cleanup
 
 cleanup:                                          ; preds = %entry, %catch
   %retval.0 = phi i32 [ %add, %catch ], [ %call1, %entry ]
   ret i32 %retval.0
 }
 
-declare dso_local i32 @"?init@@YAHPEAH at Z"(i32*)
+declare dso_local i32 @"?init@@YAHPEAH at Z"(ptr)
 
 declare dso_local i32 @"?func2@@YAHXZ"()
 

diff  --git a/llvm/test/CodeGen/AArch64/funclet-match-add-sub-stack.ll b/llvm/test/CodeGen/AArch64/funclet-match-add-sub-stack.ll
index 67e9c49675cfd..19fe9b85aff2c 100644
--- a/llvm/test/CodeGen/AArch64/funclet-match-add-sub-stack.ll
+++ b/llvm/test/CodeGen/AArch64/funclet-match-add-sub-stack.ll
@@ -5,13 +5,13 @@ target datalayout = "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-pc-windows-msvc19.25.28611"
 
 ; // requires passing arguments on the stack
-; void test2(void*, int, int, int, int, int, int, int, int);
+; void test2(ptr, int, int, int, int, int, int, int, int);
 ;
 ; // function with the funclet being checked
 ; void test1(size_t bytes)
 ; {
 ;   // alloca forces a separate callee save bump and stack bump
-;   void *data = _alloca(bytes);
+;   ptr data = _alloca(bytes);
 ;   try {
 ;     test2(data, 0, 1, 2, 3, 4, 5, 6, 7);
 ;   } catch (...) {
@@ -23,22 +23,22 @@ target triple = "aarch64-pc-windows-msvc19.25.28611"
 ; CHECK: sub sp, sp, #16
 ; CHECK: add sp, sp, #16
 ; Function Attrs: uwtable
-define dso_local void @"?test1@@YAX_K at Z"(i64 %0) #0 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define dso_local void @"?test1@@YAX_K at Z"(i64 %0) #0 personality ptr @__CxxFrameHandler3 {
   %2 = alloca i64, align 8
-  %3 = alloca i8*, align 8
-  store i64 %0, i64* %2, align 8
-  %4 = load i64, i64* %2, align 8
+  %3 = alloca ptr, align 8
+  store i64 %0, ptr %2, align 8
+  %4 = load i64, ptr %2, align 8
   %5 = alloca i8, i64 %4, align 16
-  store i8* %5, i8** %3, align 8
-  %6 = load i8*, i8** %3, align 8
-  invoke void @"?test2@@YAXPEAXHHHHHHHH at Z"(i8* %6, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7)
+  store ptr %5, ptr %3, align 8
+  %6 = load ptr, ptr %3, align 8
+  invoke void @"?test2@@YAXPEAXHHHHHHHH at Z"(ptr %6, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7)
           to label %13 unwind label %7
 
 7:                                                ; preds = %1
   %8 = catchswitch within none [label %9] unwind to caller
 
 9:                                                ; preds = %7
-  %10 = catchpad within %8 [i8* null, i32 64, i8* null]
+  %10 = catchpad within %8 [ptr null, i32 64, ptr null]
   catchret from %10 to label %11
 
 11:                                               ; preds = %9
@@ -51,7 +51,7 @@ define dso_local void @"?test1@@YAX_K@Z"(i64 %0) #0 personality i8* bitcast (i32
   br label %12
 }
 
-declare dso_local void @"?test2@@YAXPEAXHHHHHHHH at Z"(i8*, i32, i32, i32, i32, i32, i32, i32, i32) #1
+declare dso_local void @"?test2@@YAXPEAXHHHHHHHH at Z"(ptr, i32, i32, i32, i32, i32, i32, i32, i32) #1
 
 declare dso_local i32 @__CxxFrameHandler3(...)
 

diff  --git a/llvm/test/CodeGen/AArch64/funcptr_cast.ll b/llvm/test/CodeGen/AArch64/funcptr_cast.ll
index 406eccf6324ca..49b8764c7ddd1 100644
--- a/llvm/test/CodeGen/AArch64/funcptr_cast.ll
+++ b/llvm/test/CodeGen/AArch64/funcptr_cast.ll
@@ -8,7 +8,7 @@ define i8 @test() {
 ; CHECK-NEXT:    ldrb w0, [x8, :lo12:foo]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i8, i8* bitcast (void (...)* @foo to i8*), align 1
+  %0 = load i8, ptr @foo, align 1
   ret i8 %0
 }
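
(Editorial note: the funcptr_cast.ll change above also drops a constant-expression bitcast, since a function symbol is itself a ptr and can be loaded from directly. A hedged sketch of the same idea with a hypothetical external declaration @callee, not taken from the test:

  declare void @callee(...)

  define i8 @first_byte_of_callee() {
    ; No 'bitcast (void (...)* @callee to i8*)' wrapper is required under
    ; opaque pointers; @callee already has type ptr.
    %b = load i8, ptr @callee, align 1
    ret i8 %b
  })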
 

diff  --git a/llvm/test/CodeGen/AArch64/gep-nullptr.ll b/llvm/test/CodeGen/AArch64/gep-nullptr.ll
index e5e359c0b668d..8ac4314324ef4 100644
--- a/llvm/test/CodeGen/AArch64/gep-nullptr.ll
+++ b/llvm/test/CodeGen/AArch64/gep-nullptr.ll
@@ -2,11 +2,11 @@
 target datalayout = "e-m:e-i64:64-i128:128-n8:16:32:64-S128"
 target triple = "aarch64--linux-gnu"
 
-%structA = type { i8, i8, i8, i8, i8, i8, [4 x i8], i8, i8, [2 x i32], [2 x %unionMV], [4 x [2 x %unionMV]], [4 x [2 x %unionMV]], [4 x i8], i8*, i8*, i32, i8* }
+%structA = type { i8, i8, i8, i8, i8, i8, [4 x i8], i8, i8, [2 x i32], [2 x %unionMV], [4 x [2 x %unionMV]], [4 x [2 x %unionMV]], [4 x i8], ptr, ptr, i32, ptr }
 %unionMV = type { i32 }
 
 ; Function Attrs: nounwind
-define void @test(%structA* %mi_block) {
+define void @test(ptr %mi_block) {
 entry:
   br i1 undef, label %for.body13.us, label %if.else
 
@@ -14,7 +14,7 @@ entry:
 ; CHECK-LABEL: test
 for.body13.us:                                    ; preds = %entry
   %indvars.iv.next40 = or i64 0, 1
-  %packed4.i.us.1 = getelementptr inbounds %structA, %structA* %mi_block, i64 0, i32 11, i64 0, i64 %indvars.iv.next40, i32 0
+  %packed4.i.us.1 = getelementptr inbounds %structA, ptr %mi_block, i64 0, i32 11, i64 0, i64 %indvars.iv.next40, i32 0
   unreachable
 
 if.else:                                          ; preds = %entry

diff  --git a/llvm/test/CodeGen/AArch64/ghc-cc.ll b/llvm/test/CodeGen/AArch64/ghc-cc.ll
index 390cce869fff0..5db7336ac47c2 100644
--- a/llvm/test/CodeGen/AArch64/ghc-cc.ll
+++ b/llvm/test/CodeGen/AArch64/ghc-cc.ll
@@ -51,7 +51,7 @@ entry:
   ; CHECK-NEXT:  bl      bar_i64
   ; CHECK-NEXT:  ret
 
-  %0 = load i64, i64* @base
+  %0 = load i64, ptr @base
   tail call ghccc void @bar_i64( i64 %0 ) nounwind
   ret void
 }
@@ -64,7 +64,7 @@ entry:
   ; CHECK-NEXT:  bl      bar_float
   ; CHECK-NEXT:  ret
 
-  %0 = load float, float* @f1
+  %0 = load float, ptr @f1
   tail call ghccc void @bar_float( float %0 ) nounwind
   ret void
 }
@@ -77,7 +77,7 @@ entry:
   ; CHECK-NEXT:  bl      bar_double
   ; CHECK-NEXT:  ret
 
-  %0 = load double, double* @d1
+  %0 = load double, ptr @d1
   tail call ghccc void @bar_double( double %0 ) nounwind
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/global-alignment.ll b/llvm/test/CodeGen/AArch64/global-alignment.ll
index 1516ac2569e54..0de44fb321b31 100644
--- a/llvm/test/CodeGen/AArch64/global-alignment.ll
+++ b/llvm/test/CodeGen/AArch64/global-alignment.ll
@@ -3,15 +3,14 @@
 @var32 = dso_local global [3 x i32] zeroinitializer
 @var64 = dso_local global [3 x i64] zeroinitializer
 @var32_align64 = dso_local global [3 x i32] zeroinitializer, align 8
-@alias = dso_local alias [3 x i32], [3 x i32]* @var32_align64
+@alias = dso_local alias [3 x i32], ptr @var32_align64
 
 define dso_local i64 @test_align32() {
 ; CHECK-LABEL: test_align32:
-  %addr = bitcast [3 x i32]* @var32 to i64*
 
   ; Since @var32 is only guaranteed to be aligned to 32-bits, it's invalid to
   ; emit an "LDR x0, [x0, #:lo12:var32] instruction to implement this load.
-  %val = load i64, i64* %addr
+  %val = load i64, ptr @var32
 ; CHECK: adrp [[HIBITS:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[HIBITS]], {{#?}}:lo12:var32
 ; CHECK: ldr x0, [x[[ADDR]]]
@@ -21,11 +20,10 @@ define dso_local i64 @test_align32() {
 
 define dso_local i64 @test_align64() {
 ; CHECK-LABEL: test_align64:
-  %addr = bitcast [3 x i64]* @var64 to i64*
 
   ; However, var64 *is* properly aligned and emitting an adrp/add/ldr would be
   ; inefficient.
-  %val = load i64, i64* %addr
+  %val = load i64, ptr @var64
 ; CHECK: adrp x[[HIBITS:[0-9]+]], var64
 ; CHECK-NOT: add x[[HIBITS]]
 ; CHECK: ldr x0, [x[[HIBITS]], {{#?}}:lo12:var64]
@@ -35,11 +33,10 @@ define dso_local i64 @test_align64() {
 
 define dso_local i64 @test_var32_align64() {
 ; CHECK-LABEL: test_var32_align64:
-  %addr = bitcast [3 x i32]* @var32_align64 to i64*
 
   ; Since @var32 is only guaranteed to be aligned to 32-bits, it's invalid to
   ; emit an "LDR x0, [x0, #:lo12:var32] instruction to implement this load.
-  %val = load i64, i64* %addr
+  %val = load i64, ptr @var32_align64
 ; CHECK: adrp x[[HIBITS:[0-9]+]], var32_align64
 ; CHECK-NOT: add x[[HIBITS]]
 ; CHECK: ldr x0, [x[[HIBITS]], {{#?}}:lo12:var32_align64]
@@ -49,10 +46,9 @@ define dso_local i64 @test_var32_align64() {
 
 define dso_local i64 @test_var32_alias() {
 ; CHECK-LABEL: test_var32_alias:
-  %addr = bitcast [3 x i32]* @alias to i64*
 
   ; We don't know anything about the alignment of aliases.
-  %val = load i64, i64* %addr
+  %val = load i64, ptr @alias
 ; CHECK: adrp x[[HIBITS:[0-9]+]], alias
 ; CHECK: add x[[ADDR:[0-9]+]], x[[HIBITS]], {{#?}}:lo12:alias
 ; CHECK: ldr x0, [x[[ADDR]]]
@@ -68,16 +64,16 @@ define dso_local i64 @test_yet_another_var() {
   ; @yet_another_var has a preferred alignment of 8, but that's not enough if
   ; we're going to be linking against other things. Its ABI alignment is only 4
   ; so we can't fold the load.
-  %val = load i64, i64* bitcast({i32, i32}* @yet_another_var to i64*)
+  %val = load i64, ptr @yet_another_var
 ; CHECK: adrp [[HIBITS:x[0-9]+]], yet_another_var
 ; CHECK: add x[[ADDR:[0-9]+]], [[HIBITS]], {{#?}}:lo12:yet_another_var
 ; CHECK: ldr x0, [x[[ADDR]]]
   ret i64 %val
 }
 
-define dso_local i64()* @test_functions() {
+define dso_local ptr @test_functions() {
 ; CHECK-LABEL: test_functions:
-  ret i64()* @test_yet_another_var
+  ret ptr @test_yet_another_var
 ; CHECK: adrp [[HIBITS:x[0-9]+]], test_yet_another_var
 ; CHECK: add x0, [[HIBITS]], {{#?}}:lo12:test_yet_another_var
 }

diff  --git a/llvm/test/CodeGen/AArch64/global-merge-1.ll b/llvm/test/CodeGen/AArch64/global-merge-1.ll
index 4b110baa18d4c..cc17e344c211a 100644
--- a/llvm/test/CodeGen/AArch64/global-merge-1.ll
+++ b/llvm/test/CodeGen/AArch64/global-merge-1.ll
@@ -15,8 +15,8 @@ define void @f1(i32 %a1, i32 %a2) {
 ;CHECK-APPLE-IOS: adrp	x8, __MergedGlobals@PAGE
 ;CHECK-APPLE-IOS-NOT: adrp
 ;CHECK-APPLE-IOS: add	x8, x8, __MergedGlobals@PAGEOFF
-  store i32 %a1, i32* @m, align 4
-  store i32 %a2, i32* @n, align 4
+  store i32 %a1, ptr @m, align 4
+  store i32 %a2, ptr @n, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/global-merge-2.ll b/llvm/test/CodeGen/AArch64/global-merge-2.ll
index 92a0398c68cb9..85d814c3177b3 100644
--- a/llvm/test/CodeGen/AArch64/global-merge-2.ll
+++ b/llvm/test/CodeGen/AArch64/global-merge-2.ll
@@ -12,8 +12,8 @@ define dso_local void @f1(i32 %a1, i32 %a2) {
 ;CHECK-APPLE-IOS: adrp	x8, __MergedGlobals_x@PAGE
 ;CHECK-APPLE-IOS: add	x8, x8, __MergedGlobals_x@PAGEOFF
 ;CHECK-APPLE-IOS-NOT: adrp
-  store i32 %a1, i32* @x, align 4
-  store i32 %a2, i32* @y, align 4
+  store i32 %a1, ptr @x, align 4
+  store i32 %a2, ptr @y, align 4
   ret void
 }
 
@@ -22,8 +22,8 @@ define dso_local void @g1(i32 %a1, i32 %a2) {
 ;CHECK-APPLE-IOS: adrp	x8, __MergedGlobals_x@PAGE
 ;CHECK-APPLE-IOS: add	x8, x8, __MergedGlobals_x@PAGEOFF
 ;CHECK-APPLE-IOS-NOT: adrp
-  store i32 %a1, i32* @y, align 4
-  store i32 %a2, i32* @z, align 4
+  store i32 %a1, ptr @y, align 4
+  store i32 %a2, ptr @z, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/global-merge-3.ll b/llvm/test/CodeGen/AArch64/global-merge-3.ll
index cb57c2d9af53d..1fdae070cb90e 100644
--- a/llvm/test/CodeGen/AArch64/global-merge-3.ll
+++ b/llvm/test/CodeGen/AArch64/global-merge-3.ll
@@ -20,11 +20,11 @@ define dso_local void @f1(i32 %a1, i32 %a2, i32 %a3) {
 ;CHECK: str     w1, [x9, #400]
 ;CHECK: str     w0, [x9]
 ;CHECK: str     w2, [x8, :lo12:z]
-  %x3 = getelementptr inbounds [100 x i32], [100 x i32]* @x, i32 0, i64 3
-  %y3 = getelementptr inbounds [100 x i32], [100 x i32]* @y, i32 0, i64 3
-  store i32 %a1, i32* %x3, align 4
-  store i32 %a2, i32* %y3, align 4
-  store i32 %a3, i32* @z, align 4
+  %x3 = getelementptr inbounds [100 x i32], ptr @x, i32 0, i64 3
+  %y3 = getelementptr inbounds [100 x i32], ptr @y, i32 0, i64 3
+  store i32 %a1, ptr %x3, align 4
+  store i32 %a2, ptr %y3, align 4
+  store i32 %a3, ptr @z, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/global-merge-4.ll b/llvm/test/CodeGen/AArch64/global-merge-4.ll
index b03330f5d5c8c..98502dd24dc00 100644
--- a/llvm/test/CodeGen/AArch64/global-merge-4.ll
+++ b/llvm/test/CodeGen/AArch64/global-merge-4.ll
@@ -9,26 +9,26 @@ target triple = "arm64-apple-ios7.0.0"
 
 ; Function Attrs: nounwind ssp
 define internal void @initialize() #0 {
-  %1 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
-  store i32 %1, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @bar, i64 0, i64 0), align 4
-  %2 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
-  store i32 %2, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @baz, i64 0, i64 0), align 4
-  %3 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
-  store i32 %3, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @bar, i64 0, i64 1), align 4
-  %4 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
-  store i32 %4, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @baz, i64 0, i64 1), align 4
-  %5 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
-  store i32 %5, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @bar, i64 0, i64 2), align 4
-  %6 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
-  store i32 %6, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @baz, i64 0, i64 2), align 4
-  %7 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
-  store i32 %7, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @bar, i64 0, i64 3), align 4
-  %8 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
-  store i32 %8, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @baz, i64 0, i64 3), align 4
-  %9 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
-  store i32 %9, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @bar, i64 0, i64 4), align 4
-  %10 = tail call i32 bitcast (i32 (...)* @calc to i32 ()*)() #2
-  store i32 %10, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @baz, i64 0, i64 4), align 4
+  %1 = tail call i32 @calc() #2
+  store i32 %1, ptr @bar, align 4
+  %2 = tail call i32 @calc() #2
+  store i32 %2, ptr @baz, align 4
+  %3 = tail call i32 @calc() #2
+  store i32 %3, ptr getelementptr inbounds ([5 x i32], ptr @bar, i64 0, i64 1), align 4
+  %4 = tail call i32 @calc() #2
+  store i32 %4, ptr getelementptr inbounds ([5 x i32], ptr @baz, i64 0, i64 1), align 4
+  %5 = tail call i32 @calc() #2
+  store i32 %5, ptr getelementptr inbounds ([5 x i32], ptr @bar, i64 0, i64 2), align 4
+  %6 = tail call i32 @calc() #2
+  store i32 %6, ptr getelementptr inbounds ([5 x i32], ptr @baz, i64 0, i64 2), align 4
+  %7 = tail call i32 @calc() #2
+  store i32 %7, ptr getelementptr inbounds ([5 x i32], ptr @bar, i64 0, i64 3), align 4
+  %8 = tail call i32 @calc() #2
+  store i32 %8, ptr getelementptr inbounds ([5 x i32], ptr @baz, i64 0, i64 3), align 4
+  %9 = tail call i32 @calc() #2
+  store i32 %9, ptr getelementptr inbounds ([5 x i32], ptr @bar, i64 0, i64 4), align 4
+  %10 = tail call i32 @calc() #2
+  store i32 %10, ptr getelementptr inbounds ([5 x i32], ptr @baz, i64 0, i64 4), align 4
   ret void
 }
 
@@ -36,32 +36,32 @@ declare i32 @calc(...)
 
 ; Function Attrs: nounwind ssp
 define internal void @calculate() #0 {
-  %1 = load i32, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @bar, i64 0, i64 0), align 4
-  %2 = load i32, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @baz, i64 0, i64 0), align 4
+  %1 = load i32, ptr @bar, align 4
+  %2 = load i32, ptr @baz, align 4
   %3 = mul nsw i32 %2, %1
-  store i32 %3, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @foo, i64 0, i64 0), align 4
-  %4 = load i32, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @bar, i64 0, i64 1), align 4
-  %5 = load i32, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @baz, i64 0, i64 1), align 4
+  store i32 %3, ptr @foo, align 4
+  %4 = load i32, ptr getelementptr inbounds ([5 x i32], ptr @bar, i64 0, i64 1), align 4
+  %5 = load i32, ptr getelementptr inbounds ([5 x i32], ptr @baz, i64 0, i64 1), align 4
   %6 = mul nsw i32 %5, %4
-  store i32 %6, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @foo, i64 0, i64 1), align 4
-  %7 = load i32, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @bar, i64 0, i64 2), align 4
-  %8 = load i32, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @baz, i64 0, i64 2), align 4
+  store i32 %6, ptr getelementptr inbounds ([5 x i32], ptr @foo, i64 0, i64 1), align 4
+  %7 = load i32, ptr getelementptr inbounds ([5 x i32], ptr @bar, i64 0, i64 2), align 4
+  %8 = load i32, ptr getelementptr inbounds ([5 x i32], ptr @baz, i64 0, i64 2), align 4
   %9 = mul nsw i32 %8, %7
-  store i32 %9, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @foo, i64 0, i64 2), align 4
-  %10 = load i32, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @bar, i64 0, i64 3), align 4
-  %11 = load i32, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @baz, i64 0, i64 3), align 4
+  store i32 %9, ptr getelementptr inbounds ([5 x i32], ptr @foo, i64 0, i64 2), align 4
+  %10 = load i32, ptr getelementptr inbounds ([5 x i32], ptr @bar, i64 0, i64 3), align 4
+  %11 = load i32, ptr getelementptr inbounds ([5 x i32], ptr @baz, i64 0, i64 3), align 4
   %12 = mul nsw i32 %11, %10
-  store i32 %12, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @foo, i64 0, i64 3), align 4
-  %13 = load i32, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @bar, i64 0, i64 4), align 4
-  %14 = load i32, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @baz, i64 0, i64 4), align 4
+  store i32 %12, ptr getelementptr inbounds ([5 x i32], ptr @foo, i64 0, i64 3), align 4
+  %13 = load i32, ptr getelementptr inbounds ([5 x i32], ptr @bar, i64 0, i64 4), align 4
+  %14 = load i32, ptr getelementptr inbounds ([5 x i32], ptr @baz, i64 0, i64 4), align 4
   %15 = mul nsw i32 %14, %13
-  store i32 %15, i32* getelementptr inbounds ([5 x i32], [5 x i32]* @foo, i64 0, i64 4), align 4
+  store i32 %15, ptr getelementptr inbounds ([5 x i32], ptr @foo, i64 0, i64 4), align 4
   ret void
 }
 
 ; Function Attrs: nounwind readnone ssp
-define internal i32* @returnFoo() #1 {
-  ret i32* getelementptr inbounds ([5 x i32], [5 x i32]* @foo, i64 0, i64 0)
+define internal ptr @returnFoo() #1 {
+  ret ptr @foo
 }
 
 ;CHECK:	.type	.L_MergedGlobals,@object  // @_MergedGlobals

diff  --git a/llvm/test/CodeGen/AArch64/global-merge-group-by-use.ll b/llvm/test/CodeGen/AArch64/global-merge-group-by-use.ll
index bd06706467f77..ab2b4324ccece 100644
--- a/llvm/test/CodeGen/AArch64/global-merge-group-by-use.ll
+++ b/llvm/test/CodeGen/AArch64/global-merge-group-by-use.ll
@@ -19,8 +19,8 @@ define void @f1(i32 %a1, i32 %a2) #0 {
 ; CHECK-NEXT:    add x8, x8, __MergedGlobals.2@PAGEOFF
 ; CHECK-NEXT:    stp w0, w1, [x8]
 ; CHECK-NEXT:    ret
-  store i32 %a1, i32* @m1, align 4
-  store i32 %a2, i32* @n1, align 4
+  store i32 %a1, ptr @m1, align 4
+  store i32 %a2, ptr @n1, align 4
   ret void
 }
 
@@ -36,9 +36,9 @@ define void @f2(i32 %a1, i32 %a2, i32 %a3) #0 {
 ; CHECK-NEXT:    stp w0, w1, [x8]
 ; CHECK-NEXT:    str w2, [x8, #8]
 ; CHECK-NEXT:    ret
-  store i32 %a1, i32* @m2, align 4
-  store i32 %a2, i32* @n2, align 4
-  store i32 %a3, i32* @o2, align 4
+  store i32 %a1, ptr @m2, align 4
+  store i32 %a2, ptr @n2, align 4
+  store i32 %a3, ptr @o2, align 4
   ret void
 }
 
@@ -57,8 +57,8 @@ define void @f3(i32 %a1, i32 %a2) #0 {
 ; CHECK-NEXT:    str w0, [x8, _m3@PAGEOFF]
 ; CHECK-NEXT:    str w1, [x9, __MergedGlobals@PAGEOFF]
 ; CHECK-NEXT:    ret
-  store i32 %a1, i32* @m3, align 4
-  store i32 %a2, i32* @n3, align 4
+  store i32 %a1, ptr @m3, align 4
+  store i32 %a2, ptr @n3, align 4
   ret void
 }
 
@@ -73,9 +73,9 @@ define void @f4(i32 %a1, i32 %a2, i32 %a3) #0 {
 ; CHECK-NEXT:    stp w0, w1, [x8, #4]
 ; CHECK-NEXT:    str w2, [x8]
 ; CHECK-NEXT:    ret
-  store i32 %a1, i32* @m4, align 4
-  store i32 %a2, i32* @n4, align 4
-  store i32 %a3, i32* @n3, align 4
+  store i32 %a1, ptr @m4, align 4
+  store i32 %a2, ptr @n4, align 4
+  store i32 %a3, ptr @n3, align 4
   ret void
 }
 
@@ -88,7 +88,7 @@ define void @f5(i32 %a1) #0 {
 ; CHECK-NEXT:    adrp x8, _o5@PAGE
 ; CHECK-NEXT:    str w0, [x8, _o5@PAGEOFF]
 ; CHECK-NEXT:    ret
-  store i32 %a1, i32* @o5, align 4
+  store i32 %a1, ptr @o5, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/global-merge-hidden-minsize.ll b/llvm/test/CodeGen/AArch64/global-merge-hidden-minsize.ll
index aed6a40dc0a42..9c694fc4d289c 100644
--- a/llvm/test/CodeGen/AArch64/global-merge-hidden-minsize.ll
+++ b/llvm/test/CodeGen/AArch64/global-merge-hidden-minsize.ll
@@ -4,8 +4,8 @@
 @y = hidden global i32 0, align 4
 
 define hidden void @f() #0 {
-  store i32 0, i32* @x, align 4
-  store i32 0, i32* @y, align 4
+  store i32 0, ptr @x, align 4
+  store i32 0, ptr @y, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/global-merge-ignore-single-use-minsize.ll b/llvm/test/CodeGen/AArch64/global-merge-ignore-single-use-minsize.ll
index 8f5694afd5ed7..9f16d1b87b4ad 100644
--- a/llvm/test/CodeGen/AArch64/global-merge-ignore-single-use-minsize.ll
+++ b/llvm/test/CodeGen/AArch64/global-merge-ignore-single-use-minsize.ll
@@ -15,8 +15,8 @@ define void @f1(i32 %a1, i32 %a2) minsize nounwind {
 ; CHECK-NEXT: add x8, x8, [[SET]]@PAGEOFF
 ; CHECK-NEXT: stp w0, w1, [x8]
 ; CHECK-NEXT: ret
-  store i32 %a1, i32* @m1, align 4
-  store i32 %a2, i32* @n1, align 4
+  store i32 %a1, ptr @m1, align 4
+  store i32 %a2, ptr @n1, align 4
   ret void
 }
 
@@ -30,8 +30,8 @@ define void @f2(i32 %a1, i32 %a2) nounwind {
 ; CHECK-NEXT: str w0, [x8, _m2@PAGEOFF]
 ; CHECK-NEXT: str w1, [x9, _n2@PAGEOFF]
 ; CHECK-NEXT: ret
-  store i32 %a1, i32* @m2, align 4
-  store i32 %a2, i32* @n2, align 4
+  store i32 %a1, ptr @m2, align 4
+  store i32 %a2, ptr @n2, align 4
   ret void
 }
 
@@ -48,8 +48,8 @@ define void @f3(i32 %a1, i32 %a2) minsize nounwind {
 ; CHECK-NEXT: add x8, x8, [[SET]]@PAGEOFF+8
 ; CHECK-NEXT: stp w0, w1, [x8]
 ; CHECK-NEXT: ret
-  store i32 %a1, i32* @m3, align 4
-  store i32 %a2, i32* @n3, align 4
+  store i32 %a1, ptr @m3, align 4
+  store i32 %a2, ptr @n3, align 4
   ret void
 }
 
@@ -62,8 +62,8 @@ define void @f4(i32 %a1, i32 %a2) nounwind {
 ; CHECK-NEXT: str w0, [x8, [[SET]]@PAGEOFF+8]
 ; CHECK-NEXT: str w1, [x9, _n4@PAGEOFF]
 ; CHECK-NEXT: ret
-  store i32 %a1, i32* @m3, align 4
-  store i32 %a2, i32* @n4, align 4
+  store i32 %a1, ptr @m3, align 4
+  store i32 %a2, ptr @n4, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/global-merge-ignore-single-use.ll b/llvm/test/CodeGen/AArch64/global-merge-ignore-single-use.ll
index be4adda597464..04a5f845ce423 100644
--- a/llvm/test/CodeGen/AArch64/global-merge-ignore-single-use.ll
+++ b/llvm/test/CodeGen/AArch64/global-merge-ignore-single-use.ll
@@ -15,8 +15,8 @@ define void @f1(i32 %a1, i32 %a2) #0 {
 ; CHECK-NEXT: add x8, x8, [[SET]]@PAGEOFF
 ; CHECK-NEXT: stp w0, w1, [x8]
 ; CHECK-NEXT: ret
-  store i32 %a1, i32* @m1, align 4
-  store i32 %a2, i32* @n1, align 4
+  store i32 %a1, ptr @m1, align 4
+  store i32 %a2, ptr @n1, align 4
   ret void
 }
 
@@ -30,9 +30,9 @@ define void @f2(i32 %a1, i32 %a2, i32 %a3) #0 {
 ; CHECK-NEXT: stp w0, w1, [x8]
 ; CHECK-NEXT: str w2, [x8, #8]
 ; CHECK-NEXT: ret
-  store i32 %a1, i32* @m1, align 4
-  store i32 %a2, i32* @n1, align 4
-  store i32 %a3, i32* @o1, align 4
+  store i32 %a1, ptr @m1, align 4
+  store i32 %a2, ptr @n1, align 4
+  store i32 %a3, ptr @o1, align 4
   ret void
 }
 
@@ -42,8 +42,8 @@ define void @f3(i32 %a1, i32 %a2) #0 {
 ; CHECK-NEXT: add x8, x8, [[SET]]@PAGEOFF+12
 ; CHECK-NEXT: stp w0, w1, [x8]
 ; CHECK-NEXT: ret
-  store i32 %a1, i32* @m2, align 4
-  store i32 %a2, i32* @n2, align 4
+  store i32 %a1, ptr @m2, align 4
+  store i32 %a2, ptr @n2, align 4
   ret void
 }
 
@@ -54,7 +54,7 @@ define void @f4(i32 %a1) #0 {
 ; CHECK-NEXT: adrp x8, _o2@PAGE
 ; CHECK-NEXT: str w0, [x8, _o2@PAGEOFF]
 ; CHECK-NEXT: ret
-  store i32 %a1, i32* @o2, align 4
+  store i32 %a1, ptr @o2, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/global-merge-minsize.ll b/llvm/test/CodeGen/AArch64/global-merge-minsize.ll
index 0d6b9ed31a215..d54b1b7fd5d8b 100644
--- a/llvm/test/CodeGen/AArch64/global-merge-minsize.ll
+++ b/llvm/test/CodeGen/AArch64/global-merge-minsize.ll
@@ -14,8 +14,8 @@ define dso_local i32 @func() minsize optsize {
 ; CHECK-NEXT:  add w0, w8, w9
 ; CHECK-NEXT:  ret
 entry:
-  %0 = load i32, i32* @global0, align 4
-  %1 = load i32, i32* @global1, align 4
+  %0 = load i32, ptr @global0, align 4
+  %1 = load i32, ptr @global1, align 4
   %add = add nsw i32 %1, %0
   ret i32 %add
 }

diff  --git a/llvm/test/CodeGen/AArch64/global-merge.ll b/llvm/test/CodeGen/AArch64/global-merge.ll
index aed1dc4d1c7b0..f2826e4cb00cb 100644
--- a/llvm/test/CodeGen/AArch64/global-merge.ll
+++ b/llvm/test/CodeGen/AArch64/global-merge.ll
@@ -17,8 +17,8 @@ define void @f1(i32 %a1, i32 %a2) {
 ; CHECK-APPLE-IOS-LABEL: f1:
 ; CHECK-APPLE-IOS: adrp x{{[0-9]+}}, __MergedGlobals
 ; CHECK-APPLE-IOS-NOT: adrp
-  store i32 %a1, i32* @m, align 4
-  store i32 %a2, i32* @n, align 4
+  store i32 %a1, ptr @m, align 4
+  store i32 %a2, ptr @n, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/got-abuse.ll b/llvm/test/CodeGen/AArch64/got-abuse.ll
index 7a02b104e777a..949443bce869e 100644
--- a/llvm/test/CodeGen/AArch64/got-abuse.ll
+++ b/llvm/test/CodeGen/AArch64/got-abuse.ll
@@ -15,7 +15,7 @@ declare void @func()
 define void @foo() nounwind {
 ; CHECK-LABEL: foo:
 entry:
-  call void @consume(i32 ptrtoint (void ()* @func to i32))
+  call void @consume(i32 ptrtoint (ptr @func to i32))
 ; CHECK: adrp x[[ADDRHI:[0-9]+]], :got:func
 ; CHECK: ldr {{x[0-9]+}}, [x[[ADDRHI]], {{#?}}:got_lo12:func]
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/half.ll b/llvm/test/CodeGen/AArch64/half.ll
index 75e26a389784e..577cd8b02b8a5 100644
--- a/llvm/test/CodeGen/AArch64/half.ll
+++ b/llvm/test/CodeGen/AArch64/half.ll
@@ -1,23 +1,23 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
 
-define void @test_load_store(half* %in, half* %out) {
+define void @test_load_store(ptr %in, ptr %out) {
 ; CHECK-LABEL: test_load_store:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr h0, [x0]
 ; CHECK-NEXT:    str h0, [x1]
 ; CHECK-NEXT:    ret
-  %val = load half, half* %in
-  store half %val, half* %out
+  %val = load half, ptr %in
+  store half %val, ptr %out
   ret void
 }
 
-define i16 @test_bitcast_from_half(half* %addr) {
+define i16 @test_bitcast_from_half(ptr %addr) {
 ; CHECK-LABEL: test_bitcast_from_half:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w0, [x0]
 ; CHECK-NEXT:    ret
-  %val = load half, half* %addr
+  %val = load half, ptr %addr
   %val_int = bitcast half %val to i16
   ret i16 %val_int
 }
@@ -32,13 +32,13 @@ define i16 @test_reg_bitcast_from_half(half %in) {
   ret i16 %val
 }
 
-define void @test_bitcast_to_half(half* %addr, i16 %in) {
+define void @test_bitcast_to_half(ptr %addr, i16 %in) {
 ; CHECK-LABEL: test_bitcast_to_half:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    strh w1, [x0]
 ; CHECK-NEXT:    ret
   %val_fp = bitcast i16 %in to half
-  store half %val_fp, half* %addr
+  store half %val_fp, ptr %addr
   ret void
 }
 
@@ -52,47 +52,47 @@ define half @test_reg_bitcast_to_half(i16 %in) {
   ret half %val
 }
 
-define float @test_extend32(half* %addr) {
+define float @test_extend32(ptr %addr) {
 ; CHECK-LABEL: test_extend32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr h0, [x0]
 ; CHECK-NEXT:    fcvt s0, h0
 ; CHECK-NEXT:    ret
-  %val16 = load half, half* %addr
+  %val16 = load half, ptr %addr
   %val32 = fpext half %val16 to float
   ret float %val32
 }
 
-define double @test_extend64(half* %addr) {
+define double @test_extend64(ptr %addr) {
 ; CHECK-LABEL: test_extend64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr h0, [x0]
 ; CHECK-NEXT:    fcvt d0, h0
 ; CHECK-NEXT:    ret
-  %val16 = load half, half* %addr
+  %val16 = load half, ptr %addr
   %val32 = fpext half %val16 to double
   ret double %val32
 }
 
-define void @test_trunc32(float %in, half* %addr) {
+define void @test_trunc32(float %in, ptr %addr) {
 ; CHECK-LABEL: test_trunc32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvt h0, s0
 ; CHECK-NEXT:    str h0, [x0]
 ; CHECK-NEXT:    ret
   %val16 = fptrunc float %in to half
-  store half %val16, half* %addr
+  store half %val16, ptr %addr
   ret void
 }
 
-define void @test_trunc64(double %in, half* %addr) {
+define void @test_trunc64(double %in, ptr %addr) {
 ; CHECK-LABEL: test_trunc64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fcvt h0, d0
 ; CHECK-NEXT:    str h0, [x0]
 ; CHECK-NEXT:    ret
   %val16 = fptrunc double %in to half
-  store half %val16, half* %addr
+  store half %val16, ptr %addr
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/hwasan-check-memaccess.ll b/llvm/test/CodeGen/AArch64/hwasan-check-memaccess.ll
index 2510e19f092d7..018f58c36eb1a 100644
--- a/llvm/test/CodeGen/AArch64/hwasan-check-memaccess.ll
+++ b/llvm/test/CodeGen/AArch64/hwasan-check-memaccess.ll
@@ -2,7 +2,7 @@
 
 target triple = "aarch64--linux-android"
 
-define i8* @f1(i8* %x0, i8* %x1) {
+define ptr @f1(ptr %x0, ptr %x1) {
   ; CHECK: f1:
   ; CHECK: str x30, [sp, #-16]!
   ; CHECK-NEXT: .cfi_def_cfa_offset 16
@@ -12,11 +12,11 @@ define i8* @f1(i8* %x0, i8* %x1) {
   ; CHECK-NEXT: bl __hwasan_check_x1_1
   ; CHECK-NEXT: ldr x30, [sp], #16
   ; CHECK-NEXT: ret
-  call void @llvm.hwasan.check.memaccess(i8* %x0, i8* %x1, i32 1)
-  ret i8* %x1
+  call void @llvm.hwasan.check.memaccess(ptr %x0, ptr %x1, i32 1)
+  ret ptr %x1
 }
 
-define i8* @f2(i8* %x0, i8* %x1) {
+define ptr @f2(ptr %x0, ptr %x1) {
   ; CHECK: f2:
   ; CHECK: stp x30, x20, [sp, #-16]!
   ; CHECK-NEXT: .cfi_def_cfa_offset 16
@@ -26,18 +26,18 @@ define i8* @f2(i8* %x0, i8* %x1) {
   ; CHECK-NEXT: bl __hwasan_check_x0_2_short_v2
   ; CHECK-NEXT: ldp x30, x20, [sp], #16
   ; CHECK-NEXT: ret
-  call void @llvm.hwasan.check.memaccess.shortgranules(i8* %x1, i8* %x0, i32 2)
-  ret i8* %x0
+  call void @llvm.hwasan.check.memaccess.shortgranules(ptr %x1, ptr %x0, i32 2)
+  ret ptr %x0
 }
 
-define void @f3(i8* %x0, i8* %x1) {
+define void @f3(ptr %x0, ptr %x1) {
   ; 0x3ff0000 (kernel, match-all = 0xff)
-  call void @llvm.hwasan.check.memaccess(i8* %x0, i8* %x1, i32 67043328)
+  call void @llvm.hwasan.check.memaccess(ptr %x0, ptr %x1, i32 67043328)
   ret void
 }
 
-declare void @llvm.hwasan.check.memaccess(i8*, i8*, i32)
-declare void @llvm.hwasan.check.memaccess.shortgranules(i8*, i8*, i32)
+declare void @llvm.hwasan.check.memaccess(ptr, ptr, i32)
+declare void @llvm.hwasan.check.memaccess.shortgranules(ptr, ptr, i32)
 
 ; CHECK:      .section .text.hot,"axG",@progbits,__hwasan_check_x0_2_short_v2,comdat
 ; CHECK-NEXT: .type __hwasan_check_x0_2_short_v2,@function

diff  --git a/llvm/test/CodeGen/AArch64/hwasan-prefer-fp.ll b/llvm/test/CodeGen/AArch64/hwasan-prefer-fp.ll
index d1b8c787ee942..467cae873415b 100644
--- a/llvm/test/CodeGen/AArch64/hwasan-prefer-fp.ll
+++ b/llvm/test/CodeGen/AArch64/hwasan-prefer-fp.ll
@@ -4,17 +4,17 @@ target triple="aarch64--"
 
 define void @f() sanitize_hwaddress !dbg !6 {
 entry:
-  %x = call i8* @g(i32 0)
+  %x = call ptr @g(i32 0)
   %a = alloca [128 x i8]
   %b = alloca [128 x i8]
   ; CHECK: DW_AT_location (DW_OP_fbreg
-  call void @llvm.dbg.declare(metadata [128 x i8]* %a, metadata !12, metadata !DIExpression()), !dbg !14
+  call void @llvm.dbg.declare(metadata ptr %a, metadata !12, metadata !DIExpression()), !dbg !14
   ; CHECK: DW_AT_location (DW_OP_fbreg
-  call void @llvm.dbg.declare(metadata [128 x i8]* %b, metadata !13, metadata !DIExpression()), !dbg !14
+  call void @llvm.dbg.declare(metadata ptr %b, metadata !13, metadata !DIExpression()), !dbg !14
   ret void, !dbg !15
 }
 
-declare i8* @g(i32)
+declare ptr @g(i32)
 
 declare void @llvm.dbg.declare(metadata, metadata, metadata)
 

diff  --git a/llvm/test/CodeGen/AArch64/i1-contents.ll b/llvm/test/CodeGen/AArch64/i1-contents.ll
index b3014e4c7b19f..6fc01fd19335b 100644
--- a/llvm/test/CodeGen/AArch64/i1-contents.ll
+++ b/llvm/test/CodeGen/AArch64/i1-contents.ll
@@ -11,7 +11,7 @@ define dso_local void @consume_i1_arg(i1 %in) {
 ; CHECK: and [[BOOL32:w[0-9]+]], w0, #{{0x1|0xff}}
 ; CHECK: str [[BOOL32]], [{{x[0-9]+}}, :lo12:var]
   %val = zext i1 %in to %big
-  store %big %val, %big* @var
+  store %big %val, ptr @var
   ret void
 }
 
@@ -24,7 +24,7 @@ define dso_local void @consume_i1_ret() {
 ; CHECK: str [[BOOL32]], [{{x[0-9]+}}, :lo12:var]
   %val1 = call i1 @produce_i1_ret()
   %val = zext i1 %val1 to %big
-  store %big %val, %big* @var
+  store %big %val, ptr @var
   ret void
 }
 
@@ -33,7 +33,7 @@ define dso_local i1 @produce_i1_ret() {
 ; CHECK-LABEL: produce_i1_ret:
 ; CHECK: ldr [[VAR32:w[0-9]+]], [{{x[0-9]+}}, :lo12:var]
 ; CHECK: and w0, [[VAR32]], #{{0x1|0xff}}
-  %val = load %big, %big* @var
+  %val = load %big, ptr @var
   %val1 = trunc %big %val to i1
   ret i1 %val1
 }
@@ -43,7 +43,7 @@ define dso_local void @produce_i1_arg() {
 ; CHECK: ldr [[VAR32:w[0-9]+]], [{{x[0-9]+}}, :lo12:var]
 ; CHECK: and w0, [[VAR32]], #{{0x1|0xff}}
 ; CHECK: bl consume_i1_arg
-  %val = load %big, %big* @var
+  %val = load %big, ptr @var
   %val1 = trunc %big %val to i1
   call void @consume_i1_arg(i1 %val1)
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/i128-align.ll b/llvm/test/CodeGen/AArch64/i128-align.ll
index ee0b8077baf0e..b7c62285efc11 100644
--- a/llvm/test/CodeGen/AArch64/i128-align.ll
+++ b/llvm/test/CodeGen/AArch64/i128-align.ll
@@ -6,10 +6,10 @@
 
 define i64 @check_size() {
 ; CHECK-LABEL: check_size:
-  %starti = ptrtoint %struct* @var to i64
+  %starti = ptrtoint ptr @var to i64
 
-  %endp = getelementptr %struct, %struct* @var, i64 1
-  %endi = ptrtoint %struct* %endp to i64
+  %endp = getelementptr %struct, ptr @var, i64 1
+  %endi = ptrtoint ptr %endp to i64
 
   %diff = sub i64 %endi, %starti
   ret i64 %diff
@@ -18,10 +18,10 @@ define i64 @check_size() {
 
 define i64 @check_field() {
 ; CHECK-LABEL: check_field:
-  %starti = ptrtoint %struct* @var to i64
+  %starti = ptrtoint ptr @var to i64
 
-  %endp = getelementptr %struct, %struct* @var, i64 0, i32 1
-  %endi = ptrtoint i128* %endp to i64
+  %endp = getelementptr %struct, ptr @var, i64 0, i32 1
+  %endi = ptrtoint ptr %endp to i64
 
   %diff = sub i64 %endi, %starti
   ret i64 %diff

diff  --git a/llvm/test/CodeGen/AArch64/i128_volatile_load_store.ll b/llvm/test/CodeGen/AArch64/i128_volatile_load_store.ll
index 17db35c529f0a..2b71a9354c5ce 100644
--- a/llvm/test/CodeGen/AArch64/i128_volatile_load_store.ll
+++ b/llvm/test/CodeGen/AArch64/i128_volatile_load_store.ll
@@ -14,8 +14,8 @@ define void @test1() {
 ; CHECK-NEXT:    ldp x8, x9, [x8]
 ; CHECK-NEXT:    stp x8, x9, [x10]
 ; CHECK-NEXT:    ret
-  %tmp = load volatile i128, i128* @x
-  store volatile i128 %tmp, i128* @y
+  %tmp = load volatile i128, ptr @x
+  store volatile i128 %tmp, ptr @y
   ret void
 }
 
@@ -29,8 +29,8 @@ define void @test2() {
 ; CHECK-NEXT:    ldp x8, x9, [x8, #504]
 ; CHECK-NEXT:    stp x8, x9, [x10, #504]
 ; CHECK-NEXT:    ret
-  %tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 504) to i128*)
-  store volatile i128 %tmp, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @y to i8*), i64 504) to i128*)
+  %tmp = load volatile i128, ptr getelementptr (i8, ptr @x, i64 504)
+  store volatile i128 %tmp, ptr getelementptr (i8, ptr @y, i64 504)
   ret void
 }
 
@@ -46,8 +46,8 @@ define void @test3() {
 ; CHECK-NEXT:    ldp x8, x9, [x8]
 ; CHECK-NEXT:    stp x8, x9, [x10]
 ; CHECK-NEXT:    ret
-  %tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 512) to i128*)
-  store volatile i128 %tmp, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @y to i8*), i64 512) to i128*)
+  %tmp = load volatile i128, ptr getelementptr (i8, ptr @x, i64 512)
+  store volatile i128 %tmp, ptr getelementptr (i8, ptr @y, i64 512)
   ret void
 }
 
@@ -61,8 +61,8 @@ define void @test4() {
 ; CHECK-NEXT:    ldp x8, x9, [x8, #-512]
 ; CHECK-NEXT:    stp x8, x9, [x10, #-512]
 ; CHECK-NEXT:    ret
-  %tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 -512) to i128*)
-  store volatile i128 %tmp, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @y to i8*), i64 -512) to i128*)
+  %tmp = load volatile i128, ptr getelementptr (i8, ptr @x, i64 -512)
+  store volatile i128 %tmp, ptr getelementptr (i8, ptr @y, i64 -512)
   ret void
 }
 
@@ -78,8 +78,8 @@ define void @test5() {
 ; CHECK-NEXT:    ldp x8, x9, [x8]
 ; CHECK-NEXT:    stp x8, x9, [x10]
 ; CHECK-NEXT:    ret
-  %tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 -520) to i128*)
-  store volatile i128 %tmp, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @y to i8*), i64 -520) to i128*)
+  %tmp = load volatile i128, ptr getelementptr (i8, ptr @x, i64 -520)
+  store volatile i128 %tmp, ptr getelementptr (i8, ptr @y, i64 -520)
   ret void
 }
 
@@ -95,8 +95,8 @@ define void @test6() {
 ; CHECK-NEXT:    ldp x8, x9, [x8]
 ; CHECK-NEXT:    stp x8, x9, [x10]
 ; CHECK-NEXT:    ret
-  %tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 -520) to i128*)
-  store volatile i128 %tmp, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @y to i8*), i64 -520) to i128*)
+  %tmp = load volatile i128, ptr getelementptr (i8, ptr @x, i64 -520)
+  store volatile i128 %tmp, ptr getelementptr (i8, ptr @y, i64 -520)
   ret void
 }
 
@@ -112,7 +112,7 @@ define void @test7() {
 ; CHECK-NEXT:    ldp x8, x9, [x8]
 ; CHECK-NEXT:    stp x8, x9, [x10]
 ; CHECK-NEXT:    ret
-  %tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 503) to i128*)
-  store volatile i128 %tmp, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @y to i8*), i64 503) to i128*)
+  %tmp = load volatile i128, ptr getelementptr (i8, ptr @x, i64 503)
+  store volatile i128 %tmp, ptr getelementptr (i8, ptr @y, i64 503)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/illegal-float-ops.ll b/llvm/test/CodeGen/AArch64/illegal-float-ops.ll
index c4fc5df5d296a..20435e10d89ff 100644
--- a/llvm/test/CodeGen/AArch64/illegal-float-ops.ll
+++ b/llvm/test/CodeGen/AArch64/illegal-float-ops.ll
@@ -13,15 +13,15 @@ define void @test_cos(float %float, double %double, fp128 %fp128) {
 ; CHECK-LABEL: test_cos:
 
    %cosfloat = call float @llvm.cos.f32(float %float)
-   store float %cosfloat, float* @varfloat
+   store float %cosfloat, ptr @varfloat
 ; CHECK: bl cosf
 
    %cosdouble = call double @llvm.cos.f64(double %double)
-   store double %cosdouble, double* @vardouble
+   store double %cosdouble, ptr @vardouble
 ; CHECK: bl cos
 
    %cosfp128 = call fp128 @llvm.cos.f128(fp128 %fp128)
-   store fp128 %cosfp128, fp128* @varfp128
+   store fp128 %cosfp128, ptr @varfp128
 ; CHECK: bl cosl
 
   ret void
@@ -35,15 +35,15 @@ define void @test_exp(float %float, double %double, fp128 %fp128) {
 ; CHECK-LABEL: test_exp:
 
    %expfloat = call float @llvm.exp.f32(float %float)
-   store float %expfloat, float* @varfloat
+   store float %expfloat, ptr @varfloat
 ; CHECK: bl expf
 
    %expdouble = call double @llvm.exp.f64(double %double)
-   store double %expdouble, double* @vardouble
+   store double %expdouble, ptr @vardouble
 ; CHECK: bl exp
 
    %expfp128 = call fp128 @llvm.exp.f128(fp128 %fp128)
-   store fp128 %expfp128, fp128* @varfp128
+   store fp128 %expfp128, ptr @varfp128
 ; CHECK: bl expl
 
   ret void
@@ -57,15 +57,15 @@ define void @test_exp2(float %float, double %double, fp128 %fp128) {
 ; CHECK-LABEL: test_exp2:
 
    %exp2float = call float @llvm.exp2.f32(float %float)
-   store float %exp2float, float* @varfloat
+   store float %exp2float, ptr @varfloat
 ; CHECK: bl exp2f
 
    %exp2double = call double @llvm.exp2.f64(double %double)
-   store double %exp2double, double* @vardouble
+   store double %exp2double, ptr @vardouble
 ; CHECK: bl exp2
 
    %exp2fp128 = call fp128 @llvm.exp2.f128(fp128 %fp128)
-   store fp128 %exp2fp128, fp128* @varfp128
+   store fp128 %exp2fp128, ptr @varfp128
 ; CHECK: bl exp2l
   ret void
 
@@ -79,15 +79,15 @@ define void @test_log(float %float, double %double, fp128 %fp128) {
 ; CHECK-LABEL: test_log:
 
    %logfloat = call float @llvm.log.f32(float %float)
-   store float %logfloat, float* @varfloat
+   store float %logfloat, ptr @varfloat
 ; CHECK: bl logf
 
    %logdouble = call double @llvm.log.f64(double %double)
-   store double %logdouble, double* @vardouble
+   store double %logdouble, ptr @vardouble
 ; CHECK: bl log
 
    %logfp128 = call fp128 @llvm.log.f128(fp128 %fp128)
-   store fp128 %logfp128, fp128* @varfp128
+   store fp128 %logfp128, ptr @varfp128
 ; CHECK: bl logl
 
   ret void
@@ -101,15 +101,15 @@ define void @test_log2(float %float, double %double, fp128 %fp128) {
 ; CHECK-LABEL: test_log2:
 
    %log2float = call float @llvm.log2.f32(float %float)
-   store float %log2float, float* @varfloat
+   store float %log2float, ptr @varfloat
 ; CHECK: bl log2f
 
    %log2double = call double @llvm.log2.f64(double %double)
-   store double %log2double, double* @vardouble
+   store double %log2double, ptr @vardouble
 ; CHECK: bl log2
 
    %log2fp128 = call fp128 @llvm.log2.f128(fp128 %fp128)
-   store fp128 %log2fp128, fp128* @varfp128
+   store fp128 %log2fp128, ptr @varfp128
 ; CHECK: bl log2l
   ret void
 
@@ -123,15 +123,15 @@ define void @test_log10(float %float, double %double, fp128 %fp128) {
 ; CHECK-LABEL: test_log10:
 
    %log10float = call float @llvm.log10.f32(float %float)
-   store float %log10float, float* @varfloat
+   store float %log10float, ptr @varfloat
 ; CHECK: bl log10f
 
    %log10double = call double @llvm.log10.f64(double %double)
-   store double %log10double, double* @vardouble
+   store double %log10double, ptr @vardouble
 ; CHECK: bl log10
 
    %log10fp128 = call fp128 @llvm.log10.f128(fp128 %fp128)
-   store fp128 %log10fp128, fp128* @varfp128
+   store fp128 %log10fp128, ptr @varfp128
 ; CHECK: bl log10l
 
   ret void
@@ -145,15 +145,15 @@ define void @test_sin(float %float, double %double, fp128 %fp128) {
 ; CHECK-LABEL: test_sin:
 
    %sinfloat = call float @llvm.sin.f32(float %float)
-   store float %sinfloat, float* @varfloat
+   store float %sinfloat, ptr @varfloat
 ; CHECK: bl sinf
 
    %sindouble = call double @llvm.sin.f64(double %double)
-   store double %sindouble, double* @vardouble
+   store double %sindouble, ptr @vardouble
 ; CHECK: bl sin
 
    %sinfp128 = call fp128 @llvm.sin.f128(fp128 %fp128)
-   store fp128 %sinfp128, fp128* @varfp128
+   store fp128 %sinfp128, ptr @varfp128
 ; CHECK: bl sinl
   ret void
 
@@ -167,15 +167,15 @@ define void @test_pow(float %float, double %double, fp128 %fp128) {
 ; CHECK-LABEL: test_pow:
 
    %powfloat = call float @llvm.pow.f32(float %float, float %float)
-   store float %powfloat, float* @varfloat
+   store float %powfloat, ptr @varfloat
 ; CHECK: bl powf
 
    %powdouble = call double @llvm.pow.f64(double %double, double %double)
-   store double %powdouble, double* @vardouble
+   store double %powdouble, ptr @vardouble
 ; CHECK: bl pow
 
    %powfp128 = call fp128 @llvm.pow.f128(fp128 %fp128, fp128 %fp128)
-   store fp128 %powfp128, fp128* @varfp128
+   store fp128 %powfp128, ptr @varfp128
 ; CHECK: bl powl
 
   ret void
@@ -189,15 +189,15 @@ define void @test_powi(float %float, double %double, i32 %exponent, fp128 %fp128
 ; CHECK-LABEL: test_powi:
 
    %powifloat = call float @llvm.powi.f32.i32(float %float, i32 %exponent)
-   store float %powifloat, float* @varfloat
+   store float %powifloat, ptr @varfloat
 ; CHECK: bl __powisf2
 
    %powidouble = call double @llvm.powi.f64.i32(double %double, i32 %exponent)
-   store double %powidouble, double* @vardouble
+   store double %powidouble, ptr @vardouble
 ; CHECK: bl __powidf2
 
    %powifp128 = call fp128 @llvm.powi.f128.i32(fp128 %fp128, i32 %exponent)
-   store fp128 %powifp128, fp128* @varfp128
+   store fp128 %powifp128, ptr @varfp128
 ; CHECK: bl __powitf2
   ret void
 
@@ -207,15 +207,15 @@ define void @test_frem(float %float, double %double, fp128 %fp128) {
 ; CHECK-LABEL: test_frem:
 
   %fremfloat = frem float %float, %float
-  store float %fremfloat, float* @varfloat
+  store float %fremfloat, ptr @varfloat
 ; CHECK: bl fmodf
 
   %fremdouble = frem double %double, %double
-  store double %fremdouble, double* @vardouble
+  store double %fremdouble, ptr @vardouble
 ; CHECK: bl fmod
 
   %fremfp128 = frem fp128 %fp128, %fp128
-  store fp128 %fremfp128, fp128* @varfp128
+  store fp128 %fremfp128, ptr @varfp128
 ; CHECK: bl fmodl
 
   ret void
@@ -227,7 +227,7 @@ define void @test_fma(fp128 %fp128) {
 ; CHECK-LABEL: test_fma:
 
   %fmafp128 = call fp128 @llvm.fma.f128(fp128 %fp128, fp128 %fp128, fp128 %fp128)
-  store fp128 %fmafp128, fp128* @varfp128
+  store fp128 %fmafp128, ptr @varfp128
 ; CHECK: bl fmal
 
   ret void
@@ -239,7 +239,7 @@ define void @test_fmuladd(fp128 %fp128) {
 ; CHECK-LABEL: test_fmuladd:
 
   %fmuladdfp128 = call fp128 @llvm.fmuladd.f128(fp128 %fp128, fp128 %fp128, fp128 %fp128)
-  store fp128 %fmuladdfp128, fp128* @varfp128
+  store fp128 %fmuladdfp128, ptr @varfp128
 ; CHECK-NOT: bl fmal
 ; CHECK: bl __multf3
 ; CHECK: bl __addtf3
@@ -297,7 +297,7 @@ define i128 @test_fptoui128(fp128 %a) {
 
 define void @test_exp_finite(double %double) #0 {
   %expdouble = call double @llvm.exp.f64(double %double)
-  store double %expdouble, double* @vardouble
+  store double %expdouble, ptr @vardouble
   ; ANDROID-AARCH64-NOT: bl __exp_finite
   ; CHECK: bl exp
 
@@ -306,7 +306,7 @@ define void @test_exp_finite(double %double) #0 {
 
 define void @test_exp2_finite(double %double) #0 {
   %expdouble = call double @llvm.exp2.f64(double %double)
-  store double %expdouble, double* @vardouble
+  store double %expdouble, ptr @vardouble
   ; CHECK-NOT: bl __exp2_finite
   ; CHECK: bl exp2
 
@@ -315,7 +315,7 @@ define void @test_exp2_finite(double %double) #0 {
 
 define void @test_log_finite(double %double) #0 {
   %logdouble = call double @llvm.log.f64(double %double)
-  store double %logdouble, double* @vardouble
+  store double %logdouble, ptr @vardouble
   ; CHECK-NOT: bl __log_finite
   ; CHECK: bl log
   ret void
@@ -323,7 +323,7 @@ define void @test_log_finite(double %double) #0 {
 
 define void @test_log2_finite(double %double) #0 {
   %log2double = call double @llvm.log2.f64(double %double)
-  store double %log2double, double* @vardouble
+  store double %log2double, ptr @vardouble
   ; CHECK-NOT: bl __log2_finite
   ; CHECK: bl log2
   ret void
@@ -331,7 +331,7 @@ define void @test_log2_finite(double %double) #0 {
 
 define void @test_log10_finite(double %double) #0 {
   %log10double = call double @llvm.log10.f64(double %double)
-  store double %log10double, double* @vardouble
+  store double %log10double, ptr @vardouble
   ; CHECK-NOT: bl __log10_finite
   ; CHECK: bl log10
   ret void
@@ -339,7 +339,7 @@ define void @test_log10_finite(double %double) #0 {
 
 define void @test_pow_finite(double %double) #0 {
   %powdouble = call double @llvm.pow.f64(double %double, double %double)
-  store double %powdouble, double* @vardouble
+  store double %powdouble, ptr @vardouble
   ; CHECK-NOT: bl __pow_finite
   ; CHECK: bl pow
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/ilp32-tlsdesc.ll b/llvm/test/CodeGen/AArch64/ilp32-tlsdesc.ll
index 89a8fff47f4fa..30f256713361c 100644
--- a/llvm/test/CodeGen/AArch64/ilp32-tlsdesc.ll
+++ b/llvm/test/CodeGen/AArch64/ilp32-tlsdesc.ll
@@ -6,7 +6,7 @@
 define i32 @test_thread_local() {
 ; CHECK-LABEL: test_thread_local:
 
-  %val = load i32, i32* @var
+  %val = load i32, ptr @var
   ret i32 %val
 
 ; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:var

diff  --git a/llvm/test/CodeGen/AArch64/ilp32-va.ll b/llvm/test/CodeGen/AArch64/ilp32-va.ll
index 8f3e1c3898ed3..5c3372f7b9cf4 100644
--- a/llvm/test/CodeGen/AArch64/ilp32-va.ll
+++ b/llvm/test/CodeGen/AArch64/ilp32-va.ll
@@ -1,10 +1,10 @@
 ; RUN: llc -aarch64-load-store-renaming=true -verify-machineinstrs -mtriple=arm64-linux-gnu_ilp32 -pre-RA-sched=linearize -enable-misched=false -disable-post-ra < %s | FileCheck %s
 
-%va_list = type {i8*, i8*, i8*, i32, i32}
+%va_list = type {ptr, ptr, ptr, i32, i32}
 
 @var = dso_local global %va_list zeroinitializer, align 8
 
-declare void @llvm.va_start(i8*)
+declare void @llvm.va_start(ptr)
 
 define dso_local void @test_simple(i32 %n, ...) {
 ; CHECK-LABEL: test_simple:
@@ -36,8 +36,7 @@ define dso_local void @test_simple(i32 %n, ...) {
 ; CHECK: movk    [[GRVR]], #65408, lsl #32
 ; CHECK: stur    [[GRVR]], [x[[VA_LIST]], #12]
 
-  %addr = bitcast %va_list* @var to i8*
-  call void @llvm.va_start(i8* %addr)
+  call void @llvm.va_start(ptr @var)
 
   ret void
 }
@@ -71,8 +70,7 @@ define dso_local void @test_fewargs(i32 %n, i32 %n1, i32 %n2, float %m, ...) {
 ; CHECK: movk [[GRVR_OFFS]], #65424, lsl #32
 ; CHECK: stur [[GRVR_OFFS]], [x[[VA_LIST]], #12]
 
-  %addr = bitcast %va_list* @var to i8*
-  call void @llvm.va_start(i8* %addr)
+  call void @llvm.va_start(ptr @var)
 
   ret void
 }
@@ -80,8 +78,7 @@ define dso_local void @test_fewargs(i32 %n, i32 %n1, i32 %n2, float %m, ...) {
 define dso_local void @test_nospare([8 x i64], [8 x float], ...) {
 ; CHECK-LABEL: test_nospare:
 
-  %addr = bitcast %va_list* @var to i8*
-  call void @llvm.va_start(i8* %addr)
+  call void @llvm.va_start(ptr @var)
 ; CHECK-NOT: sub sp, sp
 ; CHECK: mov x[[STACK:[0-9]+]], sp
 ; CHECK: add x[[VAR:[0-9]+]], {{x[0-9]+}}, :lo12:var
@@ -102,33 +99,29 @@ define dso_local void @test_offsetstack([8 x i64], [2 x i64], [3 x float], ...)
 ; CHECK-DAG: add x[[VAR:[0-9]+]], {{x[0-9]+}}, :lo12:var
 ; CHECK-DAG: str w[[STACK_TOP]], [x[[VAR]]]
 
-  %addr = bitcast %va_list* @var to i8*
-  call void @llvm.va_start(i8* %addr)
+  call void @llvm.va_start(ptr @var)
   ret void
 }
 
-declare void @llvm.va_end(i8*)
+declare void @llvm.va_end(ptr)
 
 define dso_local void @test_va_end() nounwind {
 ; CHECK-LABEL: test_va_end:
 ; CHECK-NEXT: %bb.0
 
-  %addr = bitcast %va_list* @var to i8*
-  call void @llvm.va_end(i8* %addr)
+  call void @llvm.va_end(ptr @var)
 
   ret void
 ; CHECK-NEXT: ret
 }
 
-declare void @llvm.va_copy(i8* %dest, i8* %src)
+declare void @llvm.va_copy(ptr %dest, ptr %src)
 
 @second_list = dso_local global %va_list zeroinitializer
 
 define dso_local void @test_va_copy() {
 ; CHECK-LABEL: test_va_copy:
-  %srcaddr = bitcast %va_list* @var to i8*
-  %dstaddr = bitcast %va_list* @second_list to i8*
-  call void @llvm.va_copy(i8* %dstaddr, i8* %srcaddr)
+  call void @llvm.va_copy(ptr @second_list, ptr @var)
 
 ; CHECK: add x[[SRC:[0-9]+]], {{x[0-9]+}}, :lo12:var
 

diff  --git a/llvm/test/CodeGen/AArch64/implicit-null-check.ll b/llvm/test/CodeGen/AArch64/implicit-null-check.ll
index ed236e0010fb4..052ff7f0fe5d0 100644
--- a/llvm/test/CodeGen/AArch64/implicit-null-check.ll
+++ b/llvm/test/CodeGen/AArch64/implicit-null-check.ll
@@ -6,7 +6,7 @@
 ; related to memory folding of arithmetic (since aarch64 doesn't), and add
 ; a couple of aarch64 specific tests.
 
-define i32 @imp_null_check_load_fallthrough(i32* %x) {
+define i32 @imp_null_check_load_fallthrough(ptr %x) {
 ; CHECK-LABEL: imp_null_check_load_fallthrough:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:  .Ltmp0:
@@ -17,11 +17,11 @@ define i32 @imp_null_check_load_fallthrough(i32* %x) {
 ; CHECK-NEXT:    mov w0, #42
 ; CHECK-NEXT:    ret
  entry:
-  %c = icmp eq i32* %x, null
+  %c = icmp eq ptr %x, null
   br i1 %c, label %is_null, label %not_null, !make.implicit !0
 
  not_null:
-  %t = load i32, i32* %x
+  %t = load i32, ptr %x
   ret i32 %t
 
 is_null:
@@ -29,7 +29,7 @@ is_null:
 }
 
 
-define i32 @imp_null_check_load_reorder(i32* %x) {
+define i32 @imp_null_check_load_reorder(ptr %x) {
 ; CHECK-LABEL: imp_null_check_load_reorder:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:  .Ltmp1:
@@ -40,18 +40,18 @@ define i32 @imp_null_check_load_reorder(i32* %x) {
 ; CHECK-NEXT:    mov w0, #42
 ; CHECK-NEXT:    ret
  entry:
-  %c = icmp eq i32* %x, null
+  %c = icmp eq ptr %x, null
   br i1 %c, label %is_null, label %not_null, !make.implicit !0
 
  is_null:
   ret i32 42
 
  not_null:
-  %t = load i32, i32* %x
+  %t = load i32, ptr %x
   ret i32 %t
 }
 
-define i32 @imp_null_check_unordered_load(i32* %x) {
+define i32 @imp_null_check_unordered_load(ptr %x) {
 ; CHECK-LABEL: imp_null_check_unordered_load:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:  .Ltmp2:
@@ -62,21 +62,21 @@ define i32 @imp_null_check_unordered_load(i32* %x) {
 ; CHECK-NEXT:    mov w0, #42
 ; CHECK-NEXT:    ret
  entry:
-  %c = icmp eq i32* %x, null
+  %c = icmp eq ptr %x, null
   br i1 %c, label %is_null, label %not_null, !make.implicit !0
 
  is_null:
   ret i32 42
 
  not_null:
-  %t = load atomic i32, i32* %x unordered, align 4
+  %t = load atomic i32, ptr %x unordered, align 4
   ret i32 %t
 }
 
 
 ; TODO: Can be converted into implicit check.
 ;; Probably could be implicit, but we're conservative for now
-define i32 @imp_null_check_seq_cst_load(i32* %x) {
+define i32 @imp_null_check_seq_cst_load(ptr %x) {
 ; CHECK-LABEL: imp_null_check_seq_cst_load:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cbz x0, .LBB3_2
@@ -87,19 +87,19 @@ define i32 @imp_null_check_seq_cst_load(i32* %x) {
 ; CHECK-NEXT:    mov w0, #42
 ; CHECK-NEXT:    ret
  entry:
-  %c = icmp eq i32* %x, null
+  %c = icmp eq ptr %x, null
   br i1 %c, label %is_null, label %not_null, !make.implicit !0
 
  is_null:
   ret i32 42
 
  not_null:
-  %t = load atomic i32, i32* %x seq_cst, align 4
+  %t = load atomic i32, ptr %x seq_cst, align 4
   ret i32 %t
 }
 
 ;; Might be memory mapped IO, so can't rely on fault behavior
-define i32 @imp_null_check_volatile_load(i32* %x) {
+define i32 @imp_null_check_volatile_load(ptr %x) {
 ; CHECK-LABEL: imp_null_check_volatile_load:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cbz x0, .LBB4_2
@@ -110,19 +110,19 @@ define i32 @imp_null_check_volatile_load(i32* %x) {
 ; CHECK-NEXT:    mov w0, #42
 ; CHECK-NEXT:    ret
  entry:
-  %c = icmp eq i32* %x, null
+  %c = icmp eq ptr %x, null
   br i1 %c, label %is_null, label %not_null, !make.implicit !0
 
  is_null:
   ret i32 42
 
  not_null:
-  %t = load volatile i32, i32* %x, align 4
+  %t = load volatile i32, ptr %x, align 4
   ret i32 %t
 }
 
 
-define i8 @imp_null_check_load_i8(i8* %x) {
+define i8 @imp_null_check_load_i8(ptr %x) {
 ; CHECK-LABEL: imp_null_check_load_i8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:  .Ltmp3:
@@ -133,18 +133,18 @@ define i8 @imp_null_check_load_i8(i8* %x) {
 ; CHECK-NEXT:    mov w0, #42
 ; CHECK-NEXT:    ret
  entry:
-  %c = icmp eq i8* %x, null
+  %c = icmp eq ptr %x, null
   br i1 %c, label %is_null, label %not_null, !make.implicit !0
 
  is_null:
   ret i8 42
 
  not_null:
-  %t = load i8, i8* %x
+  %t = load i8, ptr %x
   ret i8 %t
 }
 
-define i256 @imp_null_check_load_i256(i256* %x) {
+define i256 @imp_null_check_load_i256(ptr %x) {
 ; CHECK-LABEL: imp_null_check_load_i256:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cbz x0, .LBB6_2
@@ -159,20 +159,20 @@ define i256 @imp_null_check_load_i256(i256* %x) {
 ; CHECK-NEXT:    mov w0, #42
 ; CHECK-NEXT:    ret
  entry:
-  %c = icmp eq i256* %x, null
+  %c = icmp eq ptr %x, null
   br i1 %c, label %is_null, label %not_null, !make.implicit !0
 
  is_null:
   ret i256 42
 
  not_null:
-  %t = load i256, i256* %x
+  %t = load i256, ptr %x
   ret i256 %t
 }
 
 
 
-define i32 @imp_null_check_gep_load(i32* %x) {
+define i32 @imp_null_check_gep_load(ptr %x) {
 ; CHECK-LABEL: imp_null_check_gep_load:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:  .Ltmp4:
@@ -183,19 +183,19 @@ define i32 @imp_null_check_gep_load(i32* %x) {
 ; CHECK-NEXT:    mov w0, #42
 ; CHECK-NEXT:    ret
  entry:
-  %c = icmp eq i32* %x, null
+  %c = icmp eq ptr %x, null
   br i1 %c, label %is_null, label %not_null, !make.implicit !0
 
  is_null:
   ret i32 42
 
  not_null:
-  %x.gep = getelementptr i32, i32* %x, i32 32
-  %t = load i32, i32* %x.gep
+  %x.gep = getelementptr i32, ptr %x, i32 32
+  %t = load i32, ptr %x.gep
   ret i32 %t
 }
 
-define i32 @imp_null_check_add_result(i32* %x, i32 %p) {
+define i32 @imp_null_check_add_result(ptr %x, i32 %p) {
 ; CHECK-LABEL: imp_null_check_add_result:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:  .Ltmp5:
@@ -207,21 +207,21 @@ define i32 @imp_null_check_add_result(i32* %x, i32 %p) {
 ; CHECK-NEXT:    mov w0, #42
 ; CHECK-NEXT:    ret
  entry:
-  %c = icmp eq i32* %x, null
+  %c = icmp eq ptr %x, null
   br i1 %c, label %is_null, label %not_null, !make.implicit !0
 
  is_null:
   ret i32 42
 
  not_null:
-  %t = load i32, i32* %x
+  %t = load i32, ptr %x
   %p1 = add i32 %t, %p
   ret i32 %p1
 }
 
 ; Can hoist over a potential faulting instruction as long as we don't
 ; change the conditions under which the instruction faults.
-define i32 @imp_null_check_hoist_over_udiv(i32* %x, i32 %a, i32 %b) {
+define i32 @imp_null_check_hoist_over_udiv(ptr %x, i32 %a, i32 %b) {
 ; CHECK-LABEL: imp_null_check_hoist_over_udiv:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cbz x0, .LBB9_2
@@ -234,7 +234,7 @@ define i32 @imp_null_check_hoist_over_udiv(i32* %x, i32 %a, i32 %b) {
 ; CHECK-NEXT:    mov w0, #42
 ; CHECK-NEXT:    ret
  entry:
-  %c = icmp eq i32* %x, null
+  %c = icmp eq ptr %x, null
   br i1 %c, label %is_null, label %not_null, !make.implicit !0
 
  is_null:
@@ -242,7 +242,7 @@ define i32 @imp_null_check_hoist_over_udiv(i32* %x, i32 %a, i32 %b) {
 
  not_null:
   %p1 = udiv i32 %a, %b
-  %t = load i32, i32* %x
+  %t = load i32, ptr %x
   %res = add i32 %t, %p1
   ret i32 %res
 }
@@ -250,7 +250,7 @@ define i32 @imp_null_check_hoist_over_udiv(i32* %x, i32 %a, i32 %b) {
 
 ; TODO: We should be able to hoist this - we can on x86, why isn't this
 ; working for aarch64?  Aliasing?
-define i32 @imp_null_check_hoist_over_unrelated_load(i32* %x, i32* %y, i32* %z) {
+define i32 @imp_null_check_hoist_over_unrelated_load(ptr %x, ptr %y, ptr %z) {
 ; CHECK-LABEL: imp_null_check_hoist_over_unrelated_load:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cbz x0, .LBB10_2
@@ -263,20 +263,20 @@ define i32 @imp_null_check_hoist_over_unrelated_load(i32* %x, i32* %y, i32* %z)
 ; CHECK-NEXT:    mov w0, #42
 ; CHECK-NEXT:    ret
  entry:
-  %c = icmp eq i32* %x, null
+  %c = icmp eq ptr %x, null
   br i1 %c, label %is_null, label %not_null, !make.implicit !0
 
  is_null:
   ret i32 42
 
  not_null:
-  %t0 = load i32, i32* %y
-  %t1 = load i32, i32* %x
-  store i32 %t0, i32* %z
+  %t0 = load i32, ptr %y
+  %t1 = load i32, ptr %x
+  store i32 %t0, ptr %z
   ret i32 %t1
 }
 
-define i32 @imp_null_check_gep_load_with_use_dep(i32* %x, i32 %a) {
+define i32 @imp_null_check_gep_load_with_use_dep(ptr %x, i32 %a) {
 ; CHECK-LABEL: imp_null_check_gep_load_with_use_dep:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:  .Ltmp6:
@@ -290,24 +290,24 @@ define i32 @imp_null_check_gep_load_with_use_dep(i32* %x, i32 %a) {
 ; CHECK-NEXT:    mov w0, #42
 ; CHECK-NEXT:    ret
  entry:
-  %c = icmp eq i32* %x, null
+  %c = icmp eq ptr %x, null
   br i1 %c, label %is_null, label %not_null, !make.implicit !0
 
  is_null:
   ret i32 42
 
  not_null:
-  %x.loc = getelementptr i32, i32* %x, i32 1
-  %y = ptrtoint i32* %x.loc to i32
+  %x.loc = getelementptr i32, ptr %x, i32 1
+  %y = ptrtoint ptr %x.loc to i32
   %b = add i32 %a, %y
-  %t = load i32, i32* %x
+  %t = load i32, ptr %x
   %z = add i32 %t, %b
   ret i32 %z
 }
 
 ;; TODO: We could handle this case as we can lift the fence into the
 ;; previous block before the conditional without changing behavior.
-define i32 @imp_null_check_load_fence1(i32* %x) {
+define i32 @imp_null_check_load_fence1(ptr %x) {
 ; CHECK-LABEL: imp_null_check_load_fence1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cbz x0, .LBB12_2
@@ -319,7 +319,7 @@ define i32 @imp_null_check_load_fence1(i32* %x) {
 ; CHECK-NEXT:    mov w0, #42
 ; CHECK-NEXT:    ret
 entry:
-  %c = icmp eq i32* %x, null
+  %c = icmp eq ptr %x, null
   br i1 %c, label %is_null, label %not_null, !make.implicit !0
 
 is_null:
@@ -327,13 +327,13 @@ is_null:
 
 not_null:
   fence acquire
-  %t = load i32, i32* %x
+  %t = load i32, ptr %x
   ret i32 %t
 }
 
 ;; TODO: We could handle this case as we can lift the fence into the
 ;; previous block before the conditional without changing behavior.
-define i32 @imp_null_check_load_fence2(i32* %x) {
+define i32 @imp_null_check_load_fence2(ptr %x) {
 ; CHECK-LABEL: imp_null_check_load_fence2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cbz x0, .LBB13_2
@@ -345,7 +345,7 @@ define i32 @imp_null_check_load_fence2(i32* %x) {
 ; CHECK-NEXT:    mov w0, #42
 ; CHECK-NEXT:    ret
 entry:
-  %c = icmp eq i32* %x, null
+  %c = icmp eq ptr %x, null
   br i1 %c, label %is_null, label %not_null, !make.implicit !0
 
 is_null:
@@ -353,12 +353,12 @@ is_null:
 
 not_null:
   fence seq_cst
-  %t = load i32, i32* %x
+  %t = load i32, ptr %x
   ret i32 %t
 }
 
 ; TODO: We can fold to implicit null here, not sure why this isn't working
-define void @imp_null_check_store(i32* %x) {
+define void @imp_null_check_store(ptr %x) {
 ; CHECK-LABEL: imp_null_check_store:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cbz x0, .LBB14_2
@@ -368,19 +368,19 @@ define void @imp_null_check_store(i32* %x) {
 ; CHECK-NEXT:  .LBB14_2: // %common.ret
 ; CHECK-NEXT:    ret
  entry:
-  %c = icmp eq i32* %x, null
+  %c = icmp eq ptr %x, null
   br i1 %c, label %is_null, label %not_null, !make.implicit !0
 
  is_null:
   ret void
 
  not_null:
-  store i32 1, i32* %x
+  store i32 1, ptr %x
   ret void
 }
 
 ;; TODO: can be implicit
-define void @imp_null_check_unordered_store(i32* %x) {
+define void @imp_null_check_unordered_store(ptr %x) {
 ; CHECK-LABEL: imp_null_check_unordered_store:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cbz x0, .LBB15_2
@@ -390,18 +390,18 @@ define void @imp_null_check_unordered_store(i32* %x) {
 ; CHECK-NEXT:  .LBB15_2: // %common.ret
 ; CHECK-NEXT:    ret
  entry:
-  %c = icmp eq i32* %x, null
+  %c = icmp eq ptr %x, null
   br i1 %c, label %is_null, label %not_null, !make.implicit !0
 
  is_null:
   ret void
 
  not_null:
-  store atomic i32 1, i32* %x unordered, align 4
+  store atomic i32 1, ptr %x unordered, align 4
   ret void
 }
 
-define i32 @imp_null_check_neg_gep_load(i32* %x) {
+define i32 @imp_null_check_neg_gep_load(ptr %x) {
 ; CHECK-LABEL: imp_null_check_neg_gep_load:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:  .Ltmp7:
@@ -412,15 +412,15 @@ define i32 @imp_null_check_neg_gep_load(i32* %x) {
 ; CHECK-NEXT:    mov w0, #42
 ; CHECK-NEXT:    ret
  entry:
-  %c = icmp eq i32* %x, null
+  %c = icmp eq ptr %x, null
   br i1 %c, label %is_null, label %not_null, !make.implicit !0
 
  is_null:
   ret i32 42
 
  not_null:
-  %x.gep = getelementptr i32, i32* %x, i32 -32
-  %t = load i32, i32* %x.gep
+  %x.gep = getelementptr i32, ptr %x, i32 -32
+  %t = load i32, ptr %x.gep
   ret i32 %t
 }
 

diff --git a/llvm/test/CodeGen/AArch64/init-array.ll b/llvm/test/CodeGen/AArch64/init-array.ll
index 825f1ad0b2385..d5d4d513508da 100644
--- a/llvm/test/CodeGen/AArch64/init-array.ll
+++ b/llvm/test/CodeGen/AArch64/init-array.ll
@@ -5,6 +5,6 @@ define internal void @_GLOBAL__I_a() section ".text.startup" {
   ret void
 }
 
-@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @_GLOBAL__I_a, i8* null }]
+@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @_GLOBAL__I_a, ptr null }]
 
 ; CHECK: .section .init_array

diff --git a/llvm/test/CodeGen/AArch64/inline-asm-blockaddress.ll b/llvm/test/CodeGen/AArch64/inline-asm-blockaddress.ll
index 6384783540489..e5a9998eefd48 100644
--- a/llvm/test/CodeGen/AArch64/inline-asm-blockaddress.ll
+++ b/llvm/test/CodeGen/AArch64/inline-asm-blockaddress.ll
@@ -5,8 +5,8 @@ define void @foo() {
 entry:
   br label %bar
 bar:
-  call void asm sideeffect "#TEST $0", "i,~{dirflag},~{fpsr},~{flags}"(i8* blockaddress(@foo, %bar))
+  call void asm sideeffect "#TEST $0", "i,~{dirflag},~{fpsr},~{flags}"(ptr blockaddress(@foo, %bar))
   ret void
 indirectgoto:
-  indirectbr i8* undef, [label %bar]
+  indirectbr ptr undef, [label %bar]
 }

diff --git a/llvm/test/CodeGen/AArch64/inline-asm-constraints-bad-sve.ll b/llvm/test/CodeGen/AArch64/inline-asm-constraints-bad-sve.ll
index aa25d118c9b5d..78a4af02d7e0d 100644
--- a/llvm/test/CodeGen/AArch64/inline-asm-constraints-bad-sve.ll
+++ b/llvm/test/CodeGen/AArch64/inline-asm-constraints-bad-sve.ll
@@ -8,9 +8,9 @@ target triple = "aarch64-unknown-linux-gnu"
 ; CHECK: error: couldn't allocate output register for constraint 'w'
 ; CHECK: error: unknown token in expression
 
-define <vscale x 16 x i1> @foo1(i32 *%in) {
+define <vscale x 16 x i1> @foo1(ptr %in) {
 entry:
-  %0 = load i32, i32* %in, align 4
+  %0 = load i32, ptr %in, align 4
   %1 = call <vscale x 16 x i1> asm sideeffect "mov $0.b, $1.b \0A", "=@3Upa,@3Upa"(i32 %0)
   ret <vscale x 16 x i1> %1
 }

diff --git a/llvm/test/CodeGen/AArch64/inline-asm-globaladdress.ll b/llvm/test/CodeGen/AArch64/inline-asm-globaladdress.ll
index adebabaa29bec..7a44fc206d699 100644
--- a/llvm/test/CodeGen/AArch64/inline-asm-globaladdress.ll
+++ b/llvm/test/CodeGen/AArch64/inline-asm-globaladdress.ll
@@ -6,14 +6,14 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 ; CHECK-LABEL: test_inlineasm_globaladdress:
 ; CHECK: b {{_?}}test_symbol
 define void @test_inlineasm_globaladdress() {
-  call void asm sideeffect "b $0", "i"(void ()* @test_symbol)
+  call void asm sideeffect "b $0", "i"(ptr @test_symbol)
   ret void
 }
 
 ; CHECK-LABEL: test_inlineasm_globaladdress_offset:
 ; CHECK: b {{_?}}test_symbol+4
 define void @test_inlineasm_globaladdress_offset() {
-  call void asm sideeffect "b $0", "i"(void ()* bitcast (i8* getelementptr (i8, i8* bitcast (void ()* @test_symbol to i8*), i64 4) to void ()*))
+  call void asm sideeffect "b $0", "i"(ptr getelementptr (i8, ptr @test_symbol, i64 4))
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/inline-asm-multilevel-gep.ll b/llvm/test/CodeGen/AArch64/inline-asm-multilevel-gep.ll
index 5152302b11b97..a42d31faf6bbb 100644
--- a/llvm/test/CodeGen/AArch64/inline-asm-multilevel-gep.ll
+++ b/llvm/test/CodeGen/AArch64/inline-asm-multilevel-gep.ll
@@ -7,6 +7,6 @@
 define void @bar() {
 ; access foo[1][1]
 ; CHECK: // foo+12
-  tail call void asm sideeffect "// ${0:c}", "i"(i32* getelementptr inbounds ([2 x [2 x i32]], [2 x [2 x i32]]* @foo, i64 0, i64 1, i64 1))
+  tail call void asm sideeffect "// ${0:c}", "i"(ptr getelementptr inbounds ([2 x [2 x i32]], ptr @foo, i64 0, i64 1, i64 1))
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/inlineasm-S-constraint.ll b/llvm/test/CodeGen/AArch64/inlineasm-S-constraint.ll
index 16583bb69c03f..18b4ef955236a 100644
--- a/llvm/test/CodeGen/AArch64/inlineasm-S-constraint.ll
+++ b/llvm/test/CodeGen/AArch64/inlineasm-S-constraint.ll
@@ -2,15 +2,15 @@
 @var = global i32 0
 define void @test_inline_constraint_S() {
 ; CHECK-LABEL: test_inline_constraint_S:
-  call void asm sideeffect "adrp x0, $0", "S"(i32* @var)
-  call void asm sideeffect "add x0, x0, :lo12:$0", "S"(i32* @var)
+  call void asm sideeffect "adrp x0, $0", "S"(ptr @var)
+  call void asm sideeffect "add x0, x0, :lo12:$0", "S"(ptr @var)
 ; CHECK: adrp x0, var
 ; CHECK: add x0, x0, :lo12:var
   ret void
 }
 define i32 @test_inline_constraint_S_label(i1 %in) {
 ; CHECK-LABEL: test_inline_constraint_S_label:
-  call void asm sideeffect "adr x0, $0", "S"(i8* blockaddress(@test_inline_constraint_S_label, %loc))
+  call void asm sideeffect "adr x0, $0", "S"(ptr blockaddress(@test_inline_constraint_S_label, %loc))
 ; CHECK: adr x0, .Ltmp{{[0-9]+}}
 br i1 %in, label %loc, label %loc2
 loc:
@@ -20,7 +20,7 @@ loc2:
 }
 define i32 @test_inline_constraint_S_label_tailmerged(i1 %in) {
 ; CHECK-LABEL: test_inline_constraint_S_label_tailmerged:
-  call void asm sideeffect "adr x0, $0", "S"(i8* blockaddress(@test_inline_constraint_S_label_tailmerged, %loc))
+  call void asm sideeffect "adr x0, $0", "S"(ptr blockaddress(@test_inline_constraint_S_label_tailmerged, %loc))
 ; CHECK: adr x0, .Ltmp{{[0-9]+}}
 br i1 %in, label %loc, label %loc2
 loc:
@@ -34,7 +34,7 @@ common.ret:
 
 define i32 @test_inline_constraint_S_label_tailmerged2(i1 %in) {
 ; CHECK-LABEL: test_inline_constraint_S_label_tailmerged2:
-  call void asm sideeffect "adr x0, $0", "S"(i8* blockaddress(@test_inline_constraint_S_label_tailmerged2, %loc))
+  call void asm sideeffect "adr x0, $0", "S"(ptr blockaddress(@test_inline_constraint_S_label_tailmerged2, %loc))
 ; CHECK: adr x0, .Ltmp{{[0-9]+}}
   br i1 %in, label %loc, label %loc2
 common.ret:

diff --git a/llvm/test/CodeGen/AArch64/inlineasm-illegal-type.ll b/llvm/test/CodeGen/AArch64/inlineasm-illegal-type.ll
index 18d6c18979bc0..e75742b77447d 100644
--- a/llvm/test/CodeGen/AArch64/inlineasm-illegal-type.ll
+++ b/llvm/test/CodeGen/AArch64/inlineasm-illegal-type.ll
@@ -17,16 +17,16 @@ entry:
   ret double %0
 }
 
-define void @test_vector_too_large(<8 x float>* nocapture readonly %0) {
+define void @test_vector_too_large(ptr nocapture readonly %0) {
 entry:
-  %m = load <8 x float>, <8 x float>* %0, align 16
+  %m = load <8 x float>, ptr %0, align 16
   tail call void asm sideeffect "fadd.4s v4, v4, $0", "w,~{memory}"(<8 x float> %m)
   ret void
 }
 
-define void @test_vector_no_mvt(<9 x float>* nocapture readonly %0) {
+define void @test_vector_no_mvt(ptr nocapture readonly %0) {
 entry:
-  %m = load <9 x float>, <9 x float>* %0, align 16
+  %m = load <9 x float>, ptr %0, align 16
   tail call void asm sideeffect "fadd.4s v4, v4, $0", "w,~{memory}"(<9 x float> %m)
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/inlineasm-output-template.ll b/llvm/test/CodeGen/AArch64/inlineasm-output-template.ll
index 2e76ff89f4521..da751baf07e9a 100644
--- a/llvm/test/CodeGen/AArch64/inlineasm-output-template.ll
+++ b/llvm/test/CodeGen/AArch64/inlineasm-output-template.ll
@@ -14,7 +14,7 @@ define dso_local i32 @test_inlineasm_c_output_template0() {
 ; CHECK: TEST {{_?}}baz
 @baz = internal global i32 0, align 4
 define dso_local i32 @test_inlineasm_c_output_template1() {
-  tail call void asm sideeffect "//TEST ${0:c}", "i"(i32* nonnull @baz)
+  tail call void asm sideeffect "//TEST ${0:c}", "i"(ptr nonnull @baz)
   ret i32 43
 }
 

diff --git a/llvm/test/CodeGen/AArch64/insert-extend.ll b/llvm/test/CodeGen/AArch64/insert-extend.ll
index c27ed21e558e2..e331a8dca6976 100644
--- a/llvm/test/CodeGen/AArch64/insert-extend.ll
+++ b/llvm/test/CodeGen/AArch64/insert-extend.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64--linux-gnu | FileCheck %s
 
-define <8 x i8> @load4_v4i8_add(float %tmp, <4 x i8> *%a, <4 x i8> *%b) {
+define <8 x i8> @load4_v4i8_add(float %tmp, ptr %a, ptr %b) {
 ; CHECK-LABEL: load4_v4i8_add:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp s0, s1, [x0]
@@ -9,19 +9,19 @@ define <8 x i8> @load4_v4i8_add(float %tmp, <4 x i8> *%a, <4 x i8> *%b) {
 ; CHECK-NEXT:    ld1 { v1.s }[1], [x1]
 ; CHECK-NEXT:    add v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
-  %la = load <4 x i8>, <4 x i8> *%a
-  %lb = load <4 x i8>, <4 x i8> *%b
-  %c = getelementptr <4 x i8>, <4 x i8> *%a, i64 1
-  %d = getelementptr <4 x i8>, <4 x i8> *%b, i64 1
-  %lc = load <4 x i8>, <4 x i8> *%c
-  %ld = load <4 x i8>, <4 x i8> *%d
+  %la = load <4 x i8>, ptr %a
+  %lb = load <4 x i8>, ptr %b
+  %c = getelementptr <4 x i8>, ptr %a, i64 1
+  %d = getelementptr <4 x i8>, ptr %b, i64 1
+  %lc = load <4 x i8>, ptr %c
+  %ld = load <4 x i8>, ptr %d
   %s1 = shufflevector <4 x i8> %la, <4 x i8> %lb, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s2 = shufflevector <4 x i8> %lc, <4 x i8> %ld, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %add = add <8 x i8> %s1, %s2
   ret <8 x i8> %add
 }
 
-define <8 x i16> @load4_v4i8_zext_add(float %tmp, <4 x i8> *%a, <4 x i8> *%b) {
+define <8 x i16> @load4_v4i8_zext_add(float %tmp, ptr %a, ptr %b) {
 ; CHECK-LABEL: load4_v4i8_zext_add:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp s0, s1, [x0]
@@ -29,12 +29,12 @@ define <8 x i16> @load4_v4i8_zext_add(float %tmp, <4 x i8> *%a, <4 x i8> *%b) {
 ; CHECK-NEXT:    ld1 { v1.s }[1], [x1]
 ; CHECK-NEXT:    uaddl v0.8h, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
-  %la = load <4 x i8>, <4 x i8> *%a
-  %lb = load <4 x i8>, <4 x i8> *%b
-  %c = getelementptr <4 x i8>, <4 x i8> *%a, i64 1
-  %d = getelementptr <4 x i8>, <4 x i8> *%b, i64 1
-  %lc = load <4 x i8>, <4 x i8> *%c
-  %ld = load <4 x i8>, <4 x i8> *%d
+  %la = load <4 x i8>, ptr %a
+  %lb = load <4 x i8>, ptr %b
+  %c = getelementptr <4 x i8>, ptr %a, i64 1
+  %d = getelementptr <4 x i8>, ptr %b, i64 1
+  %lc = load <4 x i8>, ptr %c
+  %ld = load <4 x i8>, ptr %d
   %s1 = shufflevector <4 x i8> %la, <4 x i8> %lb, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s2 = shufflevector <4 x i8> %lc, <4 x i8> %ld, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %z1 = zext <8 x i8> %s1 to <8 x i16>
@@ -43,7 +43,7 @@ define <8 x i16> @load4_v4i8_zext_add(float %tmp, <4 x i8> *%a, <4 x i8> *%b) {
   ret <8 x i16> %add
 }
 
-define i32 @large(i8* nocapture noundef readonly %p1, i32 noundef %st1, i8* nocapture noundef readonly %p2, i32 noundef %st2) {
+define i32 @large(ptr nocapture noundef readonly %p1, i32 noundef %st1, ptr nocapture noundef readonly %p2, i32 noundef %st2) {
 ; CHECK-LABEL: large:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $w3 killed $w3 def $x3
@@ -172,104 +172,88 @@ define i32 @large(i8* nocapture noundef readonly %p1, i32 noundef %st1, i8* noca
 entry:
   %idx.ext = sext i32 %st1 to i64
   %idx.ext63 = sext i32 %st2 to i64
-  %arrayidx3 = getelementptr inbounds i8, i8* %p1, i64 4
-  %arrayidx5 = getelementptr inbounds i8, i8* %p2, i64 4
-  %0 = bitcast i8* %p1 to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  %2 = bitcast i8* %p2 to <4 x i8>*
-  %3 = load <4 x i8>, <4 x i8>* %2, align 1
-  %4 = bitcast i8* %arrayidx3 to <4 x i8>*
-  %5 = load <4 x i8>, <4 x i8>* %4, align 1
-  %6 = bitcast i8* %arrayidx5 to <4 x i8>*
-  %7 = load <4 x i8>, <4 x i8>* %6, align 1
-  %add.ptr = getelementptr inbounds i8, i8* %p1, i64 %idx.ext
-  %add.ptr64 = getelementptr inbounds i8, i8* %p2, i64 %idx.ext63
-  %arrayidx3.1 = getelementptr inbounds i8, i8* %add.ptr, i64 4
-  %arrayidx5.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 4
-  %8 = bitcast i8* %add.ptr to <4 x i8>*
-  %9 = load <4 x i8>, <4 x i8>* %8, align 1
-  %10 = bitcast i8* %add.ptr64 to <4 x i8>*
-  %11 = load <4 x i8>, <4 x i8>* %10, align 1
-  %12 = bitcast i8* %arrayidx3.1 to <4 x i8>*
-  %13 = load <4 x i8>, <4 x i8>* %12, align 1
-  %14 = bitcast i8* %arrayidx5.1 to <4 x i8>*
-  %15 = load <4 x i8>, <4 x i8>* %14, align 1
-  %add.ptr.1 = getelementptr inbounds i8, i8* %add.ptr, i64 %idx.ext
-  %add.ptr64.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 %idx.ext63
-  %arrayidx3.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 4
-  %arrayidx5.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 4
-  %16 = bitcast i8* %add.ptr.1 to <4 x i8>*
-  %17 = load <4 x i8>, <4 x i8>* %16, align 1
-  %18 = bitcast i8* %add.ptr64.1 to <4 x i8>*
-  %19 = load <4 x i8>, <4 x i8>* %18, align 1
-  %20 = bitcast i8* %arrayidx3.2 to <4 x i8>*
-  %21 = load <4 x i8>, <4 x i8>* %20, align 1
-  %22 = bitcast i8* %arrayidx5.2 to <4 x i8>*
-  %23 = load <4 x i8>, <4 x i8>* %22, align 1
-  %add.ptr.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 %idx.ext
-  %add.ptr64.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 %idx.ext63
-  %arrayidx3.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 4
-  %arrayidx5.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 4
-  %24 = bitcast i8* %add.ptr.2 to <4 x i8>*
-  %25 = load <4 x i8>, <4 x i8>* %24, align 1
-  %26 = shufflevector <4 x i8> %25, <4 x i8> %17, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  %27 = shufflevector <4 x i8> %9, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  %28 = shufflevector <16 x i8> %26, <16 x i8> %27, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 undef, i32 undef, i32 undef, i32 undef>
-  %29 = shufflevector <4 x i8> %1, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  %30 = shufflevector <16 x i8> %28, <16 x i8> %29, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
-  %31 = zext <16 x i8> %30 to <16 x i32>
-  %32 = bitcast i8* %add.ptr64.2 to <4 x i8>*
-  %33 = load <4 x i8>, <4 x i8>* %32, align 1
-  %34 = shufflevector <4 x i8> %33, <4 x i8> %19, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  %35 = shufflevector <4 x i8> %11, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  %36 = shufflevector <16 x i8> %34, <16 x i8> %35, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 undef, i32 undef, i32 undef, i32 undef>
-  %37 = shufflevector <4 x i8> %3, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  %38 = shufflevector <16 x i8> %36, <16 x i8> %37, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
-  %39 = zext <16 x i8> %38 to <16 x i32>
-  %40 = sub nsw <16 x i32> %31, %39
-  %41 = bitcast i8* %arrayidx3.3 to <4 x i8>*
-  %42 = load <4 x i8>, <4 x i8>* %41, align 1
-  %43 = shufflevector <4 x i8> %42, <4 x i8> %21, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  %44 = shufflevector <4 x i8> %13, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  %45 = shufflevector <16 x i8> %43, <16 x i8> %44, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 undef, i32 undef, i32 undef, i32 undef>
-  %46 = shufflevector <4 x i8> %5, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  %47 = shufflevector <16 x i8> %45, <16 x i8> %46, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
-  %48 = zext <16 x i8> %47 to <16 x i32>
-  %49 = bitcast i8* %arrayidx5.3 to <4 x i8>*
-  %50 = load <4 x i8>, <4 x i8>* %49, align 1
-  %51 = shufflevector <4 x i8> %50, <4 x i8> %23, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  %52 = shufflevector <4 x i8> %15, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  %53 = shufflevector <16 x i8> %51, <16 x i8> %52, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 undef, i32 undef, i32 undef, i32 undef>
-  %54 = shufflevector <4 x i8> %7, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  %55 = shufflevector <16 x i8> %53, <16 x i8> %54, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
-  %56 = zext <16 x i8> %55 to <16 x i32>
-  %57 = sub nsw <16 x i32> %48, %56
-  %58 = shl nsw <16 x i32> %57, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
-  %59 = add nsw <16 x i32> %58, %40
-  %60 = shufflevector <16 x i32> %59, <16 x i32> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
-  %61 = add nsw <16 x i32> %59, %60
-  %62 = sub nsw <16 x i32> %59, %60
-  %63 = shufflevector <16 x i32> %61, <16 x i32> %62, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 22, i32 18, i32 26, i32 30, i32 5, i32 1, i32 9, i32 13, i32 20, i32 16, i32 24, i32 28>
-  %64 = shufflevector <16 x i32> %61, <16 x i32> %62, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 20, i32 16, i32 24, i32 28, i32 7, i32 3, i32 11, i32 15, i32 22, i32 18, i32 26, i32 30>
-  %65 = add nsw <16 x i32> %63, %64
-  %66 = sub nsw <16 x i32> %63, %64
-  %67 = shufflevector <16 x i32> %65, <16 x i32> %66, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-  %68 = shufflevector <16 x i32> %65, <16 x i32> %66, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 25, i32 24, i32 27, i32 26, i32 29, i32 28, i32 31, i32 30>
-  %69 = add nsw <16 x i32> %67, %68
-  %70 = sub nsw <16 x i32> %67, %68
-  %71 = shufflevector <16 x i32> %69, <16 x i32> %70, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 20, i32 5, i32 6, i32 23, i32 24, i32 9, i32 10, i32 27, i32 28, i32 13, i32 14, i32 31>
-  %72 = shufflevector <16 x i32> %69, <16 x i32> %70, <16 x i32> <i32 2, i32 19, i32 0, i32 17, i32 23, i32 6, i32 5, i32 20, i32 27, i32 10, i32 9, i32 24, i32 31, i32 14, i32 13, i32 28>
-  %73 = add nsw <16 x i32> %71, %72
-  %74 = sub nsw <16 x i32> %71, %72
-  %75 = shufflevector <16 x i32> %73, <16 x i32> %74, <16 x i32> <i32 0, i32 1, i32 18, i32 19, i32 4, i32 5, i32 22, i32 23, i32 8, i32 9, i32 26, i32 27, i32 12, i32 13, i32 30, i32 31>
-  %76 = lshr <16 x i32> %75, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
-  %77 = and <16 x i32> %76, <i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537>
-  %78 = mul nuw <16 x i32> %77, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
-  %79 = add <16 x i32> %78, %75
-  %80 = xor <16 x i32> %79, %78
-  %81 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %80)
-  %conv118 = and i32 %81, 65535
-  %shr = lshr i32 %81, 16
+  %arrayidx3 = getelementptr inbounds i8, ptr %p1, i64 4
+  %arrayidx5 = getelementptr inbounds i8, ptr %p2, i64 4
+  %0 = load <4 x i8>, ptr %p1, align 1
+  %1 = load <4 x i8>, ptr %p2, align 1
+  %2 = load <4 x i8>, ptr %arrayidx3, align 1
+  %3 = load <4 x i8>, ptr %arrayidx5, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %p1, i64 %idx.ext
+  %add.ptr64 = getelementptr inbounds i8, ptr %p2, i64 %idx.ext63
+  %arrayidx3.1 = getelementptr inbounds i8, ptr %add.ptr, i64 4
+  %arrayidx5.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 4
+  %4 = load <4 x i8>, ptr %add.ptr, align 1
+  %5 = load <4 x i8>, ptr %add.ptr64, align 1
+  %6 = load <4 x i8>, ptr %arrayidx3.1, align 1
+  %7 = load <4 x i8>, ptr %arrayidx5.1, align 1
+  %add.ptr.1 = getelementptr inbounds i8, ptr %add.ptr, i64 %idx.ext
+  %add.ptr64.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 %idx.ext63
+  %arrayidx3.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 4
+  %arrayidx5.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 4
+  %8 = load <4 x i8>, ptr %add.ptr.1, align 1
+  %9 = load <4 x i8>, ptr %add.ptr64.1, align 1
+  %10 = load <4 x i8>, ptr %arrayidx3.2, align 1
+  %11 = load <4 x i8>, ptr %arrayidx5.2, align 1
+  %add.ptr.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 %idx.ext
+  %add.ptr64.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 %idx.ext63
+  %arrayidx3.3 = getelementptr inbounds i8, ptr %add.ptr.2, i64 4
+  %arrayidx5.3 = getelementptr inbounds i8, ptr %add.ptr64.2, i64 4
+  %12 = load <4 x i8>, ptr %add.ptr.2, align 1
+  %13 = shufflevector <4 x i8> %12, <4 x i8> %8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %14 = shufflevector <4 x i8> %4, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %15 = shufflevector <16 x i8> %13, <16 x i8> %14, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 undef, i32 undef, i32 undef, i32 undef>
+  %16 = shufflevector <4 x i8> %0, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %17 = shufflevector <16 x i8> %15, <16 x i8> %16, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+  %18 = zext <16 x i8> %17 to <16 x i32>
+  %19 = load <4 x i8>, ptr %add.ptr64.2, align 1
+  %20 = shufflevector <4 x i8> %19, <4 x i8> %9, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %21 = shufflevector <4 x i8> %5, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %22 = shufflevector <16 x i8> %20, <16 x i8> %21, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 undef, i32 undef, i32 undef, i32 undef>
+  %23 = shufflevector <4 x i8> %1, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %24 = shufflevector <16 x i8> %22, <16 x i8> %23, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+  %25 = zext <16 x i8> %24 to <16 x i32>
+  %26 = sub nsw <16 x i32> %18, %25
+  %27 = load <4 x i8>, ptr %arrayidx3.3, align 1
+  %28 = shufflevector <4 x i8> %27, <4 x i8> %10, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %29 = shufflevector <4 x i8> %6, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %30 = shufflevector <16 x i8> %28, <16 x i8> %29, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 undef, i32 undef, i32 undef, i32 undef>
+  %31 = shufflevector <4 x i8> %2, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %32 = shufflevector <16 x i8> %30, <16 x i8> %31, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+  %33 = zext <16 x i8> %32 to <16 x i32>
+  %34 = load <4 x i8>, ptr %arrayidx5.3, align 1
+  %35 = shufflevector <4 x i8> %34, <4 x i8> %11, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %36 = shufflevector <4 x i8> %7, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %37 = shufflevector <16 x i8> %35, <16 x i8> %36, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 undef, i32 undef, i32 undef, i32 undef>
+  %38 = shufflevector <4 x i8> %3, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %39 = shufflevector <16 x i8> %37, <16 x i8> %38, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+  %40 = zext <16 x i8> %39 to <16 x i32>
+  %41 = sub nsw <16 x i32> %33, %40
+  %42 = shl nsw <16 x i32> %41, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %43 = add nsw <16 x i32> %42, %26
+  %44 = shufflevector <16 x i32> %43, <16 x i32> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
+  %45 = add nsw <16 x i32> %43, %44
+  %46 = sub nsw <16 x i32> %43, %44
+  %47 = shufflevector <16 x i32> %45, <16 x i32> %46, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 22, i32 18, i32 26, i32 30, i32 5, i32 1, i32 9, i32 13, i32 20, i32 16, i32 24, i32 28>
+  %48 = shufflevector <16 x i32> %45, <16 x i32> %46, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 20, i32 16, i32 24, i32 28, i32 7, i32 3, i32 11, i32 15, i32 22, i32 18, i32 26, i32 30>
+  %49 = add nsw <16 x i32> %47, %48
+  %50 = sub nsw <16 x i32> %47, %48
+  %51 = shufflevector <16 x i32> %49, <16 x i32> %50, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %52 = shufflevector <16 x i32> %49, <16 x i32> %50, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 25, i32 24, i32 27, i32 26, i32 29, i32 28, i32 31, i32 30>
+  %53 = add nsw <16 x i32> %51, %52
+  %54 = sub nsw <16 x i32> %51, %52
+  %55 = shufflevector <16 x i32> %53, <16 x i32> %54, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 20, i32 5, i32 6, i32 23, i32 24, i32 9, i32 10, i32 27, i32 28, i32 13, i32 14, i32 31>
+  %56 = shufflevector <16 x i32> %53, <16 x i32> %54, <16 x i32> <i32 2, i32 19, i32 0, i32 17, i32 23, i32 6, i32 5, i32 20, i32 27, i32 10, i32 9, i32 24, i32 31, i32 14, i32 13, i32 28>
+  %57 = add nsw <16 x i32> %55, %56
+  %58 = sub nsw <16 x i32> %55, %56
+  %59 = shufflevector <16 x i32> %57, <16 x i32> %58, <16 x i32> <i32 0, i32 1, i32 18, i32 19, i32 4, i32 5, i32 22, i32 23, i32 8, i32 9, i32 26, i32 27, i32 12, i32 13, i32 30, i32 31>
+  %60 = lshr <16 x i32> %59, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+  %61 = and <16 x i32> %60, <i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537, i32 65537>
+  %62 = mul nuw <16 x i32> %61, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+  %63 = add <16 x i32> %62, %59
+  %64 = xor <16 x i32> %63, %62
+  %65 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %64)
+  %conv118 = and i32 %65, 65535
+  %shr = lshr i32 %65, 16
   %add119 = add nuw nsw i32 %conv118, %shr
   %shr120 = lshr i32 %add119, 1
   ret i32 %shr120

diff --git a/llvm/test/CodeGen/AArch64/insert-subvector.ll b/llvm/test/CodeGen/AArch64/insert-subvector.ll
index ed4c0c5958861..d86b3b526eed7 100644
--- a/llvm/test/CodeGen/AArch64/insert-subvector.ll
+++ b/llvm/test/CodeGen/AArch64/insert-subvector.ll
@@ -257,19 +257,19 @@ define <4 x i32> @insert_v4i32_2_2(float %tmp, <4 x i32> %b, <4 x i32> %a) {
 
 ; i8
 
-define <16 x i8> @load_v16i8_4_1(float %tmp, <16 x i8> %b, <4 x i8> *%a) {
+define <16 x i8> @load_v16i8_4_1(float %tmp, <16 x i8> %b, ptr %a) {
 ; CHECK-LABEL: load_v16i8_4_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov v0.16b, v1.16b
 ; CHECK-NEXT:    ld1 { v0.s }[0], [x0]
 ; CHECK-NEXT:    ret
-  %l = load <4 x i8>, <4 x i8> *%a
+  %l = load <4 x i8>, ptr %a
   %s1 = shufflevector <4 x i8> %l, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s2 = shufflevector <16 x i8> %s1, <16 x i8> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   ret <16 x i8> %s2
 }
 
-define <16 x i8> @load_v16i8_4_15(float %tmp, <16 x i8> %b, <4 x i8> *%a) {
+define <16 x i8> @load_v16i8_4_15(float %tmp, <16 x i8> %b, ptr %a) {
 ; CHECK-LABEL: load_v16i8_4_15:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x8, .LCPI24_0
@@ -278,49 +278,49 @@ define <16 x i8> @load_v16i8_4_15(float %tmp, <16 x i8> %b, <4 x i8> *%a) {
 ; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI24_0]
 ; CHECK-NEXT:    tbl v0.16b, { v0.16b, v1.16b }, v2.16b
 ; CHECK-NEXT:    ret
-  %l = load <4 x i8>, <4 x i8> *%a
+  %l = load <4 x i8>, ptr %a
   %s1 = shufflevector <4 x i8> %l, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s2 = shufflevector <16 x i8> %s1, <16 x i8> %b, <16 x i32> <i32 16, i32 17, i32 0, i32 1, i32 2, i32 3, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   ret <16 x i8> %s2
 }
 
-define <16 x i8> @load_v16i8_4_2(float %tmp, <16 x i8> %b, <4 x i8> *%a) {
+define <16 x i8> @load_v16i8_4_2(float %tmp, <16 x i8> %b, ptr %a) {
 ; CHECK-LABEL: load_v16i8_4_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov v0.16b, v1.16b
 ; CHECK-NEXT:    ld1 { v0.s }[1], [x0]
 ; CHECK-NEXT:    ret
-  %l = load <4 x i8>, <4 x i8> *%a
+  %l = load <4 x i8>, ptr %a
   %s1 = shufflevector <4 x i8> %l, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s2 = shufflevector <16 x i8> %s1, <16 x i8> %b, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 0, i32 1, i32 2, i32 3, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   ret <16 x i8> %s2
 }
 
-define <16 x i8> @load_v16i8_4_3(float %tmp, <16 x i8> %b, <4 x i8> *%a) {
+define <16 x i8> @load_v16i8_4_3(float %tmp, <16 x i8> %b, ptr %a) {
 ; CHECK-LABEL: load_v16i8_4_3:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov v0.16b, v1.16b
 ; CHECK-NEXT:    ld1 { v0.s }[2], [x0]
 ; CHECK-NEXT:    ret
-  %l = load <4 x i8>, <4 x i8> *%a
+  %l = load <4 x i8>, ptr %a
   %s1 = shufflevector <4 x i8> %l, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s2 = shufflevector <16 x i8> %s1, <16 x i8> %b, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 0, i32 1, i32 2, i32 3, i32 28, i32 29, i32 30, i32 31>
   ret <16 x i8> %s2
 }
 
-define <16 x i8> @load_v16i8_4_4(float %tmp, <16 x i8> %b, <4 x i8> *%a) {
+define <16 x i8> @load_v16i8_4_4(float %tmp, <16 x i8> %b, ptr %a) {
 ; CHECK-LABEL: load_v16i8_4_4:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov v0.16b, v1.16b
 ; CHECK-NEXT:    ld1 { v0.s }[3], [x0]
 ; CHECK-NEXT:    ret
-  %l = load <4 x i8>, <4 x i8> *%a
+  %l = load <4 x i8>, ptr %a
   %s1 = shufflevector <4 x i8> %l, <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s2 = shufflevector <16 x i8> %s1, <16 x i8> %b, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 0, i32 1, i32 2, i32 3>
   ret <16 x i8> %s2
 }
 
-define <8 x i8> @load_v8i8_4_1(float %tmp, <8 x i8> %b, <4 x i8> *%a) {
+define <8 x i8> @load_v8i8_4_1(float %tmp, <8 x i8> %b, ptr %a) {
 ; CHECK-LABEL: load_v8i8_4_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
@@ -328,13 +328,13 @@ define <8 x i8> @load_v8i8_4_1(float %tmp, <8 x i8> %b, <4 x i8> *%a) {
 ; CHECK-NEXT:    mov v0.s[1], v1.s[1]
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    ret
-  %l = load <4 x i8>, <4 x i8> *%a
+  %l = load <4 x i8>, ptr %a
   %s1 = shufflevector <4 x i8> %l, <4 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
   %s2 = shufflevector <8 x i8> %s1, <8 x i8> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
   ret <8 x i8> %s2
 }
 
-define <8 x i8> @load_v8i8_4_2(float %tmp, <8 x i8> %b, <4 x i8> *%a) {
+define <8 x i8> @load_v8i8_4_2(float %tmp, <8 x i8> %b, ptr %a) {
 ; CHECK-LABEL: load_v8i8_4_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov d0, d1
@@ -342,33 +342,33 @@ define <8 x i8> @load_v8i8_4_2(float %tmp, <8 x i8> %b, <4 x i8> *%a) {
 ; CHECK-NEXT:    mov v0.s[1], v1.s[0]
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    ret
-  %l = load <4 x i8>, <4 x i8> *%a
+  %l = load <4 x i8>, ptr %a
   %s1 = shufflevector <4 x i8> %l, <4 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
   %s2 = shufflevector <8 x i8> %s1, <8 x i8> %b, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 0, i32 1, i32 2, i32 3>
   ret <8 x i8> %s2
 }
 
-define <16 x i8> @load_v16i8_8_1(float %tmp, <16 x i8> %b, <8 x i8> *%a) {
+define <16 x i8> @load_v16i8_8_1(float %tmp, <16 x i8> %b, ptr %a) {
 ; CHECK-LABEL: load_v16i8_8_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext v1.16b, v1.16b, v1.16b, #8
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-NEXT:    ret
-  %l = load <8 x i8>, <8 x i8> *%a
+  %l = load <8 x i8>, ptr %a
   %s1 = shufflevector <8 x i8> %l, <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s2 = shufflevector <16 x i8> %s1, <16 x i8> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   ret <16 x i8> %s2
 }
 
-define <16 x i8> @load_v16i8_8_2(float %tmp, <16 x i8> %b, <8 x i8> *%a) {
+define <16 x i8> @load_v16i8_8_2(float %tmp, <16 x i8> %b, ptr %a) {
 ; CHECK-LABEL: load_v16i8_8_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov v0.16b, v1.16b
 ; CHECK-NEXT:    ldr d1, [x0]
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-NEXT:    ret
-  %l = load <8 x i8>, <8 x i8> *%a
+  %l = load <8 x i8>, ptr %a
   %s1 = shufflevector <8 x i8> %l, <8 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s2 = shufflevector <16 x i8> %s1, <16 x i8> %b, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   ret <16 x i8> %s2
@@ -376,7 +376,7 @@ define <16 x i8> @load_v16i8_8_2(float %tmp, <16 x i8> %b, <8 x i8> *%a) {
 
 ; i16
 
-define <8 x i16> @load_v8i16_2_1(float %tmp, <8 x i16> %b, <2 x i16> *%a) {
+define <8 x i16> @load_v8i16_2_1(float %tmp, <8 x i16> %b, ptr %a) {
 ; CHECK-LABEL: load_v8i16_2_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w9, [x0]
@@ -387,13 +387,13 @@ define <8 x i16> @load_v8i16_2_1(float %tmp, <8 x i16> %b, <2 x i16> *%a) {
 ; CHECK-NEXT:    xtn v1.4h, v2.4s
 ; CHECK-NEXT:    mov v0.s[0], v1.s[0]
 ; CHECK-NEXT:    ret
-  %l = load <2 x i16>, <2 x i16> *%a
+  %l = load <2 x i16>, ptr %a
   %s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s2 = shufflevector <8 x i16> %s1, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   ret <8 x i16> %s2
 }
 
-define <8 x i16> @load_v8i16_2_15(float %tmp, <8 x i16> %b, <2 x i16> *%a) {
+define <8 x i16> @load_v8i16_2_15(float %tmp, <8 x i16> %b, ptr %a) {
 ; CHECK-LABEL: load_v8i16_2_15:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w9, [x0]
@@ -406,13 +406,13 @@ define <8 x i16> @load_v8i16_2_15(float %tmp, <8 x i16> %b, <2 x i16> *%a) {
 ; CHECK-NEXT:    xtn v0.4h, v2.4s
 ; CHECK-NEXT:    tbl v0.16b, { v0.16b, v1.16b }, v3.16b
 ; CHECK-NEXT:    ret
-  %l = load <2 x i16>, <2 x i16> *%a
+  %l = load <2 x i16>, ptr %a
   %s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s2 = shufflevector <8 x i16> %s1, <8 x i16> %b, <8 x i32> <i32 8, i32 0, i32 1, i32 11, i32 12, i32 13, i32 14, i32 15>
   ret <8 x i16> %s2
 }
 
-define <8 x i16> @load_v8i16_2_2(float %tmp, <8 x i16> %b, <2 x i16> *%a) {
+define <8 x i16> @load_v8i16_2_2(float %tmp, <8 x i16> %b, ptr %a) {
 ; CHECK-LABEL: load_v8i16_2_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w9, [x0]
@@ -423,13 +423,13 @@ define <8 x i16> @load_v8i16_2_2(float %tmp, <8 x i16> %b, <2 x i16> *%a) {
 ; CHECK-NEXT:    xtn v1.4h, v2.4s
 ; CHECK-NEXT:    mov v0.s[1], v1.s[0]
 ; CHECK-NEXT:    ret
-  %l = load <2 x i16>, <2 x i16> *%a
+  %l = load <2 x i16>, ptr %a
   %s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s2 = shufflevector <8 x i16> %s1, <8 x i16> %b, <8 x i32> <i32 8, i32 9, i32 0, i32 1, i32 12, i32 13, i32 14, i32 15>
   ret <8 x i16> %s2
 }
 
-define <8 x i16> @load_v8i16_2_3(float %tmp, <8 x i16> %b, <2 x i16> *%a) {
+define <8 x i16> @load_v8i16_2_3(float %tmp, <8 x i16> %b, ptr %a) {
 ; CHECK-LABEL: load_v8i16_2_3:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w9, [x0]
@@ -440,13 +440,13 @@ define <8 x i16> @load_v8i16_2_3(float %tmp, <8 x i16> %b, <2 x i16> *%a) {
 ; CHECK-NEXT:    xtn v1.4h, v2.4s
 ; CHECK-NEXT:    mov v0.s[2], v1.s[0]
 ; CHECK-NEXT:    ret
-  %l = load <2 x i16>, <2 x i16> *%a
+  %l = load <2 x i16>, ptr %a
   %s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s2 = shufflevector <8 x i16> %s1, <8 x i16> %b, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 0, i32 1, i32 14, i32 15>
   ret <8 x i16> %s2
 }
 
-define <8 x i16> @load_v8i16_2_4(float %tmp, <8 x i16> %b, <2 x i16> *%a) {
+define <8 x i16> @load_v8i16_2_4(float %tmp, <8 x i16> %b, ptr %a) {
 ; CHECK-LABEL: load_v8i16_2_4:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w9, [x0]
@@ -457,13 +457,13 @@ define <8 x i16> @load_v8i16_2_4(float %tmp, <8 x i16> %b, <2 x i16> *%a) {
 ; CHECK-NEXT:    xtn v1.4h, v2.4s
 ; CHECK-NEXT:    mov v0.s[3], v1.s[0]
 ; CHECK-NEXT:    ret
-  %l = load <2 x i16>, <2 x i16> *%a
+  %l = load <2 x i16>, ptr %a
   %s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
   %s2 = shufflevector <8 x i16> %s1, <8 x i16> %b, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 0, i32 1>
   ret <8 x i16> %s2
 }
 
-define <4 x i16> @load_v4i16_2_1(float %tmp, <4 x i16> %b, <2 x i16> *%a) {
+define <4 x i16> @load_v4i16_2_1(float %tmp, <4 x i16> %b, ptr %a) {
 ; CHECK-LABEL: load_v4i16_2_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1 { v0.h }[0], [x0]
@@ -474,13 +474,13 @@ define <4 x i16> @load_v4i16_2_1(float %tmp, <4 x i16> %b, <2 x i16> *%a) {
 ; CHECK-NEXT:    mov v0.s[1], v1.s[1]
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    ret
-  %l = load <2 x i16>, <2 x i16> *%a
+  %l = load <2 x i16>, ptr %a
   %s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   %s2 = shufflevector <4 x i16> %s1, <4 x i16> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
   ret <4 x i16> %s2
 }
 
-define <4 x i16> @load_v4i16_2_2(float %tmp, <4 x i16> %b, <2 x i16> *%a) {
+define <4 x i16> @load_v4i16_2_2(float %tmp, <4 x i16> %b, ptr %a) {
 ; CHECK-LABEL: load_v4i16_2_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1 { v0.h }[0], [x0]
@@ -491,33 +491,33 @@ define <4 x i16> @load_v4i16_2_2(float %tmp, <4 x i16> %b, <2 x i16> *%a) {
 ; CHECK-NEXT:    mov v0.s[1], v2.s[0]
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    ret
-  %l = load <2 x i16>, <2 x i16> *%a
+  %l = load <2 x i16>, ptr %a
   %s1 = shufflevector <2 x i16> %l, <2 x i16> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   %s2 = shufflevector <4 x i16> %s1, <4 x i16> %b, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
   ret <4 x i16> %s2
 }
 
-define <8 x i16> @load_v8i16_4_1(float %tmp, <8 x i16> %b, <4 x i16> *%a) {
+define <8 x i16> @load_v8i16_4_1(float %tmp, <8 x i16> %b, ptr %a) {
 ; CHECK-LABEL: load_v8i16_4_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext v1.16b, v1.16b, v1.16b, #8
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-NEXT:    ret
-  %l = load <4 x i16>, <4 x i16> *%a
+  %l = load <4 x i16>, ptr %a
   %s1 = shufflevector <4 x i16> %l, <4 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
   %s2 = shufflevector <8 x i16> %s1, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
   ret <8 x i16> %s2
 }
 
-define <8 x i16> @load_v8i16_4_2(float %tmp, <8 x i16> %b, <4 x i16> *%a) {
+define <8 x i16> @load_v8i16_4_2(float %tmp, <8 x i16> %b, ptr %a) {
 ; CHECK-LABEL: load_v8i16_4_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov v0.16b, v1.16b
 ; CHECK-NEXT:    ldr d1, [x0]
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-NEXT:    ret
-  %l = load <4 x i16>, <4 x i16> *%a
+  %l = load <4 x i16>, ptr %a
   %s1 = shufflevector <4 x i16> %l, <4 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
   %s2 = shufflevector <8 x i16> %s1, <8 x i16> %b, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 0, i32 1, i32 2, i32 3>
   ret <8 x i16> %s2
@@ -525,27 +525,27 @@ define <8 x i16> @load_v8i16_4_2(float %tmp, <8 x i16> %b, <4 x i16> *%a) {
 
 ; i32
 
-define <4 x i32> @load_v4i32_2_1(float %tmp, <4 x i32> %b, <2 x i32> *%a) {
+define <4 x i32> @load_v4i32_2_1(float %tmp, <4 x i32> %b, ptr %a) {
 ; CHECK-LABEL: load_v4i32_2_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext v1.16b, v1.16b, v1.16b, #8
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-NEXT:    ret
-  %l = load <2 x i32>, <2 x i32> *%a
+  %l = load <2 x i32>, ptr %a
   %s1 = shufflevector <2 x i32> %l, <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   %s2 = shufflevector <4 x i32> %s1, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
   ret <4 x i32> %s2
 }
 
-define <4 x i32> @load_v4i32_2_2(float %tmp, <4 x i32> %b, <2 x i32> *%a) {
+define <4 x i32> @load_v4i32_2_2(float %tmp, <4 x i32> %b, ptr %a) {
 ; CHECK-LABEL: load_v4i32_2_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov v0.16b, v1.16b
 ; CHECK-NEXT:    ldr d1, [x0]
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-NEXT:    ret
-  %l = load <2 x i32>, <2 x i32> *%a
+  %l = load <2 x i32>, ptr %a
   %s1 = shufflevector <2 x i32> %l, <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   %s2 = shufflevector <4 x i32> %s1, <4 x i32> %b, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
   ret <4 x i32> %s2
@@ -553,38 +553,38 @@ define <4 x i32> @load_v4i32_2_2(float %tmp, <4 x i32> %b, <2 x i32> *%a) {
 
 ; More than a single vector
 
-define <8 x i8> @load2_v4i8(float %tmp, <4 x i8> *%a, <4 x i8> *%b) {
+define <8 x i8> @load2_v4i8(float %tmp, ptr %a, ptr %b) {
 ; CHECK-LABEL: load2_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
 ; CHECK-NEXT:    ld1 { v0.s }[1], [x1]
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    ret
-  %la = load <4 x i8>, <4 x i8> *%a
-  %lb = load <4 x i8>, <4 x i8> *%b
+  %la = load <4 x i8>, ptr %a
+  %lb = load <4 x i8>, ptr %b
   %s1 = shufflevector <4 x i8> %la, <4 x i8> %lb, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   ret <8 x i8> %s1
 }
 
-define <16 x i8> @load3_v4i8(float %tmp, <4 x i8> *%a, <4 x i8> *%b) {
+define <16 x i8> @load3_v4i8(float %tmp, ptr %a, ptr %b) {
 ; CHECK-LABEL: load3_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp s0, s1, [x0]
 ; CHECK-NEXT:    ld1 { v0.s }[1], [x1]
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-NEXT:    ret
-  %la = load <4 x i8>, <4 x i8> *%a
-  %lb = load <4 x i8>, <4 x i8> *%b
-  %c = getelementptr <4 x i8>, <4 x i8> *%a, i64 1
-  %d = getelementptr <4 x i8>, <4 x i8> *%b, i64 1
-  %lc = load <4 x i8>, <4 x i8> *%c
+  %la = load <4 x i8>, ptr %a
+  %lb = load <4 x i8>, ptr %b
+  %c = getelementptr <4 x i8>, ptr %a, i64 1
+  %d = getelementptr <4 x i8>, ptr %b, i64 1
+  %lc = load <4 x i8>, ptr %c
   %s1 = shufflevector <4 x i8> %la, <4 x i8> %lb, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s2 = shufflevector <4 x i8> %lc, <4 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s3 = shufflevector <8 x i8> %s1, <8 x i8> %s2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   ret <16 x i8> %s3
 }
 
-define <16 x i8> @load4_v4i8(float %tmp, <4 x i8> *%a, <4 x i8> *%b) {
+define <16 x i8> @load4_v4i8(float %tmp, ptr %a, ptr %b) {
 ; CHECK-LABEL: load4_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp s0, s1, [x0]
@@ -592,33 +592,33 @@ define <16 x i8> @load4_v4i8(float %tmp, <4 x i8> *%a, <4 x i8> *%b) {
 ; CHECK-NEXT:    ld1 { v1.s }[1], [x1]
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-NEXT:    ret
-  %la = load <4 x i8>, <4 x i8> *%a
-  %lb = load <4 x i8>, <4 x i8> *%b
-  %c = getelementptr <4 x i8>, <4 x i8> *%a, i64 1
-  %d = getelementptr <4 x i8>, <4 x i8> *%b, i64 1
-  %lc = load <4 x i8>, <4 x i8> *%c
-  %ld = load <4 x i8>, <4 x i8> *%d
+  %la = load <4 x i8>, ptr %a
+  %lb = load <4 x i8>, ptr %b
+  %c = getelementptr <4 x i8>, ptr %a, i64 1
+  %d = getelementptr <4 x i8>, ptr %b, i64 1
+  %lc = load <4 x i8>, ptr %c
+  %ld = load <4 x i8>, ptr %d
   %s1 = shufflevector <4 x i8> %la, <4 x i8> %lb, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s2 = shufflevector <4 x i8> %lc, <4 x i8> %ld, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s3 = shufflevector <8 x i8> %s1, <8 x i8> %s2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   ret <16 x i8> %s3
 }
 
-define <16 x i8> @load2multi1_v4i8(float %tmp, <4 x i8> *%a, <4 x i8> *%b) {
+define <16 x i8> @load2multi1_v4i8(float %tmp, ptr %a, ptr %b) {
 ; CHECK-LABEL: load2multi1_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
 ; CHECK-NEXT:    ld1 { v0.s }[1], [x1]
 ; CHECK-NEXT:    mov v0.d[1], v0.d[0]
 ; CHECK-NEXT:    ret
-  %la = load <4 x i8>, <4 x i8> *%a
-  %lb = load <4 x i8>, <4 x i8> *%b
+  %la = load <4 x i8>, ptr %a
+  %lb = load <4 x i8>, ptr %b
   %s1 = shufflevector <4 x i8> %la, <4 x i8> %lb, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s3 = shufflevector <8 x i8> %s1, <8 x i8> %s1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   ret <16 x i8> %s3
 }
 
-define <16 x i8> @load2multi2_v4i8(float %tmp, <4 x i8> *%a, <4 x i8> *%b) {
+define <16 x i8> @load2multi2_v4i8(float %tmp, ptr %a, ptr %b) {
 ; CHECK-LABEL: load2multi2_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x1]
@@ -629,15 +629,15 @@ define <16 x i8> @load2multi2_v4i8(float %tmp, <4 x i8> *%a, <4 x i8> *%b) {
 ; CHECK-NEXT:    mov v1.d[1], v1.d[0]
 ; CHECK-NEXT:    uzp1 v0.16b, v1.16b, v0.16b
 ; CHECK-NEXT:    ret
-  %la = load <4 x i8>, <4 x i8> *%a
-  %lb = load <4 x i8>, <4 x i8> *%b
+  %la = load <4 x i8>, ptr %a
+  %lb = load <4 x i8>, ptr %b
   %s1 = shufflevector <4 x i8> %la, <4 x i8> %la, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s2 = shufflevector <4 x i8> %lb, <4 x i8> %lb, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %s3 = shufflevector <8 x i8> %s1, <8 x i8> %s2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   ret <16 x i8> %s3
 }
 
-define void @loads_before_stores(i8* %i44) {
+define void @loads_before_stores(ptr %i44) {
 ; CHECK-LABEL: loads_before_stores:
 ; CHECK:       // %bb.0: // %bb
 ; CHECK-NEXT:    add x8, x0, #20
@@ -650,30 +650,27 @@ define void @loads_before_stores(i8* %i44) {
 ; CHECK-NEXT:    stur d0, [x0, #22]
 ; CHECK-NEXT:    ret
 bb:
-  %i45 = getelementptr inbounds i8, i8* %i44, i64 20
-  %i46 = getelementptr inbounds i8, i8* %i44, i64 26
-  %i48 = load i8, i8* %i46, align 1
-  %i49 = getelementptr inbounds i8, i8* %i44, i64 21
-  %i50 = getelementptr inbounds i8, i8* %i44, i64 27
-  %i52 = load i8, i8* %i50, align 1
-  %i53 = getelementptr inbounds i8, i8* %i44, i64 22
-  %i54 = getelementptr inbounds i8, i8* %i44, i64 28
-  %i61 = getelementptr inbounds i8, i8* %i44, i64 24
-  %i62 = getelementptr inbounds i8, i8* %i44, i64 30
-  %i63 = load i8, i8* %i61, align 1
-  %i65 = getelementptr inbounds i8, i8* %i44, i64 25
-  %i66 = getelementptr inbounds i8, i8* %i44, i64 31
-  %i67 = load i8, i8* %i65, align 1
-  %0 = bitcast i8* %i45 to <4 x i8>*
-  %1 = load <4 x i8>, <4 x i8>* %0, align 1
-  store i8 %i48, i8* %i45, align 1
-  store i8 %i52, i8* %i49, align 1
-  %2 = bitcast i8* %i54 to <4 x i8>*
-  %3 = load <4 x i8>, <4 x i8>* %2, align 1
-  store i8 %i63, i8* %i62, align 1
-  %4 = shufflevector <4 x i8> %3, <4 x i8> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  %5 = bitcast i8* %i53 to <8 x i8>*
-  store <8 x i8> %4, <8 x i8>* %5, align 1
-  store i8 %i67, i8* %i66, align 1
+  %i45 = getelementptr inbounds i8, ptr %i44, i64 20
+  %i46 = getelementptr inbounds i8, ptr %i44, i64 26
+  %i48 = load i8, ptr %i46, align 1
+  %i49 = getelementptr inbounds i8, ptr %i44, i64 21
+  %i50 = getelementptr inbounds i8, ptr %i44, i64 27
+  %i52 = load i8, ptr %i50, align 1
+  %i53 = getelementptr inbounds i8, ptr %i44, i64 22
+  %i54 = getelementptr inbounds i8, ptr %i44, i64 28
+  %i61 = getelementptr inbounds i8, ptr %i44, i64 24
+  %i62 = getelementptr inbounds i8, ptr %i44, i64 30
+  %i63 = load i8, ptr %i61, align 1
+  %i65 = getelementptr inbounds i8, ptr %i44, i64 25
+  %i66 = getelementptr inbounds i8, ptr %i44, i64 31
+  %i67 = load i8, ptr %i65, align 1
+  %0 = load <4 x i8>, ptr %i45, align 1
+  store i8 %i48, ptr %i45, align 1
+  store i8 %i52, ptr %i49, align 1
+  %1 = load <4 x i8>, ptr %i54, align 1
+  store i8 %i63, ptr %i62, align 1
+  %2 = shufflevector <4 x i8> %1, <4 x i8> %0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  store <8 x i8> %2, ptr %i53, align 1
+  store i8 %i67, ptr %i66, align 1
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/int-to-fp-no-neon.ll b/llvm/test/CodeGen/AArch64/int-to-fp-no-neon.ll
index 82e625af477ba..478ccf58f32c5 100644
--- a/llvm/test/CodeGen/AArch64/int-to-fp-no-neon.ll
+++ b/llvm/test/CodeGen/AArch64/int-to-fp-no-neon.ll
@@ -6,7 +6,7 @@
 ;; Emit an object file so that verifyPredicates is called (it is not used for ASM output).
 ; RUN: llc -mtriple=aarch64 -mattr=-neon,+fullfp16 -o /dev/null %s --asm-show-inst -filetype=obj
 
-define double @ui8_to_double(i8* %i, float* %f) {
+define double @ui8_to_double(ptr %i, ptr %f) {
 ; NEON-ENABLED-LABEL: ui8_to_double:
 ; NEON-ENABLED:       // %bb.0: // %entry
 ; NEON-ENABLED-NEXT:    ldr b0, [x0]
@@ -19,12 +19,12 @@ define double @ui8_to_double(i8* %i, float* %f) {
 ; NEON-DISABLED-NEXT:    ucvtf d0, w8
 ; NEON-DISABLED-NEXT:    ret
 entry:
-  %ld = load i8, i8* %i, align 1
+  %ld = load i8, ptr %i, align 1
   %conv = uitofp i8 %ld to double
   ret double %conv
 }
 
-define float @ui8_to_float(i8* %i, float* %f) {
+define float @ui8_to_float(ptr %i, ptr %f) {
 ; NEON-ENABLED-LABEL: ui8_to_float:
 ; NEON-ENABLED:       // %bb.0: // %entry
 ; NEON-ENABLED-NEXT:    ldr b0, [x0]
@@ -37,24 +37,24 @@ define float @ui8_to_float(i8* %i, float* %f) {
 ; NEON-DISABLED-NEXT:    ucvtf s0, w8
 ; NEON-DISABLED-NEXT:    ret
 entry:
-  %ld = load i8, i8* %i, align 1
+  %ld = load i8, ptr %i, align 1
   %conv = uitofp i8 %ld to float
   ret float %conv
 }
 
-define half @ui8_to_half(i8* %i, half* %f) {
+define half @ui8_to_half(ptr %i, ptr %f) {
 ; CHECK-LABEL: ui8_to_half:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrb w8, [x0]
 ; CHECK-NEXT:    ucvtf h0, w8
 ; CHECK-NEXT:    ret
 entry:
-  %ld = load i8, i8* %i, align 1
+  %ld = load i8, ptr %i, align 1
   %conv = uitofp i8 %ld to half
   ret half %conv
 }
 
-define double @ui16_to_double(i16* %i, float* %f) {
+define double @ui16_to_double(ptr %i, ptr %f) {
 ; NEON-ENABLED-LABEL: ui16_to_double:
 ; NEON-ENABLED:       // %bb.0: // %entry
 ; NEON-ENABLED-NEXT:    ldr h0, [x0]
@@ -67,12 +67,12 @@ define double @ui16_to_double(i16* %i, float* %f) {
 ; NEON-DISABLED-NEXT:    ucvtf d0, w8
 ; NEON-DISABLED-NEXT:    ret
 entry:
-  %ld = load i16, i16* %i, align 1
+  %ld = load i16, ptr %i, align 1
   %conv = uitofp i16 %ld to double
   ret double %conv
 }
 
-define float @ui16_to_float(i16* %i, float* %f) {
+define float @ui16_to_float(ptr %i, ptr %f) {
 ; NEON-ENABLED-LABEL: ui16_to_float:
 ; NEON-ENABLED:       // %bb.0: // %entry
 ; NEON-ENABLED-NEXT:    ldr h0, [x0]
@@ -85,24 +85,24 @@ define float @ui16_to_float(i16* %i, float* %f) {
 ; NEON-DISABLED-NEXT:    ucvtf s0, w8
 ; NEON-DISABLED-NEXT:    ret
 entry:
-  %ld = load i16, i16* %i, align 1
+  %ld = load i16, ptr %i, align 1
   %conv = uitofp i16 %ld to float
   ret float %conv
 }
 
-define half @ui16_to_half(i16* %i, half* %f) {
+define half @ui16_to_half(ptr %i, ptr %f) {
 ; CHECK-LABEL: ui16_to_half:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrh w8, [x0]
 ; CHECK-NEXT:    ucvtf h0, w8
 ; CHECK-NEXT:    ret
 entry:
-  %ld = load i16, i16* %i, align 1
+  %ld = load i16, ptr %i, align 1
   %conv = uitofp i16 %ld to half
   ret half %conv
 }
 
-define double @ui32_to_double(i32* %i, float* %f) {
+define double @ui32_to_double(ptr %i, ptr %f) {
 ; NEON-ENABLED-LABEL: ui32_to_double:
 ; NEON-ENABLED:       // %bb.0: // %entry
 ; NEON-ENABLED-NEXT:    ldr s0, [x0]
@@ -115,12 +115,12 @@ define double @ui32_to_double(i32* %i, float* %f) {
 ; NEON-DISABLED-NEXT:    ucvtf d0, w8
 ; NEON-DISABLED-NEXT:    ret
 entry:
-  %ld = load i32, i32* %i, align 1
+  %ld = load i32, ptr %i, align 1
   %conv = uitofp i32 %ld to double
   ret double %conv
 }
 
-define float @ui32_to_float(i32* %i, float* %f) {
+define float @ui32_to_float(ptr %i, ptr %f) {
 ; NEON-ENABLED-LABEL: ui32_to_float:
 ; NEON-ENABLED:       // %bb.0: // %entry
 ; NEON-ENABLED-NEXT:    ldr s0, [x0]
@@ -133,24 +133,24 @@ define float @ui32_to_float(i32* %i, float* %f) {
 ; NEON-DISABLED-NEXT:    ucvtf s0, w8
 ; NEON-DISABLED-NEXT:    ret
 entry:
-  %ld = load i32, i32* %i, align 1
+  %ld = load i32, ptr %i, align 1
   %conv = uitofp i32 %ld to float
   ret float %conv
 }
 
-define half @ui32_to_half(i32* %i, half* %f) {
+define half @ui32_to_half(ptr %i, ptr %f) {
 ; CHECK-LABEL: ui32_to_half:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    ucvtf h0, w8
 ; CHECK-NEXT:    ret
 entry:
-  %ld = load i32, i32* %i, align 1
+  %ld = load i32, ptr %i, align 1
   %conv = uitofp i32 %ld to half
   ret half %conv
 }
 
-define double @ui64_to_double(i64* %i, float* %f) {
+define double @ui64_to_double(ptr %i, ptr %f) {
 ; NEON-ENABLED-LABEL: ui64_to_double:
 ; NEON-ENABLED:       // %bb.0: // %entry
 ; NEON-ENABLED-NEXT:    ldr d0, [x0]
@@ -163,49 +163,49 @@ define double @ui64_to_double(i64* %i, float* %f) {
 ; NEON-DISABLED-NEXT:    ucvtf d0, x8
 ; NEON-DISABLED-NEXT:    ret
 entry:
-  %ld = load i64, i64* %i, align 1
+  %ld = load i64, ptr %i, align 1
   %conv = uitofp i64 %ld to double
   ret double %conv
 }
 
-define float @ui64_to_float(i64* %i, float* %f) {
+define float @ui64_to_float(ptr %i, ptr %f) {
 ; CHECK-LABEL: ui64_to_float:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x8, [x0]
 ; CHECK-NEXT:    ucvtf s0, x8
 ; CHECK-NEXT:    ret
 entry:
-  %ld = load i64, i64* %i, align 1
+  %ld = load i64, ptr %i, align 1
   %conv = uitofp i64 %ld to float
   ret float %conv
 }
 
-define half @ui64_to_half(i64* %i, half* %f) {
+define half @ui64_to_half(ptr %i, ptr %f) {
 ; CHECK-LABEL: ui64_to_half:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x8, [x0]
 ; CHECK-NEXT:    ucvtf h0, x8
 ; CHECK-NEXT:    ret
 entry:
-  %ld = load i64, i64* %i, align 1
+  %ld = load i64, ptr %i, align 1
   %conv = uitofp i64 %ld to half
   ret half %conv
 }
 
 
-define double @si8_to_double(i8* %i, float* %f) {
+define double @si8_to_double(ptr %i, ptr %f) {
 ; CHECK-LABEL: si8_to_double:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsb w8, [x0]
 ; CHECK-NEXT:    scvtf d0, w8
 ; CHECK-NEXT:    ret
 entry:
-  %ld = load i8, i8* %i, align 1
+  %ld = load i8, ptr %i, align 1
   %conv = sitofp i8 %ld to double
   ret double %conv
 }
 
-define float @si8_to_float(i8* %i, float* %f) {
+define float @si8_to_float(ptr %i, ptr %f) {
 ; NEON-ENABLED-LABEL: si8_to_float:
 ; NEON-ENABLED:       // %bb.0: // %entry
 ; NEON-ENABLED-NEXT:    ldr b0, [x0]
@@ -220,24 +220,24 @@ define float @si8_to_float(i8* %i, float* %f) {
 ; NEON-DISABLED-NEXT:    scvtf s0, w8
 ; NEON-DISABLED-NEXT:    ret
 entry:
-  %ld = load i8, i8* %i, align 1
+  %ld = load i8, ptr %i, align 1
   %conv = sitofp i8 %ld to float
   ret float %conv
 }
 
-define half @si8_to_half(i8* %i, half* %f) {
+define half @si8_to_half(ptr %i, ptr %f) {
 ; CHECK-LABEL: si8_to_half:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsb w8, [x0]
 ; CHECK-NEXT:    scvtf h0, w8
 ; CHECK-NEXT:    ret
 entry:
-  %ld = load i8, i8* %i, align 1
+  %ld = load i8, ptr %i, align 1
   %conv = sitofp i8 %ld to half
   ret half %conv
 }
 
-define double @si16_to_double(i16* %i, float* %f) {
+define double @si16_to_double(ptr %i, ptr %f) {
 ; NEON-ENABLED-LABEL: si16_to_double:
 ; NEON-ENABLED:       // %bb.0: // %entry
 ; NEON-ENABLED-NEXT:    ldr h0, [x0]
@@ -252,12 +252,12 @@ define double @si16_to_double(i16* %i, float* %f) {
 ; NEON-DISABLED-NEXT:    scvtf d0, w8
 ; NEON-DISABLED-NEXT:    ret
 entry:
-  %ld = load i16, i16* %i, align 1
+  %ld = load i16, ptr %i, align 1
   %conv = sitofp i16 %ld to double
   ret double %conv
 }
 
-define float @si16_to_float(i16* %i, float* %f) {
+define float @si16_to_float(ptr %i, ptr %f) {
 ; NEON-ENABLED-LABEL: si16_to_float:
 ; NEON-ENABLED:       // %bb.0: // %entry
 ; NEON-ENABLED-NEXT:    ldr h0, [x0]
@@ -271,24 +271,24 @@ define float @si16_to_float(i16* %i, float* %f) {
 ; NEON-DISABLED-NEXT:    scvtf s0, w8
 ; NEON-DISABLED-NEXT:    ret
 entry:
-  %ld = load i16, i16* %i, align 1
+  %ld = load i16, ptr %i, align 1
   %conv = sitofp i16 %ld to float
   ret float %conv
 }
 
-define half @si16_to_half(i16* %i, half* %f) {
+define half @si16_to_half(ptr %i, ptr %f) {
 ; CHECK-LABEL: si16_to_half:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrsh w8, [x0]
 ; CHECK-NEXT:    scvtf h0, w8
 ; CHECK-NEXT:    ret
 entry:
-  %ld = load i16, i16* %i, align 1
+  %ld = load i16, ptr %i, align 1
   %conv = sitofp i16 %ld to half
   ret half %conv
 }
 
-define double @si32_to_double(i32* %i, float* %f) {
+define double @si32_to_double(ptr %i, ptr %f) {
 ; NEON-ENABLED-LABEL: si32_to_double:
 ; NEON-ENABLED:       // %bb.0: // %entry
 ; NEON-ENABLED-NEXT:    ldr s0, [x0]
@@ -302,12 +302,12 @@ define double @si32_to_double(i32* %i, float* %f) {
 ; NEON-DISABLED-NEXT:    scvtf d0, w8
 ; NEON-DISABLED-NEXT:    ret
 entry:
-  %ld = load i32, i32* %i, align 1
+  %ld = load i32, ptr %i, align 1
   %conv = sitofp i32 %ld to double
   ret double %conv
 }
 
-define float @si32_to_float(i32* %i, float* %f) {
+define float @si32_to_float(ptr %i, ptr %f) {
 ; NEON-ENABLED-LABEL: si32_to_float:
 ; NEON-ENABLED:       // %bb.0: // %entry
 ; NEON-ENABLED-NEXT:    ldr s0, [x0]
@@ -320,24 +320,24 @@ define float @si32_to_float(i32* %i, float* %f) {
 ; NEON-DISABLED-NEXT:    scvtf s0, w8
 ; NEON-DISABLED-NEXT:    ret
 entry:
-  %ld = load i32, i32* %i, align 1
+  %ld = load i32, ptr %i, align 1
   %conv = sitofp i32 %ld to float
   ret float %conv
 }
 
-define half @si32_to_half(i32* %i, half* %f) {
+define half @si32_to_half(ptr %i, ptr %f) {
 ; CHECK-LABEL: si32_to_half:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    scvtf h0, w8
 ; CHECK-NEXT:    ret
 entry:
-  %ld = load i32, i32* %i, align 1
+  %ld = load i32, ptr %i, align 1
   %conv = sitofp i32 %ld to half
   ret half %conv
 }
 
-define double @si64_to_double(i64* %i, float* %f) {
+define double @si64_to_double(ptr %i, ptr %f) {
 ; NEON-ENABLED-LABEL: si64_to_double:
 ; NEON-ENABLED:       // %bb.0: // %entry
 ; NEON-ENABLED-NEXT:    ldr d0, [x0]
@@ -350,31 +350,31 @@ define double @si64_to_double(i64* %i, float* %f) {
 ; NEON-DISABLED-NEXT:    scvtf d0, x8
 ; NEON-DISABLED-NEXT:    ret
 entry:
-  %ld = load i64, i64* %i, align 1
+  %ld = load i64, ptr %i, align 1
   %conv = sitofp i64 %ld to double
   ret double %conv
 }
 
-define float @si64_to_float(i64* %i, float* %f) {
+define float @si64_to_float(ptr %i, ptr %f) {
 ; CHECK-LABEL: si64_to_float:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x8, [x0]
 ; CHECK-NEXT:    scvtf s0, x8
 ; CHECK-NEXT:    ret
 entry:
-  %ld = load i64, i64* %i, align 1
+  %ld = load i64, ptr %i, align 1
   %conv = sitofp i64 %ld to float
   ret float %conv
 }
 
-define half @si64_to_half(i64* %i, half* %f) {
+define half @si64_to_half(ptr %i, ptr %f) {
 ; CHECK-LABEL: si64_to_half:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x8, [x0]
 ; CHECK-NEXT:    scvtf h0, x8
 ; CHECK-NEXT:    ret
 entry:
-  %ld = load i64, i64* %i, align 1
+  %ld = load i64, ptr %i, align 1
   %conv = sitofp i64 %ld to half
   ret half %conv
 }
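
For reference, the change in this file is the mechanical pattern applied throughout the patch: the pointee type moves off the pointer operand and remains only on the memory operation itself. A minimal standalone sketch of the converted shape (the function name is hypothetical, not part of the patch):

  ; Old form of the operand:   %ld = load i8, i8* %i, align 1
  ; With opaque pointers the operand is just ptr; the loaded type stays explicit.
  define double @sketch_ui8_to_double(ptr %i) {
  entry:
    %ld = load i8, ptr %i, align 1
    %conv = uitofp i8 %ld to double
    ret double %conv
  }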

diff  --git a/llvm/test/CodeGen/AArch64/intrinsics-memory-barrier.ll b/llvm/test/CodeGen/AArch64/intrinsics-memory-barrier.ll
index 2a73fee7718ad..61939977c4d72 100644
--- a/llvm/test/CodeGen/AArch64/intrinsics-memory-barrier.ll
+++ b/llvm/test/CodeGen/AArch64/intrinsics-memory-barrier.ll
@@ -17,37 +17,37 @@ define void @test() {
 ; The important point is that the compiler should not reorder memory access
 ; instructions around the DMB.
 ; If it fails to do so, the two STRs will collapse into one STP.
-define void @test_dmb_reordering(i32 %a, i32 %b, i32* %d) {
-  store i32 %a, i32* %d              ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}]
+define void @test_dmb_reordering(i32 %a, i32 %b, ptr %d) {
+  store i32 %a, ptr %d              ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}]
 
   call void @llvm.aarch64.dmb(i32 15); CHECK: dmb sy
 
-  %d1 = getelementptr i32, i32* %d, i64 1
-  store i32 %b, i32* %d1             ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #4]
+  %d1 = getelementptr i32, ptr %d, i64 1
+  store i32 %b, ptr %d1             ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #4]
 
   ret void
 }
 
 ; Similarly for DSB.
-define void @test_dsb_reordering(i32 %a, i32 %b, i32* %d) {
-  store i32 %a, i32* %d              ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}]
+define void @test_dsb_reordering(i32 %a, i32 %b, ptr %d) {
+  store i32 %a, ptr %d              ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}]
 
   call void @llvm.aarch64.dsb(i32 15); CHECK: dsb sy
 
-  %d1 = getelementptr i32, i32* %d, i64 1
-  store i32 %b, i32* %d1             ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #4]
+  %d1 = getelementptr i32, ptr %d, i64 1
+  store i32 %b, ptr %d1             ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #4]
 
   ret void
 }
 
 ; And ISB.
-define void @test_isb_reordering(i32 %a, i32 %b, i32* %d) {
-  store i32 %a, i32* %d              ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}]
+define void @test_isb_reordering(i32 %a, i32 %b, ptr %d) {
+  store i32 %a, ptr %d              ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}]
 
   call void @llvm.aarch64.isb(i32 15); CHECK: isb
 
-  %d1 = getelementptr i32, i32* %d, i64 1
-  store i32 %b, i32* %d1             ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #4]
+  %d1 = getelementptr i32, ptr %d, i64 1
+  store i32 %b, ptr %d1             ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #4]
 
   ret void
 }
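
As a standalone illustration of what these tests check (a sketch only; the function name is hypothetical), two stores separated by a data memory barrier have to stay as two str instructions instead of being merged into one stp:

  declare void @llvm.aarch64.dmb(i32)

  define void @sketch_dmb_keeps_stores_apart(i32 %a, i32 %b, ptr %d) {
    store i32 %a, ptr %d
    call void @llvm.aarch64.dmb(i32 15)   ; lowers to "dmb sy", a full-system barrier
    %d1 = getelementptr i32, ptr %d, i64 1
    store i32 %b, ptr %d1                 ; must not be folded into an stp with the first store
    ret void
  }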

diff  --git a/llvm/test/CodeGen/AArch64/irg.ll b/llvm/test/CodeGen/AArch64/irg.ll
index 31a018e183623..a5a454ff69eaa 100644
--- a/llvm/test/CodeGen/AArch64/irg.ll
+++ b/llvm/test/CodeGen/AArch64/irg.ll
@@ -1,42 +1,42 @@
 ; RUN: llc < %s -mtriple=aarch64 -mattr=+mte | FileCheck %s
 
-define i8* @irg_imm16(i8* %p) {
+define ptr @irg_imm16(ptr %p) {
 entry:
 ; CHECK-LABEL: irg_imm16:
 ; CHECK: mov w[[R:[0-9]+]], #16
 ; CHECK: irg x0, x0, x[[R]]
 ; CHECK: ret
-  %q = call i8* @llvm.aarch64.irg(i8* %p, i64 16)
-  ret i8* %q
+  %q = call ptr @llvm.aarch64.irg(ptr %p, i64 16)
+  ret ptr %q
 }
 
-define i8* @irg_imm0(i8* %p) {
+define ptr @irg_imm0(ptr %p) {
 entry:
 ; CHECK-LABEL: irg_imm0:
 ; CHECK: irg x0, x0{{$}}
 ; CHECK: ret
-  %q = call i8* @llvm.aarch64.irg(i8* %p, i64 0)
-  ret i8* %q
+  %q = call ptr @llvm.aarch64.irg(ptr %p, i64 0)
+  ret ptr %q
 }
 
-define i8* @irg_reg(i8* %p, i64 %ex) {
+define ptr @irg_reg(ptr %p, i64 %ex) {
 entry:
 ; CHECK-LABEL: irg_reg:
 ; CHECK: irg x0, x0, x1
 ; CHECK: ret
-  %q = call i8* @llvm.aarch64.irg(i8* %p, i64 %ex)
-  ret i8* %q
+  %q = call ptr @llvm.aarch64.irg(ptr %p, i64 %ex)
+  ret ptr %q
 }
 
 ; undef argument in irg is treated specially
-define i8* @irg_sp() {
+define ptr @irg_sp() {
 entry:
 ; CHECK-LABEL: irg_sp:
 ; CHECK: irg x0, sp{{$}}
 ; CHECK: ret
-  %q = call i8* @llvm.aarch64.irg.sp(i64 0)
-  ret i8* %q
+  %q = call ptr @llvm.aarch64.irg.sp(i64 0)
+  ret ptr %q
 }
 
-declare i8* @llvm.aarch64.irg(i8* %p, i64 %exclude)
-declare i8* @llvm.aarch64.irg.sp(i64 %exclude)
+declare ptr @llvm.aarch64.irg(ptr %p, i64 %exclude)
+declare ptr @llvm.aarch64.irg.sp(i64 %exclude)
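
Using the declarations above, a minimal sketch of tagging a pointer with MTE (hypothetical function name; needs -mattr=+mte as in the RUN line). The i64 operand is the %exclude mask from the declaration; 0 excludes nothing, matching the irg_imm0 case:

  declare ptr @llvm.aarch64.irg(ptr, i64)

  define ptr @sketch_irg_use(ptr %p) {
  entry:
    %tagged = call ptr @llvm.aarch64.irg(ptr %p, i64 0)   ; lowers to: irg x0, x0
    ret ptr %tagged
  }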

diff  --git a/llvm/test/CodeGen/AArch64/irg_sp_tagp.ll b/llvm/test/CodeGen/AArch64/irg_sp_tagp.ll
index 082b4aa948b34..4fa96c771a330 100644
--- a/llvm/test/CodeGen/AArch64/irg_sp_tagp.ll
+++ b/llvm/test/CodeGen/AArch64/irg_sp_tagp.ll
@@ -1,22 +1,22 @@
 ; RUN: llc < %s -mtriple=aarch64 -mattr=+mte | FileCheck %s
 
-define dso_local i8* @small_alloca() {
+define dso_local ptr @small_alloca() {
 entry:
 ; CHECK-LABEL: small_alloca:
 ; CHECK:      irg  x0, sp{{$}}
 ; CHECK:      ret
   %a = alloca i8, align 16
-  %q = call i8* @llvm.aarch64.irg.sp(i64 0)
-  %q1 = call i8* @llvm.aarch64.tagp.p0i8(i8* %a, i8* %q, i64 1)
-  ret i8* %q1
+  %q = call ptr @llvm.aarch64.irg.sp(i64 0)
+  %q1 = call ptr @llvm.aarch64.tagp.p0(ptr %a, ptr %q, i64 1)
+  ret ptr %q1
 }
 
-@sink = dso_local global i8* null, align 8
+@sink = dso_local global ptr null, align 8
 
 ; Check that IRG is pinned to %b because the store instruction needs
 ; the address in a non-fixed physical register and can benefit from it
 ; being equal to the base tagged pointer.
-define dso_local i8* @small_allocas() {
+define dso_local ptr @small_allocas() {
 entry:
 ; CHECK-LABEL: small_allocas:
 ; CHECK:      irg  [[R:x[0-9]+]], sp{{$}}
@@ -25,11 +25,11 @@ entry:
 ; CHECK:      ret
   %a = alloca i8, align 16
   %b = alloca i8, align 16
-  %q = call i8* @llvm.aarch64.irg.sp(i64 0)
-  %q1 = call i8* @llvm.aarch64.tagp.p0i8(i8* %a, i8* %q, i64 1)
-  %q2 = call i8* @llvm.aarch64.tagp.p0i8(i8* %b, i8* %q, i64 2)
-  store i8* %q2, i8** @sink, align 8
-  ret i8* %q1
+  %q = call ptr @llvm.aarch64.irg.sp(i64 0)
+  %q1 = call ptr @llvm.aarch64.tagp.p0(ptr %a, ptr %q, i64 1)
+  %q2 = call ptr @llvm.aarch64.tagp.p0(ptr %b, ptr %q, i64 2)
+  store ptr %q2, ptr @sink, align 8
+  ret ptr %q1
 }
 
 ; Two large allocas. One's offset overflows addg immediate.
@@ -42,10 +42,10 @@ entry:
 ; CHECK:      bl use2
   %a = alloca i8, i64 4096, align 16
   %b = alloca i8, i64 4096, align 16
-  %base = call i8* @llvm.aarch64.irg.sp(i64 0)
-  %a_t = call i8* @llvm.aarch64.tagp.p0i8(i8* %a, i8* %base, i64 1)
-  %b_t = call i8* @llvm.aarch64.tagp.p0i8(i8* %b, i8* %base, i64 0)
-  call void @use2(i8* %a_t, i8* %b_t)
+  %base = call ptr @llvm.aarch64.irg.sp(i64 0)
+  %a_t = call ptr @llvm.aarch64.tagp.p0(ptr %a, ptr %base, i64 1)
+  %b_t = call ptr @llvm.aarch64.tagp.p0(ptr %b, ptr %base, i64 0)
+  call void @use2(ptr %a_t, ptr %b_t)
   ret void
 }
 
@@ -59,9 +59,9 @@ entry:
 ; CHECK:      irg  x0, sp{{$}}
 ; CHECK:      bl use
   %a = alloca i8, i64 4096, align 64
-  %base = call i8* @llvm.aarch64.irg.sp(i64 0)
-  %a_t = call i8* @llvm.aarch64.tagp.p0i8(i8* %a, i8* %base, i64 1)
-  call void @use(i8* %a_t)
+  %base = call ptr @llvm.aarch64.irg.sp(i64 0)
+  %a_t = call ptr @llvm.aarch64.tagp.p0(ptr %a, ptr %base, i64 1)
+  call void @use(ptr %a_t)
   ret void
 }
 
@@ -74,11 +74,11 @@ entry:
 ; CHECK:      irg  x1, x1
 ; CHECK-DAG:  sub  x0, x29, #[[OFS]]
 ; CHECK:      bl   use2
-  %base = call i8* @llvm.aarch64.irg.sp(i64 0)
+  %base = call ptr @llvm.aarch64.irg.sp(i64 0)
   %a = alloca i128, i64 %size, align 16
   %b = alloca i8, i64 16, align 16
-  %b_t = call i8* @llvm.aarch64.tagp.p0i8(i8* %b, i8* %base, i64 1)
-  call void @use2(i8* %b, i8* %b_t)
+  %b_t = call ptr @llvm.aarch64.tagp.p0(ptr %b, ptr %base, i64 1)
+  call void @use2(ptr %b, ptr %b_t)
   ret void
 }
 
@@ -95,16 +95,16 @@ entryz:
 ; CHECK:      irg  x1, x1
 ; CHECK-DAG:  add  x0, x19, #[[OFS]]
 ; CHECK:      bl   use2
-  %base = call i8* @llvm.aarch64.irg.sp(i64 0)
+  %base = call ptr @llvm.aarch64.irg.sp(i64 0)
   %a = alloca i128, i64 %size, align 64
   %b = alloca i8, i64 16, align 16
-  %b_t = call i8* @llvm.aarch64.tagp.p0i8(i8* %b, i8* %base, i64 1)
-  call void @use2(i8* %b, i8* %b_t)
+  %b_t = call ptr @llvm.aarch64.tagp.p0(ptr %b, ptr %base, i64 1)
+  call void @use2(ptr %b, ptr %b_t)
   ret void
 }
 
-declare void @use(i8*)
-declare void @use2(i8*, i8*)
+declare void @use(ptr)
+declare void @use2(ptr, ptr)
 
-declare i8* @llvm.aarch64.irg.sp(i64 %exclude)
-declare i8* @llvm.aarch64.tagp.p0i8(i8* %p, i8* %tag, i64 %ofs)
+declare ptr @llvm.aarch64.irg.sp(i64 %exclude)
+declare ptr @llvm.aarch64.tagp.p0(ptr %p, ptr %tag, i64 %ofs)
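
A standalone sketch of the pattern exercised above (hypothetical function name; needs -mattr=+mte): irg.sp produces a tagged base derived from SP, and tagp re-derives a tagged pointer for a particular stack slot from that base.

  declare ptr @llvm.aarch64.irg.sp(i64)
  declare ptr @llvm.aarch64.tagp.p0(ptr, ptr, i64)

  define ptr @sketch_tagged_slot() {
  entry:
    %slot = alloca i8, align 16
    %base = call ptr @llvm.aarch64.irg.sp(i64 0)
    %tagged = call ptr @llvm.aarch64.tagp.p0(ptr %slot, ptr %base, i64 1)  ; tag offset 1 (the %ofs operand)
    ret ptr %tagged
  }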

diff  --git a/llvm/test/CodeGen/AArch64/landingpad-ifcvt.ll b/llvm/test/CodeGen/AArch64/landingpad-ifcvt.ll
index a5497b1d8e146..367b4975e7558 100644
--- a/llvm/test/CodeGen/AArch64/landingpad-ifcvt.ll
+++ b/llvm/test/CodeGen/AArch64/landingpad-ifcvt.ll
@@ -7,16 +7,16 @@
 target datalayout = "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-pc-windows-msvc19.11.0"
 
-define i64 @f(i32* %hwnd, i32 %message, i64 %wparam, i64 %lparam) personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
+define i64 @f(ptr %hwnd, i32 %message, i64 %wparam, i64 %lparam) personality ptr @__C_specific_handler {
 entry:
-  %call = invoke i64 @callee(i32* %hwnd, i32 %message, i64 %wparam, i64 %lparam)
+  %call = invoke i64 @callee(ptr %hwnd, i32 %message, i64 %wparam, i64 %lparam)
           to label %__try.cont unwind label %catch.dispatch
 
 catch.dispatch:                                   ; preds = %entry
   %0 = catchswitch within none [label %__except.ret] unwind to caller
 
 __except.ret:                                     ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* bitcast (i32 (i8*, i8*)* @filt to i8*)]
+  %1 = catchpad within %0 [ptr @filt]
   catchret from %1 to label %__try.cont
 
 __try.cont:                                       ; preds = %__except.ret, %entry
@@ -24,6 +24,6 @@ __try.cont:                                       ; preds = %__except.ret, %entr
   ret i64 %rv.0
 }
 
-declare dso_local i64 @callee(i32*, i32, i64, i64)
-declare i32 @filt(i8*, i8* nocapture readnone)
+declare dso_local i64 @callee(ptr, i32, i64, i64)
+declare i32 @filt(ptr, ptr nocapture readnone)
 declare dso_local i32 @__C_specific_handler(...)
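
The notable change in this file is that the constant bitcasts wrapping function pointers disappear: with a single pointer type a function address is already a ptr, so the personality and catchpad operands name the function directly. A tiny standalone sketch of the same simplification (the global and function names are hypothetical):

  ; Old form: @handler = global i8* bitcast (i32 (i8*, i8*)* @filt_sketch to i8*)
  declare i32 @filt_sketch(ptr, ptr)
  @handler = global ptr @filt_sketch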

diff  --git a/llvm/test/CodeGen/AArch64/large-stack-cmp.ll b/llvm/test/CodeGen/AArch64/large-stack-cmp.ll
index e19c8531fd4d5..56094903efe7e 100644
--- a/llvm/test/CodeGen/AArch64/large-stack-cmp.ll
+++ b/llvm/test/CodeGen/AArch64/large-stack-cmp.ll
@@ -32,7 +32,7 @@ define void @foo() {
 
 %var = alloca i32, i32 12
   %var2 = alloca i32, i32 1030
-  %tst = icmp eq i32* %var, null
+  %tst = icmp eq ptr %var, null
   br i1 %tst, label %true, label %false
 
 true:

diff  --git a/llvm/test/CodeGen/AArch64/large-stack.ll b/llvm/test/CodeGen/AArch64/large-stack.ll
index b0e83ec2b6374..c96dd60462d41 100644
--- a/llvm/test/CodeGen/AArch64/large-stack.ll
+++ b/llvm/test/CodeGen/AArch64/large-stack.ll
@@ -8,18 +8,18 @@ entry:
   %val.addr = alloca i64, align 8
   %large = alloca [268435456 x i64], align 8
   %i = alloca i32, align 4
-  store i64 %val, i64* %val.addr, align 8
-  %0 = load i64, i64* %val.addr, align 8
-  %arrayidx = getelementptr inbounds [268435456 x i64], [268435456 x i64]* %large, i64 0, i64 %0
-  store i64 1, i64* %arrayidx, align 8
-  %1 = load i64, i64* %val.addr, align 8
-  %arrayidx1 = getelementptr inbounds [268435456 x i64], [268435456 x i64]* %large, i64 0, i64 %1
-  %2 = load i64, i64* %arrayidx1, align 8
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i64 0, i64 0), i64 %2)
+  store i64 %val, ptr %val.addr, align 8
+  %0 = load i64, ptr %val.addr, align 8
+  %arrayidx = getelementptr inbounds [268435456 x i64], ptr %large, i64 0, i64 %0
+  store i64 1, ptr %arrayidx, align 8
+  %1 = load i64, ptr %val.addr, align 8
+  %arrayidx1 = getelementptr inbounds [268435456 x i64], ptr %large, i64 0, i64 %1
+  %2 = load i64, ptr %arrayidx1, align 8
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i64 %2)
   ret void
 }
 
-declare dso_local i32 @printf(i8*, ...)
+declare dso_local i32 @printf(ptr, ...)
 
 attributes #0 = { noinline optnone "frame-pointer"="all" uwtable }
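
Worth noting in the hunk above: the getelementptr constant expression that computed the address of element 0 of @.str folds away, because with opaque pointers @.str itself already has the type the printf operand needs. A minimal sketch (hypothetical names; @.fmt stands in for the test's @.str):

  @.fmt = private constant [4 x i8] c"%ld\00"
  declare i32 @printf(ptr, ...)

  define void @sketch_print(i64 %v) {
    ; old form: call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.fmt, i64 0, i64 0), i64 %v)
    %call = call i32 (ptr, ...) @printf(ptr @.fmt, i64 %v)
    ret void
  }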
 

diff  --git a/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll b/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
index 951bd4ada3c99..cf638356dfdf5 100644
--- a/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
+++ b/llvm/test/CodeGen/AArch64/ldp-stp-scaled-unscaled-pairs.ll
@@ -3,48 +3,40 @@
 ; CHECK-LABEL: test_strd_sturd:
 ; CHECK-NEXT: stp d0, d1, [x0, #-8]
 ; CHECK-NEXT: ret
-define void @test_strd_sturd(float* %ptr, <2 x float> %v1, <2 x float> %v2) #0 {
-  %tmp1 = bitcast float* %ptr to <2 x float>*
-  store <2 x float> %v2, <2 x float>* %tmp1, align 16
-  %add.ptr = getelementptr inbounds float, float* %ptr, i64 -2
-  %tmp = bitcast float* %add.ptr to <2 x float>*
-  store <2 x float> %v1, <2 x float>* %tmp, align 16
+define void @test_strd_sturd(ptr %ptr, <2 x float> %v1, <2 x float> %v2) #0 {
+  store <2 x float> %v2, ptr %ptr, align 16
+  %add.ptr = getelementptr inbounds float, ptr %ptr, i64 -2
+  store <2 x float> %v1, ptr %add.ptr, align 16
   ret void
 }
 
 ; CHECK-LABEL: test_sturd_strd:
 ; CHECK-NEXT: stp d0, d1, [x0, #-8]
 ; CHECK-NEXT: ret
-define void @test_sturd_strd(float* %ptr, <2 x float> %v1, <2 x float> %v2) #0 {
-  %add.ptr = getelementptr inbounds float, float* %ptr, i64 -2
-  %tmp = bitcast float* %add.ptr to <2 x float>*
-  store <2 x float> %v1, <2 x float>* %tmp, align 16
-  %tmp1 = bitcast float* %ptr to <2 x float>*
-  store <2 x float> %v2, <2 x float>* %tmp1, align 16
+define void @test_sturd_strd(ptr %ptr, <2 x float> %v1, <2 x float> %v2) #0 {
+  %add.ptr = getelementptr inbounds float, ptr %ptr, i64 -2
+  store <2 x float> %v1, ptr %add.ptr, align 16
+  store <2 x float> %v2, ptr %ptr, align 16
   ret void
 }
 
 ; CHECK-LABEL: test_strq_sturq:
 ; CHECK-NEXT: stp q0, q1, [x0, #-16]
 ; CHECK-NEXT: ret
-define void @test_strq_sturq(double* %ptr, <2 x double> %v1, <2 x double> %v2) #0 {
-  %tmp1 = bitcast double* %ptr to <2 x double>*
-  store <2 x double> %v2, <2 x double>* %tmp1, align 16
-  %add.ptr = getelementptr inbounds double, double* %ptr, i64 -2
-  %tmp = bitcast double* %add.ptr to <2 x double>*
-  store <2 x double> %v1, <2 x double>* %tmp, align 16
+define void @test_strq_sturq(ptr %ptr, <2 x double> %v1, <2 x double> %v2) #0 {
+  store <2 x double> %v2, ptr %ptr, align 16
+  %add.ptr = getelementptr inbounds double, ptr %ptr, i64 -2
+  store <2 x double> %v1, ptr %add.ptr, align 16
   ret void
 }
 
 ; CHECK-LABEL: test_sturq_strq:
 ; CHECK-NEXT: stp q0, q1, [x0, #-16]
 ; CHECK-NEXT: ret
-define void @test_sturq_strq(double* %ptr, <2 x double> %v1, <2 x double> %v2) #0 {
-  %add.ptr = getelementptr inbounds double, double* %ptr, i64 -2
-  %tmp = bitcast double* %add.ptr to <2 x double>*
-  store <2 x double> %v1, <2 x double>* %tmp, align 16
-  %tmp1 = bitcast double* %ptr to <2 x double>*
-  store <2 x double> %v2, <2 x double>* %tmp1, align 16
+define void @test_sturq_strq(ptr %ptr, <2 x double> %v1, <2 x double> %v2) #0 {
+  %add.ptr = getelementptr inbounds double, ptr %ptr, i64 -2
+  store <2 x double> %v1, ptr %add.ptr, align 16
+  store <2 x double> %v2, ptr %ptr, align 16
   ret void
 }
 
@@ -52,10 +44,10 @@ define void @test_sturq_strq(double* %ptr, <2 x double> %v1, <2 x double> %v2) #
 ; CHECK-NEXT: ldp [[V0:x[0-9]+]], [[V1:x[0-9]+]], [x0, #-8]
 ; CHECK-NEXT: add x0, [[V0]], [[V1]]
 ; CHECK-NEXT: ret
-define i64 @test_ldrx_ldurx(i64* %p) #0 {
-  %tmp = load i64, i64* %p, align 4
-  %add.ptr = getelementptr inbounds i64, i64* %p, i64 -1
-  %tmp1 = load i64, i64* %add.ptr, align 4
+define i64 @test_ldrx_ldurx(ptr %p) #0 {
+  %tmp = load i64, ptr %p, align 4
+  %add.ptr = getelementptr inbounds i64, ptr %p, i64 -1
+  %tmp1 = load i64, ptr %add.ptr, align 4
   %add = add nsw i64 %tmp1, %tmp
   ret i64 %add
 }
@@ -64,10 +56,10 @@ define i64 @test_ldrx_ldurx(i64* %p) #0 {
 ; CHECK-NEXT: ldp [[V0:x[0-9]+]], [[V1:x[0-9]+]], [x0, #-8]
 ; CHECK-NEXT: add x0, [[V0]], [[V1]]
 ; CHECK-NEXT: ret
-define i64 @test_ldurx_ldrx(i64* %p) #0 {
-  %add.ptr = getelementptr inbounds i64, i64* %p, i64 -1
-  %tmp1 = load i64, i64* %add.ptr, align 4
-  %tmp = load i64, i64* %p, align 4
+define i64 @test_ldurx_ldrx(ptr %p) #0 {
+  %add.ptr = getelementptr inbounds i64, ptr %p, i64 -1
+  %tmp1 = load i64, ptr %add.ptr, align 4
+  %tmp = load i64, ptr %p, align 4
   %add = add nsw i64 %tmp1, %tmp
   ret i64 %add
 }
@@ -76,10 +68,10 @@ define i64 @test_ldurx_ldrx(i64* %p) #0 {
 ; CHECK-NEXT: ldpsw [[V0:x[0-9]+]], [[V1:x[0-9]+]], [x0, #-4]
 ; CHECK-NEXT: add x0, [[V0]], [[V1]]
 ; CHECK-NEXT: ret
-define i64 @test_ldrsw_ldursw(i32* %p) #0 {
-  %tmp = load i32, i32* %p, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %p, i64 -1
-  %tmp1 = load i32, i32* %add.ptr, align 4
+define i64 @test_ldrsw_ldursw(ptr %p) #0 {
+  %tmp = load i32, ptr %p, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %p, i64 -1
+  %tmp1 = load i32, ptr %add.ptr, align 4
   %sexttmp = sext i32 %tmp to i64
   %sexttmp1 = sext i32 %tmp1 to i64
   %add = add nsw i64 %sexttmp1, %sexttmp
@@ -92,12 +84,10 @@ define i64 @test_ldrsw_ldursw(i32* %p) #0 {
 ; CHECK-NEXT: ldur q[[V1:[0-9]+]], [x0, #24]
 ; CHECK-NEXT: add.2d v0, v[[V0]], v[[V1]]
 ; CHECK-NEXT: ret
-define <2 x i64> @test_ldrq_ldruq_invalidoffset(i64* %p) #0 {
-  %a1 = bitcast i64* %p to <2 x i64>*
-  %tmp1 = load <2 x i64>, < 2 x i64>* %a1, align 8
-  %add.ptr2 = getelementptr inbounds i64, i64* %p, i64 3
-  %a2 = bitcast i64* %add.ptr2 to <2 x i64>*
-  %tmp2 = load <2 x i64>, <2 x i64>* %a2, align 8
+define <2 x i64> @test_ldrq_ldruq_invalidoffset(ptr %p) #0 {
+  %tmp1 = load <2 x i64>, ptr %p, align 8
+  %add.ptr2 = getelementptr inbounds i64, ptr %p, i64 3
+  %tmp2 = load <2 x i64>, ptr %add.ptr2, align 8
   %add = add nsw <2 x i64> %tmp1, %tmp2
   ret <2 x i64> %add
 }
@@ -111,15 +101,13 @@ define void @test_stur_str_no_assert() #0 {
 entry:
   %a1 = alloca i64, align 4
   %a2 = alloca [12 x i8], align 4
-  %0 = bitcast i64* %a1 to i8*
-  %C = getelementptr inbounds [12 x i8], [12 x i8]* %a2, i64 0, i64 4
-  %1 = bitcast i8* %C to i64*
-  store i64 0, i64* %1, align 4
-  call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 8, i1 false)
+  %C = getelementptr inbounds [12 x i8], ptr %a2, i64 0, i64 4
+  store i64 0, ptr %C, align 4
+  call void @llvm.memset.p0.i64(ptr align 8 %a1, i8 0, i64 8, i1 false)
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
 
 
 attributes #0 = { nounwind }
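
The pattern removed throughout this file is the pointer bitcast that existed only to change the pointee type; with opaque pointers the vector store goes through the original ptr directly. A standalone sketch of the converted shape (hypothetical function name):

  define void @sketch_store_pair(ptr %ptr, <2 x float> %v1, <2 x float> %v2) {
    ; the old form first needed: %tmp = bitcast float* %ptr to <2 x float>*
    store <2 x float> %v2, ptr %ptr, align 16
    %lo = getelementptr inbounds float, ptr %ptr, i64 -2
    store <2 x float> %v1, ptr %lo, align 16   ; pairs with the store above into a single stp
    ret void
  }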

diff  --git a/llvm/test/CodeGen/AArch64/ldst-opt-after-block-placement.ll b/llvm/test/CodeGen/AArch64/ldst-opt-after-block-placement.ll
index 8ddf06996cdf9..118f52b037bf1 100644
--- a/llvm/test/CodeGen/AArch64/ldst-opt-after-block-placement.ll
+++ b/llvm/test/CodeGen/AArch64/ldst-opt-after-block-placement.ll
@@ -4,7 +4,7 @@
 ; Run at O3 to make sure we can optimize load/store instructions after Machine
 ; Block Placement takes place using Tail Duplication Threshold = 4.
 
-define void @foo(i1 %cond, i64* %ptr) {
+define void @foo(i1 %cond, ptr %ptr) {
 ; CHECK-LABEL: foo:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    tbz w0, #0, .LBB0_2
@@ -26,24 +26,24 @@ entry:
   br i1 %cond, label %if.then, label %if.else
 
 if.then:
-  %0 = getelementptr inbounds i64, i64* %ptr, i64 2
-  %1 = load i64, i64* %0, align 8
-  store i64 0, i64* %0, align 8
+  %0 = getelementptr inbounds i64, ptr %ptr, i64 2
+  %1 = load i64, ptr %0, align 8
+  store i64 0, ptr %0, align 8
   br label %if.end
 
 if.else:
-  %2 = load i64, i64* %ptr, align 8
+  %2 = load i64, ptr %ptr, align 8
   br label %if.end
 
 if.end:
   %3 = phi i64 [ %1, %if.then ], [ %2, %if.else ]
-  %4 = getelementptr inbounds i64, i64* %ptr, i64 1
-  %5 = load i64, i64* %4, align 8
+  %4 = getelementptr inbounds i64, ptr %ptr, i64 1
+  %5 = load i64, ptr %4, align 8
   %6 = icmp slt i64 %3, %5
   br i1 %6, label %exit1, label %exit2
 
 exit1:
-  store i64 0, i64* %4, align 8
+  store i64 0, ptr %4, align 8
   ret void
 
 exit2:

diff  --git a/llvm/test/CodeGen/AArch64/ldst-opt.ll b/llvm/test/CodeGen/AArch64/ldst-opt.ll
index 3882497f3e61a..4e09e76457582 100644
--- a/llvm/test/CodeGen/AArch64/ldst-opt.ll
+++ b/llvm/test/CodeGen/AArch64/ldst-opt.ll
@@ -3,7 +3,7 @@
 
 ; This file contains tests for the AArch64 load/store optimizer.
 
-%padding = type { i8*, i8*, i8*, i8* }
+%padding = type { ptr, ptr, ptr, ptr }
 %s.byte = type { i8, i8 }
 %s.halfword = type { i16, i16 }
 %s.word = type { i32, i32 }
@@ -29,199 +29,199 @@
 ;
 ; with X being either w1, x1, s0, d0 or q0.
 
-declare void @bar_byte(%s.byte*, i8)
+declare void @bar_byte(ptr, i8)
 
-define void @load-pre-indexed-byte(%struct.byte* %ptr) nounwind {
+define void @load-pre-indexed-byte(ptr %ptr) nounwind {
 ; CHECK-LABEL: load-pre-indexed-byte
 ; CHECK: ldrb w{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
-  %a = getelementptr inbounds %struct.byte, %struct.byte* %ptr, i64 0, i32 1, i32 0
-  %add = load i8, i8* %a, align 4
+  %a = getelementptr inbounds %struct.byte, ptr %ptr, i64 0, i32 1, i32 0
+  %add = load i8, ptr %a, align 4
   br label %bar
 bar:
-  %c = getelementptr inbounds %struct.byte, %struct.byte* %ptr, i64 0, i32 1
-  tail call void @bar_byte(%s.byte* %c, i8 %add)
+  %c = getelementptr inbounds %struct.byte, ptr %ptr, i64 0, i32 1
+  tail call void @bar_byte(ptr %c, i8 %add)
   ret void
 }
 
-define void @store-pre-indexed-byte(%struct.byte* %ptr, i8 %val) nounwind {
+define void @store-pre-indexed-byte(ptr %ptr, i8 %val) nounwind {
 ; CHECK-LABEL: store-pre-indexed-byte
 ; CHECK: strb w{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
-  %a = getelementptr inbounds %struct.byte, %struct.byte* %ptr, i64 0, i32 1, i32 0
-  store i8 %val, i8* %a, align 4
+  %a = getelementptr inbounds %struct.byte, ptr %ptr, i64 0, i32 1, i32 0
+  store i8 %val, ptr %a, align 4
   br label %bar
 bar:
-  %c = getelementptr inbounds %struct.byte, %struct.byte* %ptr, i64 0, i32 1
-  tail call void @bar_byte(%s.byte* %c, i8 %val)
+  %c = getelementptr inbounds %struct.byte, ptr %ptr, i64 0, i32 1
+  tail call void @bar_byte(ptr %c, i8 %val)
   ret void
 }
 
-declare void @bar_halfword(%s.halfword*, i16)
+declare void @bar_halfword(ptr, i16)
 
-define void @load-pre-indexed-halfword(%struct.halfword* %ptr) nounwind {
+define void @load-pre-indexed-halfword(ptr %ptr) nounwind {
 ; CHECK-LABEL: load-pre-indexed-halfword
 ; CHECK: ldrh w{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
-  %a = getelementptr inbounds %struct.halfword, %struct.halfword* %ptr, i64 0, i32 1, i32 0
-  %add = load i16, i16* %a, align 4
+  %a = getelementptr inbounds %struct.halfword, ptr %ptr, i64 0, i32 1, i32 0
+  %add = load i16, ptr %a, align 4
   br label %bar
 bar:
-  %c = getelementptr inbounds %struct.halfword, %struct.halfword* %ptr, i64 0, i32 1
-  tail call void @bar_halfword(%s.halfword* %c, i16 %add)
+  %c = getelementptr inbounds %struct.halfword, ptr %ptr, i64 0, i32 1
+  tail call void @bar_halfword(ptr %c, i16 %add)
   ret void
 }
 
-define void @store-pre-indexed-halfword(%struct.halfword* %ptr, i16 %val) nounwind {
+define void @store-pre-indexed-halfword(ptr %ptr, i16 %val) nounwind {
 ; CHECK-LABEL: store-pre-indexed-halfword
 ; CHECK: strh w{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
-  %a = getelementptr inbounds %struct.halfword, %struct.halfword* %ptr, i64 0, i32 1, i32 0
-  store i16 %val, i16* %a, align 4
+  %a = getelementptr inbounds %struct.halfword, ptr %ptr, i64 0, i32 1, i32 0
+  store i16 %val, ptr %a, align 4
   br label %bar
 bar:
-  %c = getelementptr inbounds %struct.halfword, %struct.halfword* %ptr, i64 0, i32 1
-  tail call void @bar_halfword(%s.halfword* %c, i16 %val)
+  %c = getelementptr inbounds %struct.halfword, ptr %ptr, i64 0, i32 1
+  tail call void @bar_halfword(ptr %c, i16 %val)
   ret void
 }
 
-declare void @bar_word(%s.word*, i32)
+declare void @bar_word(ptr, i32)
 
-define void @load-pre-indexed-word(%struct.word* %ptr) nounwind {
+define void @load-pre-indexed-word(ptr %ptr) nounwind {
 ; CHECK-LABEL: load-pre-indexed-word
 ; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
-  %a = getelementptr inbounds %struct.word, %struct.word* %ptr, i64 0, i32 1, i32 0
-  %add = load i32, i32* %a, align 4
+  %a = getelementptr inbounds %struct.word, ptr %ptr, i64 0, i32 1, i32 0
+  %add = load i32, ptr %a, align 4
   br label %bar
 bar:
-  %c = getelementptr inbounds %struct.word, %struct.word* %ptr, i64 0, i32 1
-  tail call void @bar_word(%s.word* %c, i32 %add)
+  %c = getelementptr inbounds %struct.word, ptr %ptr, i64 0, i32 1
+  tail call void @bar_word(ptr %c, i32 %add)
   ret void
 }
 
-define void @store-pre-indexed-word(%struct.word* %ptr, i32 %val) nounwind {
+define void @store-pre-indexed-word(ptr %ptr, i32 %val) nounwind {
 ; CHECK-LABEL: store-pre-indexed-word
 ; CHECK: str w{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
-  %a = getelementptr inbounds %struct.word, %struct.word* %ptr, i64 0, i32 1, i32 0
-  store i32 %val, i32* %a, align 4
+  %a = getelementptr inbounds %struct.word, ptr %ptr, i64 0, i32 1, i32 0
+  store i32 %val, ptr %a, align 4
   br label %bar
 bar:
-  %c = getelementptr inbounds %struct.word, %struct.word* %ptr, i64 0, i32 1
-  tail call void @bar_word(%s.word* %c, i32 %val)
+  %c = getelementptr inbounds %struct.word, ptr %ptr, i64 0, i32 1
+  tail call void @bar_word(ptr %c, i32 %val)
   ret void
 }
 
-declare void @bar_doubleword(%s.doubleword*, i64)
+declare void @bar_doubleword(ptr, i64)
 
-define void @load-pre-indexed-doubleword(%struct.doubleword* %ptr) nounwind {
+define void @load-pre-indexed-doubleword(ptr %ptr) nounwind {
 ; CHECK-LABEL: load-pre-indexed-doubleword
 ; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
-  %a = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1, i32 0
-  %add = load i64, i64* %a, align 8
+  %a = getelementptr inbounds %struct.doubleword, ptr %ptr, i64 0, i32 1, i32 0
+  %add = load i64, ptr %a, align 8
   br label %bar
 bar:
-  %c = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1
-  tail call void @bar_doubleword(%s.doubleword* %c, i64 %add)
+  %c = getelementptr inbounds %struct.doubleword, ptr %ptr, i64 0, i32 1
+  tail call void @bar_doubleword(ptr %c, i64 %add)
   ret void
 }
 
-define void @store-pre-indexed-doubleword(%struct.doubleword* %ptr, i64 %val) nounwind {
+define void @store-pre-indexed-doubleword(ptr %ptr, i64 %val) nounwind {
 ; CHECK-LABEL: store-pre-indexed-doubleword
 ; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
-  %a = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1, i32 0
-  store i64 %val, i64* %a, align 8
+  %a = getelementptr inbounds %struct.doubleword, ptr %ptr, i64 0, i32 1, i32 0
+  store i64 %val, ptr %a, align 8
   br label %bar
 bar:
-  %c = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1
-  tail call void @bar_doubleword(%s.doubleword* %c, i64 %val)
+  %c = getelementptr inbounds %struct.doubleword, ptr %ptr, i64 0, i32 1
+  tail call void @bar_doubleword(ptr %c, i64 %val)
   ret void
 }
 
-declare void @bar_quadword(%s.quadword*, fp128)
+declare void @bar_quadword(ptr, fp128)
 
-define void @load-pre-indexed-quadword(%struct.quadword* %ptr) nounwind {
+define void @load-pre-indexed-quadword(ptr %ptr) nounwind {
 ; CHECK-LABEL: load-pre-indexed-quadword
 ; CHECK: ldr q{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
-  %a = getelementptr inbounds %struct.quadword, %struct.quadword* %ptr, i64 0, i32 1, i32 0
-  %add = load fp128, fp128* %a, align 16
+  %a = getelementptr inbounds %struct.quadword, ptr %ptr, i64 0, i32 1, i32 0
+  %add = load fp128, ptr %a, align 16
   br label %bar
 bar:
-  %c = getelementptr inbounds %struct.quadword, %struct.quadword* %ptr, i64 0, i32 1
-  tail call void @bar_quadword(%s.quadword* %c, fp128 %add)
+  %c = getelementptr inbounds %struct.quadword, ptr %ptr, i64 0, i32 1
+  tail call void @bar_quadword(ptr %c, fp128 %add)
   ret void
 }
 
-define void @store-pre-indexed-quadword(%struct.quadword* %ptr, fp128 %val) nounwind {
+define void @store-pre-indexed-quadword(ptr %ptr, fp128 %val) nounwind {
 ; CHECK-LABEL: store-pre-indexed-quadword
 ; CHECK: str q{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
-  %a = getelementptr inbounds %struct.quadword, %struct.quadword* %ptr, i64 0, i32 1, i32 0
-  store fp128 %val, fp128* %a, align 16
+  %a = getelementptr inbounds %struct.quadword, ptr %ptr, i64 0, i32 1, i32 0
+  store fp128 %val, ptr %a, align 16
   br label %bar
 bar:
-  %c = getelementptr inbounds %struct.quadword, %struct.quadword* %ptr, i64 0, i32 1
-  tail call void @bar_quadword(%s.quadword* %c, fp128 %val)
+  %c = getelementptr inbounds %struct.quadword, ptr %ptr, i64 0, i32 1
+  tail call void @bar_quadword(ptr %c, fp128 %val)
   ret void
 }
 
-declare void @bar_float(%s.float*, float)
+declare void @bar_float(ptr, float)
 
-define void @load-pre-indexed-float(%struct.float* %ptr) nounwind {
+define void @load-pre-indexed-float(ptr %ptr) nounwind {
 ; CHECK-LABEL: load-pre-indexed-float
 ; CHECK: ldr s{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
-  %a = getelementptr inbounds %struct.float, %struct.float* %ptr, i64 0, i32 1, i32 0
-  %add = load float, float* %a, align 4
+  %a = getelementptr inbounds %struct.float, ptr %ptr, i64 0, i32 1, i32 0
+  %add = load float, ptr %a, align 4
   br label %bar
 bar:
-  %c = getelementptr inbounds %struct.float, %struct.float* %ptr, i64 0, i32 1
-  tail call void @bar_float(%s.float* %c, float %add)
+  %c = getelementptr inbounds %struct.float, ptr %ptr, i64 0, i32 1
+  tail call void @bar_float(ptr %c, float %add)
   ret void
 }
 
-define void @store-pre-indexed-float(%struct.float* %ptr, float %val) nounwind {
+define void @store-pre-indexed-float(ptr %ptr, float %val) nounwind {
 ; CHECK-LABEL: store-pre-indexed-float
 ; CHECK: str s{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
-  %a = getelementptr inbounds %struct.float, %struct.float* %ptr, i64 0, i32 1, i32 0
-  store float %val, float* %a, align 4
+  %a = getelementptr inbounds %struct.float, ptr %ptr, i64 0, i32 1, i32 0
+  store float %val, ptr %a, align 4
   br label %bar
 bar:
-  %c = getelementptr inbounds %struct.float, %struct.float* %ptr, i64 0, i32 1
-  tail call void @bar_float(%s.float* %c, float %val)
+  %c = getelementptr inbounds %struct.float, ptr %ptr, i64 0, i32 1
+  tail call void @bar_float(ptr %c, float %val)
   ret void
 }
 
-declare void @bar_double(%s.double*, double)
+declare void @bar_double(ptr, double)
 
-define void @load-pre-indexed-double(%struct.double* %ptr) nounwind {
+define void @load-pre-indexed-double(ptr %ptr) nounwind {
 ; CHECK-LABEL: load-pre-indexed-double
 ; CHECK: ldr d{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
-  %a = getelementptr inbounds %struct.double, %struct.double* %ptr, i64 0, i32 1, i32 0
-  %add = load double, double* %a, align 8
+  %a = getelementptr inbounds %struct.double, ptr %ptr, i64 0, i32 1, i32 0
+  %add = load double, ptr %a, align 8
   br label %bar
 bar:
-  %c = getelementptr inbounds %struct.double, %struct.double* %ptr, i64 0, i32 1
-  tail call void @bar_double(%s.double* %c, double %add)
+  %c = getelementptr inbounds %struct.double, ptr %ptr, i64 0, i32 1
+  tail call void @bar_double(ptr %c, double %add)
   ret void
 }
 
-define void @store-pre-indexed-double(%struct.double* %ptr, double %val) nounwind {
+define void @store-pre-indexed-double(ptr %ptr, double %val) nounwind {
 ; CHECK-LABEL: store-pre-indexed-double
 ; CHECK: str d{{[0-9]+}}, [x{{[0-9]+}}, #32]!
 entry:
-  %a = getelementptr inbounds %struct.double, %struct.double* %ptr, i64 0, i32 1, i32 0
-  store double %val, double* %a, align 8
+  %a = getelementptr inbounds %struct.double, ptr %ptr, i64 0, i32 1, i32 0
+  store double %val, ptr %a, align 8
   br label %bar
 bar:
-  %c = getelementptr inbounds %struct.double, %struct.double* %ptr, i64 0, i32 1
-  tail call void @bar_double(%s.double* %c, double %val)
+  %c = getelementptr inbounds %struct.double, ptr %ptr, i64 0, i32 1
+  tail call void @bar_double(ptr %c, double %val)
   ret void
 }
 
@@ -234,36 +234,36 @@ bar:
 ; (ldp|stp) w1, w2, [x0, #32]!
 ;
 
-define void @load-pair-pre-indexed-word(%struct.word* %ptr) nounwind {
+define void @load-pair-pre-indexed-word(ptr %ptr) nounwind {
 ; CHECK-LABEL: load-pair-pre-indexed-word
 ; CHECK: ldp w{{[0-9]+}}, w{{[0-9]+}}, [x0, #32]!
 ; CHECK-NOT: add x0, x0, #32
 entry:
-  %a = getelementptr inbounds %struct.word, %struct.word* %ptr, i64 0, i32 1, i32 0
-  %a1 = load i32, i32* %a, align 4
-  %b = getelementptr inbounds %struct.word, %struct.word* %ptr, i64 0, i32 1, i32 1
-  %b1 = load i32, i32* %b, align 4
+  %a = getelementptr inbounds %struct.word, ptr %ptr, i64 0, i32 1, i32 0
+  %a1 = load i32, ptr %a, align 4
+  %b = getelementptr inbounds %struct.word, ptr %ptr, i64 0, i32 1, i32 1
+  %b1 = load i32, ptr %b, align 4
   %add = add i32 %a1, %b1
   br label %bar
 bar:
-  %c = getelementptr inbounds %struct.word, %struct.word* %ptr, i64 0, i32 1
-  tail call void @bar_word(%s.word* %c, i32 %add)
+  %c = getelementptr inbounds %struct.word, ptr %ptr, i64 0, i32 1
+  tail call void @bar_word(ptr %c, i32 %add)
   ret void
 }
 
-define void @store-pair-pre-indexed-word(%struct.word* %ptr, i32 %val) nounwind {
+define void @store-pair-pre-indexed-word(ptr %ptr, i32 %val) nounwind {
 ; CHECK-LABEL: store-pair-pre-indexed-word
 ; CHECK: stp w{{[0-9]+}}, w{{[0-9]+}}, [x0, #32]!
 ; CHECK-NOT: add x0, x0, #32
 entry:
-  %a = getelementptr inbounds %struct.word, %struct.word* %ptr, i64 0, i32 1, i32 0
-  store i32 %val, i32* %a, align 4
-  %b = getelementptr inbounds %struct.word, %struct.word* %ptr, i64 0, i32 1, i32 1
-  store i32 %val, i32* %b, align 4
+  %a = getelementptr inbounds %struct.word, ptr %ptr, i64 0, i32 1, i32 0
+  store i32 %val, ptr %a, align 4
+  %b = getelementptr inbounds %struct.word, ptr %ptr, i64 0, i32 1, i32 1
+  store i32 %val, ptr %b, align 4
   br label %bar
 bar:
-  %c = getelementptr inbounds %struct.word, %struct.word* %ptr, i64 0, i32 1
-  tail call void @bar_word(%s.word* %c, i32 %val)
+  %c = getelementptr inbounds %struct.word, ptr %ptr, i64 0, i32 1
+  tail call void @bar_word(ptr %c, i32 %val)
   ret void
 }
 
@@ -283,183 +283,183 @@ bar:
 %pre.struct.float = type { i32, float, float, float}
 %pre.struct.double = type { i32, double, double, double}
 
-define i32 @load-pre-indexed-word2(%pre.struct.i32** %this, i1 %cond,
-                                   %pre.struct.i32* %load2) nounwind {
+define i32 @load-pre-indexed-word2(ptr %this, i1 %cond,
+                                   ptr %load2) nounwind {
 ; CHECK-LABEL: load-pre-indexed-word2
 ; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}, #4]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i32*, %pre.struct.i32** %this
-  %gep1 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load1, i64 0, i32 1
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.i32, ptr %load1, i64 0, i32 1
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load2, i64 0, i32 2
+  %gep2 = getelementptr inbounds %pre.struct.i32, ptr %load2, i64 0, i32 2
   br label %return
 return:
-  %retptr = phi i32* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  %ret = load i32, i32* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  %ret = load i32, ptr %retptr
   ret i32 %ret
 }
 
-define i64 @load-pre-indexed-doubleword2(%pre.struct.i64** %this, i1 %cond,
-                                         %pre.struct.i64* %load2) nounwind {
+define i64 @load-pre-indexed-doubleword2(ptr %this, i1 %cond,
+                                         ptr %load2) nounwind {
 ; CHECK-LABEL: load-pre-indexed-doubleword2
 ; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}, #8]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i64*, %pre.struct.i64** %this
-  %gep1 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load1, i64 0, i32 1
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.i64, ptr %load1, i64 0, i32 1
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load2, i64 0, i32 2
+  %gep2 = getelementptr inbounds %pre.struct.i64, ptr %load2, i64 0, i32 2
   br label %return
 return:
-  %retptr = phi i64* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  %ret = load i64, i64* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  %ret = load i64, ptr %retptr
   ret i64 %ret
 }
 
-define <2 x i64> @load-pre-indexed-quadword2(%pre.struct.i128** %this, i1 %cond,
-                                             %pre.struct.i128* %load2) nounwind {
+define <2 x i64> @load-pre-indexed-quadword2(ptr %this, i1 %cond,
+                                             ptr %load2) nounwind {
 ; CHECK-LABEL: load-pre-indexed-quadword2
 ; CHECK: ldr q{{[0-9]+}}, [x{{[0-9]+}}, #16]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i128*, %pre.struct.i128** %this
-  %gep1 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load1, i64 0, i32 1
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.i128, ptr %load1, i64 0, i32 1
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load2, i64 0, i32 2
+  %gep2 = getelementptr inbounds %pre.struct.i128, ptr %load2, i64 0, i32 2
   br label %return
 return:
-  %retptr = phi <2 x i64>* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  %ret = load <2 x i64>, <2 x i64>* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  %ret = load <2 x i64>, ptr %retptr
   ret <2 x i64> %ret
 }
 
-define float @load-pre-indexed-float2(%pre.struct.float** %this, i1 %cond,
-                                      %pre.struct.float* %load2) nounwind {
+define float @load-pre-indexed-float2(ptr %this, i1 %cond,
+                                      ptr %load2) nounwind {
 ; CHECK-LABEL: load-pre-indexed-float2
 ; CHECK: ldr s{{[0-9]+}}, [x{{[0-9]+}}, #4]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.float*, %pre.struct.float** %this
-  %gep1 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load1, i64 0, i32 1
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.float, ptr %load1, i64 0, i32 1
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load2, i64 0, i32 2
+  %gep2 = getelementptr inbounds %pre.struct.float, ptr %load2, i64 0, i32 2
   br label %return
 return:
-  %retptr = phi float* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  %ret = load float, float* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  %ret = load float, ptr %retptr
   ret float %ret
 }
 
-define double @load-pre-indexed-double2(%pre.struct.double** %this, i1 %cond,
-                                        %pre.struct.double* %load2) nounwind {
+define double @load-pre-indexed-double2(ptr %this, i1 %cond,
+                                        ptr %load2) nounwind {
 ; CHECK-LABEL: load-pre-indexed-double2
 ; CHECK: ldr d{{[0-9]+}}, [x{{[0-9]+}}, #8]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.double*, %pre.struct.double** %this
-  %gep1 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load1, i64 0, i32 1
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.double, ptr %load1, i64 0, i32 1
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load2, i64 0, i32 2
+  %gep2 = getelementptr inbounds %pre.struct.double, ptr %load2, i64 0, i32 2
   br label %return
 return:
-  %retptr = phi double* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  %ret = load double, double* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  %ret = load double, ptr %retptr
   ret double %ret
 }
 
-define i32 @load-pre-indexed-word3(%pre.struct.i32** %this, i1 %cond,
-                                   %pre.struct.i32* %load2) nounwind {
+define i32 @load-pre-indexed-word3(ptr %this, i1 %cond,
+                                   ptr %load2) nounwind {
 ; CHECK-LABEL: load-pre-indexed-word3
 ; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}, #12]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i32*, %pre.struct.i32** %this
-  %gep1 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load1, i64 0, i32 3
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.i32, ptr %load1, i64 0, i32 3
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load2, i64 0, i32 4
+  %gep2 = getelementptr inbounds %pre.struct.i32, ptr %load2, i64 0, i32 4
   br label %return
 return:
-  %retptr = phi i32* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  %ret = load i32, i32* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  %ret = load i32, ptr %retptr
   ret i32 %ret
 }
 
-define i64 @load-pre-indexed-doubleword3(%pre.struct.i64** %this, i1 %cond,
-                                         %pre.struct.i64* %load2) nounwind {
+define i64 @load-pre-indexed-doubleword3(ptr %this, i1 %cond,
+                                         ptr %load2) nounwind {
 ; CHECK-LABEL: load-pre-indexed-doubleword3
 ; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}, #16]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i64*, %pre.struct.i64** %this
-  %gep1 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load1, i64 0, i32 2
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.i64, ptr %load1, i64 0, i32 2
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load2, i64 0, i32 3
+  %gep2 = getelementptr inbounds %pre.struct.i64, ptr %load2, i64 0, i32 3
   br label %return
 return:
-  %retptr = phi i64* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  %ret = load i64, i64* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  %ret = load i64, ptr %retptr
   ret i64 %ret
 }
 
-define <2 x i64> @load-pre-indexed-quadword3(%pre.struct.i128** %this, i1 %cond,
-                                             %pre.struct.i128* %load2) nounwind {
+define <2 x i64> @load-pre-indexed-quadword3(ptr %this, i1 %cond,
+                                             ptr %load2) nounwind {
 ; CHECK-LABEL: load-pre-indexed-quadword3
 ; CHECK: ldr q{{[0-9]+}}, [x{{[0-9]+}}, #32]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i128*, %pre.struct.i128** %this
-  %gep1 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load1, i64 0, i32 2
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.i128, ptr %load1, i64 0, i32 2
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load2, i64 0, i32 3
+  %gep2 = getelementptr inbounds %pre.struct.i128, ptr %load2, i64 0, i32 3
   br label %return
 return:
-  %retptr = phi <2 x i64>* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  %ret = load <2 x i64>, <2 x i64>* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  %ret = load <2 x i64>, ptr %retptr
   ret <2 x i64> %ret
 }
 
-define float @load-pre-indexed-float3(%pre.struct.float** %this, i1 %cond,
-                                      %pre.struct.float* %load2) nounwind {
+define float @load-pre-indexed-float3(ptr %this, i1 %cond,
+                                      ptr %load2) nounwind {
 ; CHECK-LABEL: load-pre-indexed-float3
 ; CHECK: ldr s{{[0-9]+}}, [x{{[0-9]+}}, #8]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.float*, %pre.struct.float** %this
-  %gep1 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load1, i64 0, i32 2
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.float, ptr %load1, i64 0, i32 2
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load2, i64 0, i32 3
+  %gep2 = getelementptr inbounds %pre.struct.float, ptr %load2, i64 0, i32 3
   br label %return
 return:
-  %retptr = phi float* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  %ret = load float, float* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  %ret = load float, ptr %retptr
   ret float %ret
 }
 
-define double @load-pre-indexed-double3(%pre.struct.double** %this, i1 %cond,
-                                        %pre.struct.double* %load2) nounwind {
+define double @load-pre-indexed-double3(ptr %this, i1 %cond,
+                                        ptr %load2) nounwind {
 ; CHECK-LABEL: load-pre-indexed-double3
 ; CHECK: ldr d{{[0-9]+}}, [x{{[0-9]+}}, #16]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.double*, %pre.struct.double** %this
-  %gep1 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load1, i64 0, i32 2
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.double, ptr %load1, i64 0, i32 2
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load2, i64 0, i32 3
+  %gep2 = getelementptr inbounds %pre.struct.double, ptr %load2, i64 0, i32 3
   br label %return
 return:
-  %retptr = phi double* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  %ret = load double, double* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  %ret = load double, ptr %retptr
   ret double %ret
 }
 
@@ -473,193 +473,193 @@ return:
 ;
 ; with X being either w0, x0, s0, d0 or q0.
 
-define void @store-pre-indexed-word2(%pre.struct.i32** %this, i1 %cond,
-                                     %pre.struct.i32* %load2,
+define void @store-pre-indexed-word2(ptr %this, i1 %cond,
+                                     ptr %load2,
                                      i32 %val) nounwind {
 ; CHECK-LABEL: store-pre-indexed-word2
 ; CHECK: str w{{[0-9]+}}, [x{{[0-9]+}}, #4]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i32*, %pre.struct.i32** %this
-  %gep1 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load1, i64 0, i32 1
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.i32, ptr %load1, i64 0, i32 1
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load2, i64 0, i32 2
+  %gep2 = getelementptr inbounds %pre.struct.i32, ptr %load2, i64 0, i32 2
   br label %return
 return:
-  %retptr = phi i32* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  store i32 %val, i32* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  store i32 %val, ptr %retptr
   ret void
 }
 
-define void @store-pre-indexed-doubleword2(%pre.struct.i64** %this, i1 %cond,
-                                           %pre.struct.i64* %load2,
+define void @store-pre-indexed-doubleword2(ptr %this, i1 %cond,
+                                           ptr %load2,
                                            i64 %val) nounwind {
 ; CHECK-LABEL: store-pre-indexed-doubleword2
 ; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}, #8]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i64*, %pre.struct.i64** %this
-  %gep1 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load1, i64 0, i32 1
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.i64, ptr %load1, i64 0, i32 1
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load2, i64 0, i32 2
+  %gep2 = getelementptr inbounds %pre.struct.i64, ptr %load2, i64 0, i32 2
   br label %return
 return:
-  %retptr = phi i64* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  store i64 %val, i64* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  store i64 %val, ptr %retptr
   ret void
 }
 
-define void @store-pre-indexed-quadword2(%pre.struct.i128** %this, i1 %cond,
-                                         %pre.struct.i128* %load2,
+define void @store-pre-indexed-quadword2(ptr %this, i1 %cond,
+                                         ptr %load2,
                                          <2 x i64> %val) nounwind {
 ; CHECK-LABEL: store-pre-indexed-quadword2
 ; CHECK: str q{{[0-9]+}}, [x{{[0-9]+}}, #16]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i128*, %pre.struct.i128** %this
-  %gep1 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load1, i64 0, i32 1
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.i128, ptr %load1, i64 0, i32 1
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load2, i64 0, i32 2
+  %gep2 = getelementptr inbounds %pre.struct.i128, ptr %load2, i64 0, i32 2
   br label %return
 return:
-  %retptr = phi <2 x i64>* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  store <2 x i64> %val, <2 x i64>* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  store <2 x i64> %val, ptr %retptr
   ret void
 }
 
-define void @store-pre-indexed-float2(%pre.struct.float** %this, i1 %cond,
-                                      %pre.struct.float* %load2,
+define void @store-pre-indexed-float2(ptr %this, i1 %cond,
+                                      ptr %load2,
                                       float %val) nounwind {
 ; CHECK-LABEL: store-pre-indexed-float2
 ; CHECK: str s{{[0-9]+}}, [x{{[0-9]+}}, #4]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.float*, %pre.struct.float** %this
-  %gep1 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load1, i64 0, i32 1
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.float, ptr %load1, i64 0, i32 1
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load2, i64 0, i32 2
+  %gep2 = getelementptr inbounds %pre.struct.float, ptr %load2, i64 0, i32 2
   br label %return
 return:
-  %retptr = phi float* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  store float %val, float* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  store float %val, ptr %retptr
   ret void
 }
 
-define void @store-pre-indexed-double2(%pre.struct.double** %this, i1 %cond,
-                                      %pre.struct.double* %load2,
+define void @store-pre-indexed-double2(ptr %this, i1 %cond,
+                                      ptr %load2,
                                       double %val) nounwind {
 ; CHECK-LABEL: store-pre-indexed-double2
 ; CHECK: str d{{[0-9]+}}, [x{{[0-9]+}}, #8]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.double*, %pre.struct.double** %this
-  %gep1 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load1, i64 0, i32 1
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.double, ptr %load1, i64 0, i32 1
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load2, i64 0, i32 2
+  %gep2 = getelementptr inbounds %pre.struct.double, ptr %load2, i64 0, i32 2
   br label %return
 return:
-  %retptr = phi double* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  store double %val, double* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  store double %val, ptr %retptr
   ret void
 }
 
-define void @store-pre-indexed-word3(%pre.struct.i32** %this, i1 %cond,
-                                     %pre.struct.i32* %load2,
+define void @store-pre-indexed-word3(ptr %this, i1 %cond,
+                                     ptr %load2,
                                      i32 %val) nounwind {
 ; CHECK-LABEL: store-pre-indexed-word3
 ; CHECK: str w{{[0-9]+}}, [x{{[0-9]+}}, #12]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i32*, %pre.struct.i32** %this
-  %gep1 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load1, i64 0, i32 3
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.i32, ptr %load1, i64 0, i32 3
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load2, i64 0, i32 4
+  %gep2 = getelementptr inbounds %pre.struct.i32, ptr %load2, i64 0, i32 4
   br label %return
 return:
-  %retptr = phi i32* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  store i32 %val, i32* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  store i32 %val, ptr %retptr
   ret void
 }
 
-define void @store-pre-indexed-doubleword3(%pre.struct.i64** %this, i1 %cond,
-                                           %pre.struct.i64* %load2,
+define void @store-pre-indexed-doubleword3(ptr %this, i1 %cond,
+                                           ptr %load2,
                                            i64 %val) nounwind {
 ; CHECK-LABEL: store-pre-indexed-doubleword3
 ; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}, #24]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i64*, %pre.struct.i64** %this
-  %gep1 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load1, i64 0, i32 3
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.i64, ptr %load1, i64 0, i32 3
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load2, i64 0, i32 4
+  %gep2 = getelementptr inbounds %pre.struct.i64, ptr %load2, i64 0, i32 4
   br label %return
 return:
-  %retptr = phi i64* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  store i64 %val, i64* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  store i64 %val, ptr %retptr
   ret void
 }
 
-define void @store-pre-indexed-quadword3(%pre.struct.i128** %this, i1 %cond,
-                                         %pre.struct.i128* %load2,
+define void @store-pre-indexed-quadword3(ptr %this, i1 %cond,
+                                         ptr %load2,
                                          <2 x i64> %val) nounwind {
 ; CHECK-LABEL: store-pre-indexed-quadword3
 ; CHECK: str q{{[0-9]+}}, [x{{[0-9]+}}, #32]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.i128*, %pre.struct.i128** %this
-  %gep1 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load1, i64 0, i32 2
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.i128, ptr %load1, i64 0, i32 2
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load2, i64 0, i32 3
+  %gep2 = getelementptr inbounds %pre.struct.i128, ptr %load2, i64 0, i32 3
   br label %return
 return:
-  %retptr = phi <2 x i64>* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  store <2 x i64> %val, <2 x i64>* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  store <2 x i64> %val, ptr %retptr
   ret void
 }
 
-define void @store-pre-indexed-float3(%pre.struct.float** %this, i1 %cond,
-                                      %pre.struct.float* %load2,
+define void @store-pre-indexed-float3(ptr %this, i1 %cond,
+                                      ptr %load2,
                                       float %val) nounwind {
 ; CHECK-LABEL: store-pre-indexed-float3
 ; CHECK: str s{{[0-9]+}}, [x{{[0-9]+}}, #8]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.float*, %pre.struct.float** %this
-  %gep1 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load1, i64 0, i32 2
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.float, ptr %load1, i64 0, i32 2
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load2, i64 0, i32 3
+  %gep2 = getelementptr inbounds %pre.struct.float, ptr %load2, i64 0, i32 3
   br label %return
 return:
-  %retptr = phi float* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  store float %val, float* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  store float %val, ptr %retptr
   ret void
 }
 
-define void @store-pre-indexed-double3(%pre.struct.double** %this, i1 %cond,
-                                      %pre.struct.double* %load2,
+define void @store-pre-indexed-double3(ptr %this, i1 %cond,
+                                      ptr %load2,
                                       double %val) nounwind {
 ; CHECK-LABEL: store-pre-indexed-double3
 ; CHECK: str d{{[0-9]+}}, [x{{[0-9]+}}, #16]!
   br i1 %cond, label %if.then, label %if.end
 if.then:
-  %load1 = load %pre.struct.double*, %pre.struct.double** %this
-  %gep1 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load1, i64 0, i32 2
+  %load1 = load ptr, ptr %this
+  %gep1 = getelementptr inbounds %pre.struct.double, ptr %load1, i64 0, i32 2
   br label %return
 if.end:
-  %gep2 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load2, i64 0, i32 3
+  %gep2 = getelementptr inbounds %pre.struct.double, ptr %load2, i64 0, i32 3
   br label %return
 return:
-  %retptr = phi double* [ %gep1, %if.then ], [ %gep2, %if.end ]
-  store double %val, double* %retptr
+  %retptr = phi ptr [ %gep1, %if.then ], [ %gep2, %if.end ]
+  store double %val, ptr %retptr
   ret void
 }
 
@@ -673,23 +673,23 @@ return:
 ;
 ; with X being either w0, x0, s0, d0 or q0.
 
-define void @load-post-indexed-byte(i8* %array, i64 %count) nounwind {
+define void @load-post-indexed-byte(ptr %array, i64 %count) nounwind {
 ; CHECK-LABEL: load-post-indexed-byte
 ; CHECK: ldrb w{{[0-9]+}}, [x{{[0-9]+}}], #4
 entry:
-  %gep1 = getelementptr i8, i8* %array, i64 2
+  %gep1 = getelementptr i8, ptr %array, i64 2
   br label %body
 
 body:
-  %iv2 = phi i8* [ %gep3, %body ], [ %gep1, %entry ]
+  %iv2 = phi ptr [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
-  %gep2 = getelementptr i8, i8* %iv2, i64 -1
-  %load = load i8, i8* %gep2
+  %gep2 = getelementptr i8, ptr %iv2, i64 -1
+  %load = load i8, ptr %gep2
   call void @use-byte(i8 %load)
-  %load2 = load i8, i8* %iv2
+  %load2 = load i8, ptr %iv2
   call void @use-byte(i8 %load2)
   %iv.next = add i64 %iv, -4
-  %gep3 = getelementptr i8, i8* %iv2, i64 4
+  %gep3 = getelementptr i8, ptr %iv2, i64 4
   %cond = icmp eq i64 %iv.next, 0
   br i1 %cond, label %exit, label %body
 
@@ -697,23 +697,23 @@ exit:
   ret void
 }
 
-define void @load-post-indexed-halfword(i16* %array, i64 %count) nounwind {
+define void @load-post-indexed-halfword(ptr %array, i64 %count) nounwind {
 ; CHECK-LABEL: load-post-indexed-halfword
 ; CHECK: ldrh w{{[0-9]+}}, [x{{[0-9]+}}], #8
 entry:
-  %gep1 = getelementptr i16, i16* %array, i64 2
+  %gep1 = getelementptr i16, ptr %array, i64 2
   br label %body
 
 body:
-  %iv2 = phi i16* [ %gep3, %body ], [ %gep1, %entry ]
+  %iv2 = phi ptr [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
-  %gep2 = getelementptr i16, i16* %iv2, i64 -1
-  %load = load i16, i16* %gep2
+  %gep2 = getelementptr i16, ptr %iv2, i64 -1
+  %load = load i16, ptr %gep2
   call void @use-halfword(i16 %load)
-  %load2 = load i16, i16* %iv2
+  %load2 = load i16, ptr %iv2
   call void @use-halfword(i16 %load2)
   %iv.next = add i64 %iv, -4
-  %gep3 = getelementptr i16, i16* %iv2, i64 4
+  %gep3 = getelementptr i16, ptr %iv2, i64 4
   %cond = icmp eq i64 %iv.next, 0
   br i1 %cond, label %exit, label %body
 
@@ -721,23 +721,23 @@ exit:
   ret void
 }
 
-define void @load-post-indexed-word(i32* %array, i64 %count) nounwind {
+define void @load-post-indexed-word(ptr %array, i64 %count) nounwind {
 ; CHECK-LABEL: load-post-indexed-word
 ; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}], #16
 entry:
-  %gep1 = getelementptr i32, i32* %array, i64 2
+  %gep1 = getelementptr i32, ptr %array, i64 2
   br label %body
 
 body:
-  %iv2 = phi i32* [ %gep3, %body ], [ %gep1, %entry ]
+  %iv2 = phi ptr [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
-  %gep2 = getelementptr i32, i32* %iv2, i64 -1
-  %load = load i32, i32* %gep2
+  %gep2 = getelementptr i32, ptr %iv2, i64 -1
+  %load = load i32, ptr %gep2
   call void @use-word(i32 %load)
-  %load2 = load i32, i32* %iv2
+  %load2 = load i32, ptr %iv2
   call void @use-word(i32 %load2)
   %iv.next = add i64 %iv, -4
-  %gep3 = getelementptr i32, i32* %iv2, i64 4
+  %gep3 = getelementptr i32, ptr %iv2, i64 4
   %cond = icmp eq i64 %iv.next, 0
   br i1 %cond, label %exit, label %body
 
@@ -745,23 +745,23 @@ exit:
   ret void
 }
 
-define void @load-post-indexed-doubleword(i64* %array, i64 %count) nounwind {
+define void @load-post-indexed-doubleword(ptr %array, i64 %count) nounwind {
 ; CHECK-LABEL: load-post-indexed-doubleword
 ; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}], #32
 entry:
-  %gep1 = getelementptr i64, i64* %array, i64 2
+  %gep1 = getelementptr i64, ptr %array, i64 2
   br label %body
 
 body:
-  %iv2 = phi i64* [ %gep3, %body ], [ %gep1, %entry ]
+  %iv2 = phi ptr [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
-  %gep2 = getelementptr i64, i64* %iv2, i64 -1
-  %load = load i64, i64* %gep2
+  %gep2 = getelementptr i64, ptr %iv2, i64 -1
+  %load = load i64, ptr %gep2
   call void @use-doubleword(i64 %load)
-  %load2 = load i64, i64* %iv2
+  %load2 = load i64, ptr %iv2
   call void @use-doubleword(i64 %load2)
   %iv.next = add i64 %iv, -4
-  %gep3 = getelementptr i64, i64* %iv2, i64 4
+  %gep3 = getelementptr i64, ptr %iv2, i64 4
   %cond = icmp eq i64 %iv.next, 0
   br i1 %cond, label %exit, label %body
 
@@ -769,23 +769,23 @@ exit:
   ret void
 }
 
-define void @load-post-indexed-quadword(<2 x i64>* %array, i64 %count) nounwind {
+define void @load-post-indexed-quadword(ptr %array, i64 %count) nounwind {
 ; CHECK-LABEL: load-post-indexed-quadword
 ; CHECK: ldr q{{[0-9]+}}, [x{{[0-9]+}}], #64
 entry:
-  %gep1 = getelementptr <2 x i64>, <2 x i64>* %array, i64 2
+  %gep1 = getelementptr <2 x i64>, ptr %array, i64 2
   br label %body
 
 body:
-  %iv2 = phi <2 x i64>* [ %gep3, %body ], [ %gep1, %entry ]
+  %iv2 = phi ptr [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
-  %gep2 = getelementptr <2 x i64>, <2 x i64>* %iv2, i64 -1
-  %load = load <2 x i64>, <2 x i64>* %gep2
+  %gep2 = getelementptr <2 x i64>, ptr %iv2, i64 -1
+  %load = load <2 x i64>, ptr %gep2
   call void @use-quadword(<2 x i64> %load)
-  %load2 = load <2 x i64>, <2 x i64>* %iv2
+  %load2 = load <2 x i64>, ptr %iv2
   call void @use-quadword(<2 x i64> %load2)
   %iv.next = add i64 %iv, -4
-  %gep3 = getelementptr <2 x i64>, <2 x i64>* %iv2, i64 4
+  %gep3 = getelementptr <2 x i64>, ptr %iv2, i64 4
   %cond = icmp eq i64 %iv.next, 0
   br i1 %cond, label %exit, label %body
 
@@ -793,23 +793,23 @@ exit:
   ret void
 }
 
-define void @load-post-indexed-float(float* %array, i64 %count) nounwind {
+define void @load-post-indexed-float(ptr %array, i64 %count) nounwind {
 ; CHECK-LABEL: load-post-indexed-float
 ; CHECK: ldr s{{[0-9]+}}, [x{{[0-9]+}}], #16
 entry:
-  %gep1 = getelementptr float, float* %array, i64 2
+  %gep1 = getelementptr float, ptr %array, i64 2
   br label %body
 
 body:
-  %iv2 = phi float* [ %gep3, %body ], [ %gep1, %entry ]
+  %iv2 = phi ptr [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
-  %gep2 = getelementptr float, float* %iv2, i64 -1
-  %load = load float, float* %gep2
+  %gep2 = getelementptr float, ptr %iv2, i64 -1
+  %load = load float, ptr %gep2
   call void @use-float(float %load)
-  %load2 = load float, float* %iv2
+  %load2 = load float, ptr %iv2
   call void @use-float(float %load2)
   %iv.next = add i64 %iv, -4
-  %gep3 = getelementptr float, float* %iv2, i64 4
+  %gep3 = getelementptr float, ptr %iv2, i64 4
   %cond = icmp eq i64 %iv.next, 0
   br i1 %cond, label %exit, label %body
 
@@ -817,23 +817,23 @@ exit:
   ret void
 }
 
-define void @load-post-indexed-double(double* %array, i64 %count) nounwind {
+define void @load-post-indexed-double(ptr %array, i64 %count) nounwind {
 ; CHECK-LABEL: load-post-indexed-double
 ; CHECK: ldr d{{[0-9]+}}, [x{{[0-9]+}}], #32
 entry:
-  %gep1 = getelementptr double, double* %array, i64 2
+  %gep1 = getelementptr double, ptr %array, i64 2
   br label %body
 
 body:
-  %iv2 = phi double* [ %gep3, %body ], [ %gep1, %entry ]
+  %iv2 = phi ptr [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
-  %gep2 = getelementptr double, double* %iv2, i64 -1
-  %load = load double, double* %gep2
+  %gep2 = getelementptr double, ptr %iv2, i64 -1
+  %load = load double, ptr %gep2
   call void @use-double(double %load)
-  %load2 = load double, double* %iv2
+  %load2 = load double, ptr %iv2
   call void @use-double(double %load2)
   %iv.next = add i64 %iv, -4
-  %gep3 = getelementptr double, double* %iv2, i64 4
+  %gep3 = getelementptr double, ptr %iv2, i64 4
   %cond = icmp eq i64 %iv.next, 0
   br i1 %cond, label %exit, label %body
 
@@ -851,22 +851,22 @@ exit:
 ;
 ; with X being either w0, x0, s0, d0 or q0.
 
-define void @store-post-indexed-byte(i8* %array, i64 %count, i8 %val) nounwind {
+define void @store-post-indexed-byte(ptr %array, i64 %count, i8 %val) nounwind {
 ; CHECK-LABEL: store-post-indexed-byte
 ; CHECK: strb w{{[0-9]+}}, [x{{[0-9]+}}], #4
 entry:
-  %gep1 = getelementptr i8, i8* %array, i64 2
+  %gep1 = getelementptr i8, ptr %array, i64 2
   br label %body
 
 body:
-  %iv2 = phi i8* [ %gep3, %body ], [ %gep1, %entry ]
+  %iv2 = phi ptr [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
-  %gep2 = getelementptr i8, i8* %iv2, i64 -1
-  %load = load i8, i8* %gep2
+  %gep2 = getelementptr i8, ptr %iv2, i64 -1
+  %load = load i8, ptr %gep2
   call void @use-byte(i8 %load)
-  store i8 %val, i8* %iv2
+  store i8 %val, ptr %iv2
   %iv.next = add i64 %iv, -4
-  %gep3 = getelementptr i8, i8* %iv2, i64 4
+  %gep3 = getelementptr i8, ptr %iv2, i64 4
   %cond = icmp eq i64 %iv.next, 0
   br i1 %cond, label %exit, label %body
 
@@ -874,22 +874,22 @@ exit:
   ret void
 }
 
-define void @store-post-indexed-halfword(i16* %array, i64 %count, i16 %val) nounwind {
+define void @store-post-indexed-halfword(ptr %array, i64 %count, i16 %val) nounwind {
 ; CHECK-LABEL: store-post-indexed-halfword
 ; CHECK: strh w{{[0-9]+}}, [x{{[0-9]+}}], #8
 entry:
-  %gep1 = getelementptr i16, i16* %array, i64 2
+  %gep1 = getelementptr i16, ptr %array, i64 2
   br label %body
 
 body:
-  %iv2 = phi i16* [ %gep3, %body ], [ %gep1, %entry ]
+  %iv2 = phi ptr [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
-  %gep2 = getelementptr i16, i16* %iv2, i64 -1
-  %load = load i16, i16* %gep2
+  %gep2 = getelementptr i16, ptr %iv2, i64 -1
+  %load = load i16, ptr %gep2
   call void @use-halfword(i16 %load)
-  store i16 %val, i16* %iv2
+  store i16 %val, ptr %iv2
   %iv.next = add i64 %iv, -4
-  %gep3 = getelementptr i16, i16* %iv2, i64 4
+  %gep3 = getelementptr i16, ptr %iv2, i64 4
   %cond = icmp eq i64 %iv.next, 0
   br i1 %cond, label %exit, label %body
 
@@ -897,22 +897,22 @@ exit:
   ret void
 }
 
-define void @store-post-indexed-word(i32* %array, i64 %count, i32 %val) nounwind {
+define void @store-post-indexed-word(ptr %array, i64 %count, i32 %val) nounwind {
 ; CHECK-LABEL: store-post-indexed-word
 ; CHECK: str w{{[0-9]+}}, [x{{[0-9]+}}], #16
 entry:
-  %gep1 = getelementptr i32, i32* %array, i64 2
+  %gep1 = getelementptr i32, ptr %array, i64 2
   br label %body
 
 body:
-  %iv2 = phi i32* [ %gep3, %body ], [ %gep1, %entry ]
+  %iv2 = phi ptr [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
-  %gep2 = getelementptr i32, i32* %iv2, i64 -1
-  %load = load i32, i32* %gep2
+  %gep2 = getelementptr i32, ptr %iv2, i64 -1
+  %load = load i32, ptr %gep2
   call void @use-word(i32 %load)
-  store i32 %val, i32* %iv2
+  store i32 %val, ptr %iv2
   %iv.next = add i64 %iv, -4
-  %gep3 = getelementptr i32, i32* %iv2, i64 4
+  %gep3 = getelementptr i32, ptr %iv2, i64 4
   %cond = icmp eq i64 %iv.next, 0
   br i1 %cond, label %exit, label %body
 
@@ -920,22 +920,22 @@ exit:
   ret void
 }
 
-define void @store-post-indexed-doubleword(i64* %array, i64 %count, i64 %val) nounwind {
+define void @store-post-indexed-doubleword(ptr %array, i64 %count, i64 %val) nounwind {
 ; CHECK-LABEL: store-post-indexed-doubleword
 ; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}], #32
 entry:
-  %gep1 = getelementptr i64, i64* %array, i64 2
+  %gep1 = getelementptr i64, ptr %array, i64 2
   br label %body
 
 body:
-  %iv2 = phi i64* [ %gep3, %body ], [ %gep1, %entry ]
+  %iv2 = phi ptr [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
-  %gep2 = getelementptr i64, i64* %iv2, i64 -1
-  %load = load i64, i64* %gep2
+  %gep2 = getelementptr i64, ptr %iv2, i64 -1
+  %load = load i64, ptr %gep2
   call void @use-doubleword(i64 %load)
-  store i64 %val, i64* %iv2
+  store i64 %val, ptr %iv2
   %iv.next = add i64 %iv, -4
-  %gep3 = getelementptr i64, i64* %iv2, i64 4
+  %gep3 = getelementptr i64, ptr %iv2, i64 4
   %cond = icmp eq i64 %iv.next, 0
   br i1 %cond, label %exit, label %body
 
@@ -943,22 +943,22 @@ exit:
   ret void
 }
 
-define void @store-post-indexed-quadword(<2 x i64>* %array, i64 %count, <2 x i64> %val) nounwind {
+define void @store-post-indexed-quadword(ptr %array, i64 %count, <2 x i64> %val) nounwind {
 ; CHECK-LABEL: store-post-indexed-quadword
 ; CHECK: str q{{[0-9]+}}, [x{{[0-9]+}}], #64
 entry:
-  %gep1 = getelementptr <2 x i64>, <2 x i64>* %array, i64 2
+  %gep1 = getelementptr <2 x i64>, ptr %array, i64 2
   br label %body
 
 body:
-  %iv2 = phi <2 x i64>* [ %gep3, %body ], [ %gep1, %entry ]
+  %iv2 = phi ptr [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
-  %gep2 = getelementptr <2 x i64>, <2 x i64>* %iv2, i64 -1
-  %load = load <2 x i64>, <2 x i64>* %gep2
+  %gep2 = getelementptr <2 x i64>, ptr %iv2, i64 -1
+  %load = load <2 x i64>, ptr %gep2
   call void @use-quadword(<2 x i64> %load)
-  store <2 x i64> %val, <2 x i64>* %iv2
+  store <2 x i64> %val, ptr %iv2
   %iv.next = add i64 %iv, -4
-  %gep3 = getelementptr <2 x i64>, <2 x i64>* %iv2, i64 4
+  %gep3 = getelementptr <2 x i64>, ptr %iv2, i64 4
   %cond = icmp eq i64 %iv.next, 0
   br i1 %cond, label %exit, label %body
 
@@ -966,22 +966,22 @@ exit:
   ret void
 }
 
-define void @store-post-indexed-float(float* %array, i64 %count, float %val) nounwind {
+define void @store-post-indexed-float(ptr %array, i64 %count, float %val) nounwind {
 ; CHECK-LABEL: store-post-indexed-float
 ; CHECK: str s{{[0-9]+}}, [x{{[0-9]+}}], #16
 entry:
-  %gep1 = getelementptr float, float* %array, i64 2
+  %gep1 = getelementptr float, ptr %array, i64 2
   br label %body
 
 body:
-  %iv2 = phi float* [ %gep3, %body ], [ %gep1, %entry ]
+  %iv2 = phi ptr [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
-  %gep2 = getelementptr float, float* %iv2, i64 -1
-  %load = load float, float* %gep2
+  %gep2 = getelementptr float, ptr %iv2, i64 -1
+  %load = load float, ptr %gep2
   call void @use-float(float %load)
-  store float %val, float* %iv2
+  store float %val, ptr %iv2
   %iv.next = add i64 %iv, -4
-  %gep3 = getelementptr float, float* %iv2, i64 4
+  %gep3 = getelementptr float, ptr %iv2, i64 4
   %cond = icmp eq i64 %iv.next, 0
   br i1 %cond, label %exit, label %body
 
@@ -989,22 +989,22 @@ exit:
   ret void
 }
 
-define void @store-post-indexed-double(double* %array, i64 %count, double %val) nounwind {
+define void @store-post-indexed-double(ptr %array, i64 %count, double %val) nounwind {
 ; CHECK-LABEL: store-post-indexed-double
 ; CHECK: str d{{[0-9]+}}, [x{{[0-9]+}}], #32
 entry:
-  %gep1 = getelementptr double, double* %array, i64 2
+  %gep1 = getelementptr double, ptr %array, i64 2
   br label %body
 
 body:
-  %iv2 = phi double* [ %gep3, %body ], [ %gep1, %entry ]
+  %iv2 = phi ptr [ %gep3, %body ], [ %gep1, %entry ]
   %iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
-  %gep2 = getelementptr double, double* %iv2, i64 -1
-  %load = load double, double* %gep2
+  %gep2 = getelementptr double, ptr %iv2, i64 -1
+  %load = load double, ptr %gep2
   call void @use-double(double %load)
-  store double %val, double* %iv2
+  store double %val, ptr %iv2
   %iv.next = add i64 %iv, -4
-  %gep3 = getelementptr double, double* %iv2, i64 4
+  %gep3 = getelementptr double, ptr %iv2, i64 4
   %cond = icmp eq i64 %iv.next, 0
   br i1 %cond, label %exit, label %body
 
@@ -1035,15 +1035,15 @@ define void @store-pair-post-indexed-word() nounwind {
   %src = alloca { i32, i32 }, align 8
   %dst = alloca { i32, i32 }, align 8
 
-  %src.realp = getelementptr inbounds { i32, i32 }, { i32, i32 }* %src, i32 0, i32 0
-  %src.real = load i32, i32* %src.realp
-  %src.imagp = getelementptr inbounds { i32, i32 }, { i32, i32 }* %src, i32 0, i32 1
-  %src.imag = load i32, i32* %src.imagp
+  %src.realp = getelementptr inbounds { i32, i32 }, ptr %src, i32 0, i32 0
+  %src.real = load i32, ptr %src.realp
+  %src.imagp = getelementptr inbounds { i32, i32 }, ptr %src, i32 0, i32 1
+  %src.imag = load i32, ptr %src.imagp
 
-  %dst.realp = getelementptr inbounds { i32, i32 }, { i32, i32 }* %dst, i32 0, i32 0
-  %dst.imagp = getelementptr inbounds { i32, i32 }, { i32, i32 }* %dst, i32 0, i32 1
-  store i32 %src.real, i32* %dst.realp
-  store i32 %src.imag, i32* %dst.imagp
+  %dst.realp = getelementptr inbounds { i32, i32 }, ptr %dst, i32 0, i32 0
+  %dst.imagp = getelementptr inbounds { i32, i32 }, ptr %dst, i32 0, i32 1
+  store i32 %src.real, ptr %dst.realp
+  store i32 %src.imag, ptr %dst.imagp
   ret void
 }
 
@@ -1054,15 +1054,15 @@ define void @store-pair-post-indexed-doubleword() nounwind {
   %src = alloca { i64, i64 }, align 8
   %dst = alloca { i64, i64 }, align 8
 
-  %src.realp = getelementptr inbounds { i64, i64 }, { i64, i64 }* %src, i32 0, i32 0
-  %src.real = load i64, i64* %src.realp
-  %src.imagp = getelementptr inbounds { i64, i64 }, { i64, i64 }* %src, i32 0, i32 1
-  %src.imag = load i64, i64* %src.imagp
+  %src.realp = getelementptr inbounds { i64, i64 }, ptr %src, i32 0, i32 0
+  %src.real = load i64, ptr %src.realp
+  %src.imagp = getelementptr inbounds { i64, i64 }, ptr %src, i32 0, i32 1
+  %src.imag = load i64, ptr %src.imagp
 
-  %dst.realp = getelementptr inbounds { i64, i64 }, { i64, i64 }* %dst, i32 0, i32 0
-  %dst.imagp = getelementptr inbounds { i64, i64 }, { i64, i64 }* %dst, i32 0, i32 1
-  store i64 %src.real, i64* %dst.realp
-  store i64 %src.imag, i64* %dst.imagp
+  %dst.realp = getelementptr inbounds { i64, i64 }, ptr %dst, i32 0, i32 0
+  %dst.imagp = getelementptr inbounds { i64, i64 }, ptr %dst, i32 0, i32 1
+  store i64 %src.real, ptr %dst.realp
+  store i64 %src.imag, ptr %dst.imagp
   ret void
 }
 
@@ -1073,15 +1073,15 @@ define void @store-pair-post-indexed-float() nounwind {
   %src = alloca { float, float }, align 8
   %dst = alloca { float, float }, align 8
 
-  %src.realp = getelementptr inbounds { float, float }, { float, float }* %src, i32 0, i32 0
-  %src.real = load float, float* %src.realp
-  %src.imagp = getelementptr inbounds { float, float }, { float, float }* %src, i32 0, i32 1
-  %src.imag = load float, float* %src.imagp
+  %src.realp = getelementptr inbounds { float, float }, ptr %src, i32 0, i32 0
+  %src.real = load float, ptr %src.realp
+  %src.imagp = getelementptr inbounds { float, float }, ptr %src, i32 0, i32 1
+  %src.imag = load float, ptr %src.imagp
 
-  %dst.realp = getelementptr inbounds { float, float }, { float, float }* %dst, i32 0, i32 0
-  %dst.imagp = getelementptr inbounds { float, float }, { float, float }* %dst, i32 0, i32 1
-  store float %src.real, float* %dst.realp
-  store float %src.imag, float* %dst.imagp
+  %dst.realp = getelementptr inbounds { float, float }, ptr %dst, i32 0, i32 0
+  %dst.imagp = getelementptr inbounds { float, float }, ptr %dst, i32 0, i32 1
+  store float %src.real, ptr %dst.realp
+  store float %src.imag, ptr %dst.imagp
   ret void
 }
 
@@ -1092,15 +1092,15 @@ define void @store-pair-post-indexed-double() nounwind {
   %src = alloca { double, double }, align 8
   %dst = alloca { double, double }, align 8
 
-  %src.realp = getelementptr inbounds { double, double }, { double, double }* %src, i32 0, i32 0
-  %src.real = load double, double* %src.realp
-  %src.imagp = getelementptr inbounds { double, double }, { double, double }* %src, i32 0, i32 1
-  %src.imag = load double, double* %src.imagp
+  %src.realp = getelementptr inbounds { double, double }, ptr %src, i32 0, i32 0
+  %src.real = load double, ptr %src.realp
+  %src.imagp = getelementptr inbounds { double, double }, ptr %src, i32 0, i32 1
+  %src.imag = load double, ptr %src.imagp
 
-  %dst.realp = getelementptr inbounds { double, double }, { double, double }* %dst, i32 0, i32 0
-  %dst.imagp = getelementptr inbounds { double, double }, { double, double }* %dst, i32 0, i32 1
-  store double %src.real, double* %dst.realp
-  store double %src.imag, double* %dst.imagp
+  %dst.realp = getelementptr inbounds { double, double }, ptr %dst, i32 0, i32 0
+  %dst.imagp = getelementptr inbounds { double, double }, ptr %dst, i32 0, i32 1
+  store double %src.real, ptr %dst.realp
+  store double %src.imag, ptr %dst.imagp
   ret void
 }
 
@@ -1114,151 +1114,151 @@ define void @store-pair-post-indexed-double() nounwind {
 ;
 ; with X being either w0, x0, s0, d0 or q0.
 
-define void @post-indexed-sub-word(i32* %a, i32* %b, i64 %count) nounwind {
+define void @post-indexed-sub-word(ptr %a, ptr %b, i64 %count) nounwind {
 ; CHECK-LABEL: post-indexed-sub-word
 ; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}], #-8
 ; CHECK: str w{{[0-9]+}}, [x{{[0-9]+}}], #-8
   br label %for.body
 for.body:
-  %phi1 = phi i32* [ %gep4, %for.body ], [ %b, %0 ]
-  %phi2 = phi i32* [ %gep3, %for.body ], [ %a, %0 ]
+  %phi1 = phi ptr [ %gep4, %for.body ], [ %b, %0 ]
+  %phi2 = phi ptr [ %gep3, %for.body ], [ %a, %0 ]
   %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
-  %gep1 = getelementptr i32, i32* %phi1, i64 -1
-  %load1 = load i32, i32* %gep1
-  %gep2 = getelementptr i32, i32* %phi2, i64 -1
-  store i32 %load1, i32* %gep2
-  %load2 = load i32, i32* %phi1
-  store i32 %load2, i32* %phi2
+  %gep1 = getelementptr i32, ptr %phi1, i64 -1
+  %load1 = load i32, ptr %gep1
+  %gep2 = getelementptr i32, ptr %phi2, i64 -1
+  store i32 %load1, ptr %gep2
+  %load2 = load i32, ptr %phi1
+  store i32 %load2, ptr %phi2
   %dec.i = add nsw i64 %i, -1
-  %gep3 = getelementptr i32, i32* %phi2, i64 -2
-  %gep4 = getelementptr i32, i32* %phi1, i64 -2
+  %gep3 = getelementptr i32, ptr %phi2, i64 -2
+  %gep4 = getelementptr i32, ptr %phi1, i64 -2
   %cond = icmp sgt i64 %dec.i, 0
   br i1 %cond, label %for.body, label %end
 end:
   ret void
 }
 
-define void @post-indexed-sub-doubleword(i64* %a, i64* %b, i64 %count) nounwind {
+define void @post-indexed-sub-doubleword(ptr %a, ptr %b, i64 %count) nounwind {
 ; CHECK-LABEL: post-indexed-sub-doubleword
 ; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}], #-16
 ; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}], #-16
   br label %for.body
 for.body:
-  %phi1 = phi i64* [ %gep4, %for.body ], [ %b, %0 ]
-  %phi2 = phi i64* [ %gep3, %for.body ], [ %a, %0 ]
+  %phi1 = phi ptr [ %gep4, %for.body ], [ %b, %0 ]
+  %phi2 = phi ptr [ %gep3, %for.body ], [ %a, %0 ]
   %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
-  %gep1 = getelementptr i64, i64* %phi1, i64 -1
-  %load1 = load i64, i64* %gep1
-  %gep2 = getelementptr i64, i64* %phi2, i64 -1
-  store i64 %load1, i64* %gep2
-  %load2 = load i64, i64* %phi1
-  store i64 %load2, i64* %phi2
+  %gep1 = getelementptr i64, ptr %phi1, i64 -1
+  %load1 = load i64, ptr %gep1
+  %gep2 = getelementptr i64, ptr %phi2, i64 -1
+  store i64 %load1, ptr %gep2
+  %load2 = load i64, ptr %phi1
+  store i64 %load2, ptr %phi2
   %dec.i = add nsw i64 %i, -1
-  %gep3 = getelementptr i64, i64* %phi2, i64 -2
-  %gep4 = getelementptr i64, i64* %phi1, i64 -2
+  %gep3 = getelementptr i64, ptr %phi2, i64 -2
+  %gep4 = getelementptr i64, ptr %phi1, i64 -2
   %cond = icmp sgt i64 %dec.i, 0
   br i1 %cond, label %for.body, label %end
 end:
   ret void
 }
 
-define void @post-indexed-sub-quadword(<2 x i64>* %a, <2 x i64>* %b, i64 %count) nounwind {
+define void @post-indexed-sub-quadword(ptr %a, ptr %b, i64 %count) nounwind {
 ; CHECK-LABEL: post-indexed-sub-quadword
 ; CHECK: ldr q{{[0-9]+}}, [x{{[0-9]+}}], #-32
 ; CHECK: str q{{[0-9]+}}, [x{{[0-9]+}}], #-32
   br label %for.body
 for.body:
-  %phi1 = phi <2 x i64>* [ %gep4, %for.body ], [ %b, %0 ]
-  %phi2 = phi <2 x i64>* [ %gep3, %for.body ], [ %a, %0 ]
+  %phi1 = phi ptr [ %gep4, %for.body ], [ %b, %0 ]
+  %phi2 = phi ptr [ %gep3, %for.body ], [ %a, %0 ]
   %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
-  %gep1 = getelementptr <2 x i64>, <2 x i64>* %phi1, i64 -1
-  %load1 = load <2 x i64>, <2 x i64>* %gep1
-  %gep2 = getelementptr <2 x i64>, <2 x i64>* %phi2, i64 -1
-  store <2 x i64> %load1, <2 x i64>* %gep2
-  %load2 = load <2 x i64>, <2 x i64>* %phi1
-  store <2 x i64> %load2, <2 x i64>* %phi2
+  %gep1 = getelementptr <2 x i64>, ptr %phi1, i64 -1
+  %load1 = load <2 x i64>, ptr %gep1
+  %gep2 = getelementptr <2 x i64>, ptr %phi2, i64 -1
+  store <2 x i64> %load1, ptr %gep2
+  %load2 = load <2 x i64>, ptr %phi1
+  store <2 x i64> %load2, ptr %phi2
   %dec.i = add nsw i64 %i, -1
-  %gep3 = getelementptr <2 x i64>, <2 x i64>* %phi2, i64 -2
-  %gep4 = getelementptr <2 x i64>, <2 x i64>* %phi1, i64 -2
+  %gep3 = getelementptr <2 x i64>, ptr %phi2, i64 -2
+  %gep4 = getelementptr <2 x i64>, ptr %phi1, i64 -2
   %cond = icmp sgt i64 %dec.i, 0
   br i1 %cond, label %for.body, label %end
 end:
   ret void
 }
 
-define void @post-indexed-sub-float(float* %a, float* %b, i64 %count) nounwind {
+define void @post-indexed-sub-float(ptr %a, ptr %b, i64 %count) nounwind {
 ; CHECK-LABEL: post-indexed-sub-float
 ; CHECK: ldr s{{[0-9]+}}, [x{{[0-9]+}}], #-8
 ; CHECK: str s{{[0-9]+}}, [x{{[0-9]+}}], #-8
   br label %for.body
 for.body:
-  %phi1 = phi float* [ %gep4, %for.body ], [ %b, %0 ]
-  %phi2 = phi float* [ %gep3, %for.body ], [ %a, %0 ]
+  %phi1 = phi ptr [ %gep4, %for.body ], [ %b, %0 ]
+  %phi2 = phi ptr [ %gep3, %for.body ], [ %a, %0 ]
   %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
-  %gep1 = getelementptr float, float* %phi1, i64 -1
-  %load1 = load float, float* %gep1
-  %gep2 = getelementptr float, float* %phi2, i64 -1
-  store float %load1, float* %gep2
-  %load2 = load float, float* %phi1
-  store float %load2, float* %phi2
+  %gep1 = getelementptr float, ptr %phi1, i64 -1
+  %load1 = load float, ptr %gep1
+  %gep2 = getelementptr float, ptr %phi2, i64 -1
+  store float %load1, ptr %gep2
+  %load2 = load float, ptr %phi1
+  store float %load2, ptr %phi2
   %dec.i = add nsw i64 %i, -1
-  %gep3 = getelementptr float, float* %phi2, i64 -2
-  %gep4 = getelementptr float, float* %phi1, i64 -2
+  %gep3 = getelementptr float, ptr %phi2, i64 -2
+  %gep4 = getelementptr float, ptr %phi1, i64 -2
   %cond = icmp sgt i64 %dec.i, 0
   br i1 %cond, label %for.body, label %end
 end:
   ret void
 }
 
-define void @post-indexed-sub-double(double* %a, double* %b, i64 %count) nounwind {
+define void @post-indexed-sub-double(ptr %a, ptr %b, i64 %count) nounwind {
 ; CHECK-LABEL: post-indexed-sub-double
 ; CHECK: ldr d{{[0-9]+}}, [x{{[0-9]+}}], #-16
 ; CHECK: str d{{[0-9]+}}, [x{{[0-9]+}}], #-16
   br label %for.body
 for.body:
-  %phi1 = phi double* [ %gep4, %for.body ], [ %b, %0 ]
-  %phi2 = phi double* [ %gep3, %for.body ], [ %a, %0 ]
+  %phi1 = phi ptr [ %gep4, %for.body ], [ %b, %0 ]
+  %phi2 = phi ptr [ %gep3, %for.body ], [ %a, %0 ]
   %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
-  %gep1 = getelementptr double, double* %phi1, i64 -1
-  %load1 = load double, double* %gep1
-  %gep2 = getelementptr double, double* %phi2, i64 -1
-  store double %load1, double* %gep2
-  %load2 = load double, double* %phi1
-  store double %load2, double* %phi2
+  %gep1 = getelementptr double, ptr %phi1, i64 -1
+  %load1 = load double, ptr %gep1
+  %gep2 = getelementptr double, ptr %phi2, i64 -1
+  store double %load1, ptr %gep2
+  %load2 = load double, ptr %phi1
+  store double %load2, ptr %phi2
   %dec.i = add nsw i64 %i, -1
-  %gep3 = getelementptr double, double* %phi2, i64 -2
-  %gep4 = getelementptr double, double* %phi1, i64 -2
+  %gep3 = getelementptr double, ptr %phi2, i64 -2
+  %gep4 = getelementptr double, ptr %phi1, i64 -2
   %cond = icmp sgt i64 %dec.i, 0
   br i1 %cond, label %for.body, label %end
 end:
   ret void
 }
 
-define void @post-indexed-sub-doubleword-offset-min(i64* %a, i64* %b, i64 %count) nounwind {
+define void @post-indexed-sub-doubleword-offset-min(ptr %a, ptr %b, i64 %count) nounwind {
 ; CHECK-LABEL: post-indexed-sub-doubleword-offset-min
 ; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}], #-256
 ; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}], #-256
   br label %for.body
 for.body:
-  %phi1 = phi i64* [ %gep4, %for.body ], [ %b, %0 ]
-  %phi2 = phi i64* [ %gep3, %for.body ], [ %a, %0 ]
+  %phi1 = phi ptr [ %gep4, %for.body ], [ %b, %0 ]
+  %phi2 = phi ptr [ %gep3, %for.body ], [ %a, %0 ]
   %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
-  %gep1 = getelementptr i64, i64* %phi1, i64 1
-  %load1 = load i64, i64* %gep1
-  %gep2 = getelementptr i64, i64* %phi2, i64 1
-  store i64 %load1, i64* %gep2
-  %load2 = load i64, i64* %phi1
-  store i64 %load2, i64* %phi2
+  %gep1 = getelementptr i64, ptr %phi1, i64 1
+  %load1 = load i64, ptr %gep1
+  %gep2 = getelementptr i64, ptr %phi2, i64 1
+  store i64 %load1, ptr %gep2
+  %load2 = load i64, ptr %phi1
+  store i64 %load2, ptr %phi2
   %dec.i = add nsw i64 %i, -1
-  %gep3 = getelementptr i64, i64* %phi2, i64 -32
-  %gep4 = getelementptr i64, i64* %phi1, i64 -32
+  %gep3 = getelementptr i64, ptr %phi2, i64 -32
+  %gep4 = getelementptr i64, ptr %phi1, i64 -32
   %cond = icmp sgt i64 %dec.i, 0
   br i1 %cond, label %for.body, label %end
 end:
   ret void
 }
 
-define void @post-indexed-doubleword-offset-out-of-range(i64* %a, i64* %b, i64 %count) nounwind {
+define void @post-indexed-doubleword-offset-out-of-range(ptr %a, ptr %b, i64 %count) nounwind {
 ; CHECK-LABEL: post-indexed-doubleword-offset-out-of-range
 ; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}]
 ; CHECK: add x{{[0-9]+}}, x{{[0-9]+}}, #256
@@ -1267,49 +1267,49 @@ define void @post-indexed-doubleword-offset-out-of-range(i64* %a, i64* %b, i64 %
 
   br label %for.body
 for.body:
-  %phi1 = phi i64* [ %gep4, %for.body ], [ %b, %0 ]
-  %phi2 = phi i64* [ %gep3, %for.body ], [ %a, %0 ]
+  %phi1 = phi ptr [ %gep4, %for.body ], [ %b, %0 ]
+  %phi2 = phi ptr [ %gep3, %for.body ], [ %a, %0 ]
   %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
-  %gep1 = getelementptr i64, i64* %phi1, i64 1
-  %load1 = load i64, i64* %gep1
-  %gep2 = getelementptr i64, i64* %phi2, i64 1
-  store i64 %load1, i64* %gep2
-  %load2 = load i64, i64* %phi1
-  store i64 %load2, i64* %phi2
+  %gep1 = getelementptr i64, ptr %phi1, i64 1
+  %load1 = load i64, ptr %gep1
+  %gep2 = getelementptr i64, ptr %phi2, i64 1
+  store i64 %load1, ptr %gep2
+  %load2 = load i64, ptr %phi1
+  store i64 %load2, ptr %phi2
   %dec.i = add nsw i64 %i, -1
-  %gep3 = getelementptr i64, i64* %phi2, i64 32
-  %gep4 = getelementptr i64, i64* %phi1, i64 32
+  %gep3 = getelementptr i64, ptr %phi2, i64 32
+  %gep4 = getelementptr i64, ptr %phi1, i64 32
   %cond = icmp sgt i64 %dec.i, 0
   br i1 %cond, label %for.body, label %end
 end:
   ret void
 }
 
-define void @post-indexed-paired-min-offset(i64* %a, i64* %b, i64 %count) nounwind {
+define void @post-indexed-paired-min-offset(ptr %a, ptr %b, i64 %count) nounwind {
 ; CHECK-LABEL: post-indexed-paired-min-offset
 ; CHECK: ldp x{{[0-9]+}}, x{{[0-9]+}}, [x{{[0-9]+}}], #-512
 ; CHECK: stp x{{[0-9]+}}, x{{[0-9]+}}, [x{{[0-9]+}}], #-512
   br label %for.body
 for.body:
-  %phi1 = phi i64* [ %gep4, %for.body ], [ %b, %0 ]
-  %phi2 = phi i64* [ %gep3, %for.body ], [ %a, %0 ]
+  %phi1 = phi ptr [ %gep4, %for.body ], [ %b, %0 ]
+  %phi2 = phi ptr [ %gep3, %for.body ], [ %a, %0 ]
   %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
-  %gep1 = getelementptr i64, i64* %phi1, i64 1
-  %load1 = load i64, i64* %gep1
-  %gep2 = getelementptr i64, i64* %phi2, i64 1
-  %load2 = load i64, i64* %phi1
-  store i64 %load1, i64* %gep2
-  store i64 %load2, i64* %phi2
+  %gep1 = getelementptr i64, ptr %phi1, i64 1
+  %load1 = load i64, ptr %gep1
+  %gep2 = getelementptr i64, ptr %phi2, i64 1
+  %load2 = load i64, ptr %phi1
+  store i64 %load1, ptr %gep2
+  store i64 %load2, ptr %phi2
   %dec.i = add nsw i64 %i, -1
-  %gep3 = getelementptr i64, i64* %phi2, i64 -64
-  %gep4 = getelementptr i64, i64* %phi1, i64 -64
+  %gep3 = getelementptr i64, ptr %phi2, i64 -64
+  %gep4 = getelementptr i64, ptr %phi1, i64 -64
   %cond = icmp sgt i64 %dec.i, 0
   br i1 %cond, label %for.body, label %end
 end:
   ret void
 }
 
-define void @post-indexed-paired-offset-out-of-range(i64* %a, i64* %b, i64 %count) nounwind {
+define void @post-indexed-paired-offset-out-of-range(ptr %a, ptr %b, i64 %count) nounwind {
 ; CHECK-LABEL: post-indexed-paired-offset-out-of-range
 ; CHECK: ldp x{{[0-9]+}}, x{{[0-9]+}}, [x{{[0-9]+}}]
 ; CHECK: add x{{[0-9]+}}, x{{[0-9]+}}, #512
@@ -1317,18 +1317,18 @@ define void @post-indexed-paired-offset-out-of-range(i64* %a, i64* %b, i64 %coun
 ; CHECK: add x{{[0-9]+}}, x{{[0-9]+}}, #512
   br label %for.body
 for.body:
-  %phi1 = phi i64* [ %gep4, %for.body ], [ %b, %0 ]
-  %phi2 = phi i64* [ %gep3, %for.body ], [ %a, %0 ]
+  %phi1 = phi ptr [ %gep4, %for.body ], [ %b, %0 ]
+  %phi2 = phi ptr [ %gep3, %for.body ], [ %a, %0 ]
   %i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
-  %gep1 = getelementptr i64, i64* %phi1, i64 1
-  %load1 = load i64, i64* %phi1
-  %gep2 = getelementptr i64, i64* %phi2, i64 1
-  %load2 = load i64, i64* %gep1
-  store i64 %load1, i64* %gep2
-  store i64 %load2, i64* %phi2
+  %gep1 = getelementptr i64, ptr %phi1, i64 1
+  %load1 = load i64, ptr %phi1
+  %gep2 = getelementptr i64, ptr %phi2, i64 1
+  %load2 = load i64, ptr %gep1
+  store i64 %load1, ptr %gep2
+  store i64 %load2, ptr %phi2
   %dec.i = add nsw i64 %i, -1
-  %gep3 = getelementptr i64, i64* %phi2, i64 64
-  %gep4 = getelementptr i64, i64* %phi1, i64 64
+  %gep3 = getelementptr i64, ptr %phi2, i64 64
+  %gep4 = getelementptr i64, ptr %phi1, i64 64
   %cond = icmp sgt i64 %dec.i, 0
   br i1 %cond, label %for.body, label %end
 end:
@@ -1338,21 +1338,21 @@ end:
 ; DAGCombiner::MergeConsecutiveStores merges this into a vector store,
 ; replaceZeroVectorStore should split the vector store back into
 ; scalar stores which should get merged by AArch64LoadStoreOptimizer.
-define void @merge_zr32(i32* %p) {
+define void @merge_zr32(ptr %p) {
 ; CHECK-LABEL: merge_zr32:
 ; CHECK: // %entry
 ; NOSTRICTALIGN-NEXT: str xzr, [x{{[0-9]+}}]
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
-  store i32 0, i32* %p
-  %p1 = getelementptr i32, i32* %p, i32 1
-  store i32 0, i32* %p1
+  store i32 0, ptr %p
+  %p1 = getelementptr i32, ptr %p, i32 1
+  store i32 0, ptr %p1
   ret void
 }
 
 ; Same as merge_zr32 but the merged stores should also get paired.
-define void @merge_zr32_2(i32* %p) {
+define void @merge_zr32_2(ptr %p) {
 ; CHECK-LABEL: merge_zr32_2:
 ; CHECK: // %entry
 ; NOSTRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
@@ -1360,18 +1360,18 @@ define void @merge_zr32_2(i32* %p) {
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}, #8]
 ; CHECK-NEXT: ret
 entry:
-  store i32 0, i32* %p
-  %p1 = getelementptr i32, i32* %p, i32 1
-  store i32 0, i32* %p1
-  %p2 = getelementptr i32, i32* %p, i64 2
-  store i32 0, i32* %p2
-  %p3 = getelementptr i32, i32* %p, i64 3
-  store i32 0, i32* %p3
+  store i32 0, ptr %p
+  %p1 = getelementptr i32, ptr %p, i32 1
+  store i32 0, ptr %p1
+  %p2 = getelementptr i32, ptr %p, i64 2
+  store i32 0, ptr %p2
+  %p3 = getelementptr i32, ptr %p, i64 3
+  store i32 0, ptr %p3
   ret void
 }
 
 ; Like merge_zr32_2, but checking the largest allowed stp immediate offset.
-define void @merge_zr32_2_offset(i32* %p) {
+define void @merge_zr32_2_offset(ptr %p) {
 ; CHECK-LABEL: merge_zr32_2_offset:
 ; CHECK: // %entry
 ; NOSTRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}, #504]
@@ -1381,21 +1381,21 @@ define void @merge_zr32_2_offset(i32* %p) {
 ; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #516]
 ; CHECK-NEXT: ret
 entry:
-  %p0 = getelementptr i32, i32* %p, i32 126
-  store i32 0, i32* %p0
-  %p1 = getelementptr i32, i32* %p, i32 127
-  store i32 0, i32* %p1
-  %p2 = getelementptr i32, i32* %p, i64 128
-  store i32 0, i32* %p2
-  %p3 = getelementptr i32, i32* %p, i64 129
-  store i32 0, i32* %p3
+  %p0 = getelementptr i32, ptr %p, i32 126
+  store i32 0, ptr %p0
+  %p1 = getelementptr i32, ptr %p, i32 127
+  store i32 0, ptr %p1
+  %p2 = getelementptr i32, ptr %p, i64 128
+  store i32 0, ptr %p2
+  %p3 = getelementptr i32, ptr %p, i64 129
+  store i32 0, ptr %p3
   ret void
 }
 
 ; Like merge_zr32, but replaceZeroVectorStore should not split this
 ; vector store since the address offset is too large for the stp
 ; instruction.
-define void @no_merge_zr32_2_offset(i32* %p) {
+define void @no_merge_zr32_2_offset(ptr %p) {
 ; CHECK-LABEL: no_merge_zr32_2_offset:
 ; CHECK: // %entry
 ; NOSTRICTALIGN-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
@@ -1406,21 +1406,21 @@ define void @no_merge_zr32_2_offset(i32* %p) {
 ; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #4108]
 ; CHECK-NEXT: ret
 entry:
-  %p0 = getelementptr i32, i32* %p, i32 1024
-  store i32 0, i32* %p0
-  %p1 = getelementptr i32, i32* %p, i32 1025
-  store i32 0, i32* %p1
-  %p2 = getelementptr i32, i32* %p, i64 1026
-  store i32 0, i32* %p2
-  %p3 = getelementptr i32, i32* %p, i64 1027
-  store i32 0, i32* %p3
+  %p0 = getelementptr i32, ptr %p, i32 1024
+  store i32 0, ptr %p0
+  %p1 = getelementptr i32, ptr %p, i32 1025
+  store i32 0, ptr %p1
+  %p2 = getelementptr i32, ptr %p, i64 1026
+  store i32 0, ptr %p2
+  %p3 = getelementptr i32, ptr %p, i64 1027
+  store i32 0, ptr %p3
   ret void
 }
 
 ; Like merge_zr32, but replaceZeroVectorStore should not split the
 ; vector store since the zero constant vector has multiple uses, so we
 ; err on the side that allows for stp q instruction generation.
-define void @merge_zr32_3(i32* %p) {
+define void @merge_zr32_3(ptr %p) {
 ; CHECK-LABEL: merge_zr32_3:
 ; CHECK: // %entry
 ; NOSTRICTALIGN-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
@@ -1431,38 +1431,38 @@ define void @merge_zr32_3(i32* %p) {
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}, #24]
 ; CHECK-NEXT: ret
 entry:
-  store i32 0, i32* %p
-  %p1 = getelementptr i32, i32* %p, i32 1
-  store i32 0, i32* %p1
-  %p2 = getelementptr i32, i32* %p, i64 2
-  store i32 0, i32* %p2
-  %p3 = getelementptr i32, i32* %p, i64 3
-  store i32 0, i32* %p3
-  %p4 = getelementptr i32, i32* %p, i64 4
-  store i32 0, i32* %p4
-  %p5 = getelementptr i32, i32* %p, i64 5
-  store i32 0, i32* %p5
-  %p6 = getelementptr i32, i32* %p, i64 6
-  store i32 0, i32* %p6
-  %p7 = getelementptr i32, i32* %p, i64 7
-  store i32 0, i32* %p7
+  store i32 0, ptr %p
+  %p1 = getelementptr i32, ptr %p, i32 1
+  store i32 0, ptr %p1
+  %p2 = getelementptr i32, ptr %p, i64 2
+  store i32 0, ptr %p2
+  %p3 = getelementptr i32, ptr %p, i64 3
+  store i32 0, ptr %p3
+  %p4 = getelementptr i32, ptr %p, i64 4
+  store i32 0, ptr %p4
+  %p5 = getelementptr i32, ptr %p, i64 5
+  store i32 0, ptr %p5
+  %p6 = getelementptr i32, ptr %p, i64 6
+  store i32 0, ptr %p6
+  %p7 = getelementptr i32, ptr %p, i64 7
+  store i32 0, ptr %p7
   ret void
 }
 
 ; Like merge_zr32, but with 2-vector type.
-define void @merge_zr32_2vec(<2 x i32>* %p) {
+define void @merge_zr32_2vec(ptr %p) {
 ; CHECK-LABEL: merge_zr32_2vec:
 ; CHECK: // %entry
 ; NOSTRICTALIGN-NEXT: str xzr, [x{{[0-9]+}}]
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
-  store <2 x i32> zeroinitializer, <2 x i32>* %p
+  store <2 x i32> zeroinitializer, ptr %p
   ret void
 }
 
 ; Like merge_zr32, but with 3-vector type.
-define void @merge_zr32_3vec(<3 x i32>* %p) {
+define void @merge_zr32_3vec(ptr %p) {
 ; CHECK-LABEL: merge_zr32_3vec:
 ; CHECK: // %entry
 ; NOSTRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}, #8]
@@ -1471,12 +1471,12 @@ define void @merge_zr32_3vec(<3 x i32>* %p) {
 ; STRICTALIGN-NEXT: str wzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
-  store <3 x i32> zeroinitializer, <3 x i32>* %p
+  store <3 x i32> zeroinitializer, ptr %p
   ret void
 }
 
 ; Like merge_zr32, but with 4-vector type.
-define void @merge_zr32_4vec(<4 x i32>* %p) {
+define void @merge_zr32_4vec(ptr %p) {
 ; CHECK-LABEL: merge_zr32_4vec:
 ; CHECK: // %entry
 ; NOSTRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
@@ -1484,24 +1484,24 @@ define void @merge_zr32_4vec(<4 x i32>* %p) {
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
-  store <4 x i32> zeroinitializer, <4 x i32>* %p
+  store <4 x i32> zeroinitializer, ptr %p
   ret void
 }
 
 ; Like merge_zr32, but with 2-vector float type.
-define void @merge_zr32_2vecf(<2 x float>* %p) {
+define void @merge_zr32_2vecf(ptr %p) {
 ; CHECK-LABEL: merge_zr32_2vecf:
 ; CHECK: // %entry
 ; NOSTRICTALIGN-NEXT: str xzr, [x{{[0-9]+}}]
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
-  store <2 x float> zeroinitializer, <2 x float>* %p
+  store <2 x float> zeroinitializer, ptr %p
   ret void
 }
 
 ; Like merge_zr32, but with 4-vector float type.
-define void @merge_zr32_4vecf(<4 x float>* %p) {
+define void @merge_zr32_4vecf(ptr %p) {
 ; CHECK-LABEL: merge_zr32_4vecf:
 ; CHECK: // %entry
 ; NOSTRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
@@ -1509,25 +1509,25 @@ define void @merge_zr32_4vecf(<4 x float>* %p) {
 ; STRICTALIGN-NEXT: stp wzr, wzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
-  store <4 x float> zeroinitializer, <4 x float>* %p
+  store <4 x float> zeroinitializer, ptr %p
   ret void
 }
 
 ; Similar to merge_zr32, but for 64-bit values.
-define void @merge_zr64(i64* %p) {
+define void @merge_zr64(ptr %p) {
 ; CHECK-LABEL: merge_zr64:
 ; CHECK: // %entry
 ; CHECK-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
-  store i64 0, i64* %p
-  %p1 = getelementptr i64, i64* %p, i64 1
-  store i64 0, i64* %p1
+  store i64 0, ptr %p
+  %p1 = getelementptr i64, ptr %p, i64 1
+  store i64 0, ptr %p1
   ret void
 }
 
 ; Similar to merge_zr32, but for 64-bit values and with unaligned stores.
-define void @merge_zr64_unalign(<2 x i64>* %p) {
+define void @merge_zr64_unalign(ptr %p) {
 ; CHECK-LABEL: merge_zr64_unalign:
 ; CHECK: // %entry
 ; NOSTRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
@@ -1549,13 +1549,13 @@ define void @merge_zr64_unalign(<2 x i64>* %p) {
 ; STRICTALIGN: strb
 ; CHECK-NEXT: ret
 entry:
-  store <2 x i64> zeroinitializer, <2 x i64>* %p, align 1
+  store <2 x i64> zeroinitializer, ptr %p, align 1
   ret void
 }
 
 ; Similar to merge_zr32_3, replaceZeroVectorStore should not split the
 ; vector store since the zero constant vector has multiple uses.
-define void @merge_zr64_2(i64* %p) {
+define void @merge_zr64_2(ptr %p) {
 ; CHECK-LABEL: merge_zr64_2:
 ; CHECK: // %entry
 ; NOSTRICTALIGN-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
@@ -1564,53 +1564,53 @@ define void @merge_zr64_2(i64* %p) {
 ; STRICTALIGN-NEXT: stp xzr, xzr, [x{{[0-9]+}}, #16]
 ; CHECK-NEXT: ret
 entry:
-  store i64 0, i64* %p
-  %p1 = getelementptr i64, i64* %p, i64 1
-  store i64 0, i64* %p1
-  %p2 = getelementptr i64, i64* %p, i64 2
-  store i64 0, i64* %p2
-  %p3 = getelementptr i64, i64* %p, i64 3
-  store i64 0, i64* %p3
+  store i64 0, ptr %p
+  %p1 = getelementptr i64, ptr %p, i64 1
+  store i64 0, ptr %p1
+  %p2 = getelementptr i64, ptr %p, i64 2
+  store i64 0, ptr %p2
+  %p3 = getelementptr i64, ptr %p, i64 3
+  store i64 0, ptr %p3
   ret void
 }
 
 ; Like merge_zr64, but with 2-vector double type.
-define void @merge_zr64_2vecd(<2 x double>* %p) {
+define void @merge_zr64_2vecd(ptr %p) {
 ; CHECK-LABEL: merge_zr64_2vecd:
 ; CHECK: // %entry
 ; CHECK-NEXT: stp xzr, xzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
-  store <2 x double> zeroinitializer, <2 x double>* %p
+  store <2 x double> zeroinitializer, ptr %p
   ret void
 }
 
 ; Like merge_zr64, but with 3-vector i64 type.
-define void @merge_zr64_3vec(<3 x i64>* %p) {
+define void @merge_zr64_3vec(ptr %p) {
 ; CHECK-LABEL: merge_zr64_3vec:
 ; CHECK: // %entry
 ; CHECK-NEXT: stp xzr, xzr, [x{{[0-9]+}}, #8]
 ; CHECK-NEXT: str xzr, [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
-  store <3 x i64> zeroinitializer, <3 x i64>* %p
+  store <3 x i64> zeroinitializer, ptr %p
   ret void
 }
 
 ; Like merge_zr64_2, but with 4-vector double type.
-define void @merge_zr64_4vecd(<4 x double>* %p) {
+define void @merge_zr64_4vecd(ptr %p) {
 ; CHECK-LABEL: merge_zr64_4vecd:
 ; CHECK: // %entry
 ; CHECK-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
 ; CHECK-NEXT: stp q[[REG]], q[[REG]], [x{{[0-9]+}}]
 ; CHECK-NEXT: ret
 entry:
-  store <4 x double> zeroinitializer, <4 x double>* %p
+  store <4 x double> zeroinitializer, ptr %p
   ret void
 }
 
 ; Verify that non-consecutive merges do not generate q0
-define void @merge_multiple_128bit_stores(i64* %p) {
+define void @merge_multiple_128bit_stores(ptr %p) {
 ; CHECK-LABEL: merge_multiple_128bit_stores
 ; CHECK: // %entry
 ; NOSTRICTALIGN-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
@@ -1622,22 +1622,22 @@ define void @merge_multiple_128bit_stores(i64* %p) {
 ; STRICTALIGN-NEXT: stp xzr, xzr, [x0, #48]
 ; CHECK-NEXT: ret
 entry:
-  store i64 0, i64* %p
-  %p1 = getelementptr i64, i64* %p, i64 1
-  store i64 0, i64* %p1
-  %p3 = getelementptr i64, i64* %p, i64 3
-  store i64 0, i64* %p3
-  %p4 = getelementptr i64, i64* %p, i64 4
-  store i64 0, i64* %p4
-  %p6 = getelementptr i64, i64* %p, i64 6
-  store i64 0, i64* %p6
-  %p7 = getelementptr i64, i64* %p, i64 7
-  store i64 0, i64* %p7
+  store i64 0, ptr %p
+  %p1 = getelementptr i64, ptr %p, i64 1
+  store i64 0, ptr %p1
+  %p3 = getelementptr i64, ptr %p, i64 3
+  store i64 0, ptr %p3
+  %p4 = getelementptr i64, ptr %p, i64 4
+  store i64 0, ptr %p4
+  %p6 = getelementptr i64, ptr %p, i64 6
+  store i64 0, ptr %p6
+  %p7 = getelementptr i64, ptr %p, i64 7
+  store i64 0, ptr %p7
   ret void
 }
 
 ; Verify that large stores generate stp q
-define void @merge_multiple_128bit_stores_consec(i64* %p) {
+define void @merge_multiple_128bit_stores_consec(ptr %p) {
 ; CHECK-LABEL: merge_multiple_128bit_stores_consec
 ; CHECK: // %entry
 ; NOSTRICTALIGN-NEXT: movi v[[REG:[0-9]]].2d, #0000000000000000
@@ -1649,21 +1649,21 @@ define void @merge_multiple_128bit_stores_consec(i64* %p) {
 ; STRICTALIGN-NEXT: stp  xzr, xzr, [x0, #48]
 ; CHECK-NEXT: ret
 entry:
-  store i64 0, i64* %p
-  %p1 = getelementptr i64, i64* %p, i64 1
-  store i64 0, i64* %p1
-  %p2 = getelementptr i64, i64* %p, i64 2
-  store i64 0, i64* %p2
-  %p3 = getelementptr i64, i64* %p, i64 3
-  store i64 0, i64* %p3
-  %p4 = getelementptr i64, i64* %p, i64 4
-  store i64 0, i64* %p4
-  %p5 = getelementptr i64, i64* %p, i64 5
-  store i64 0, i64* %p5
-  %p6 = getelementptr i64, i64* %p, i64 6
-  store i64 0, i64* %p6
-  %p7 = getelementptr i64, i64* %p, i64 7
-  store i64 0, i64* %p7
+  store i64 0, ptr %p
+  %p1 = getelementptr i64, ptr %p, i64 1
+  store i64 0, ptr %p1
+  %p2 = getelementptr i64, ptr %p, i64 2
+  store i64 0, ptr %p2
+  %p3 = getelementptr i64, ptr %p, i64 3
+  store i64 0, ptr %p3
+  %p4 = getelementptr i64, ptr %p, i64 4
+  store i64 0, ptr %p4
+  %p5 = getelementptr i64, ptr %p, i64 5
+  store i64 0, ptr %p5
+  %p6 = getelementptr i64, ptr %p, i64 6
+  store i64 0, ptr %p6
+  %p7 = getelementptr i64, ptr %p, i64 7
+  store i64 0, ptr %p7
   ret void
 }
 
@@ -1674,19 +1674,18 @@ entry:
 ; CHECK-NEXT: mov x8, x0
 ; CHECK-NEXT: add x0, [[ZREG]], #1
 ; CHECK-NEXT: stp xzr, xzr, [x8]
-define i64 @bug34674(<2 x i64>* %p) {
+define i64 @bug34674(ptr %p) {
 entry:
-  store <2 x i64> zeroinitializer, <2 x i64>* %p
-  %p2 = bitcast <2 x i64>* %p to i64*
-  %ld = load i64, i64* %p2
+  store <2 x i64> zeroinitializer, ptr %p
+  %ld = load i64, ptr %p
   %add = add i64 %ld, 1
   ret i64 %add
 }
 
 ; CHECK-LABEL: trunc_splat_zero:
 ; CHECK-DAG: strh wzr, [x0]
-define void @trunc_splat_zero(<2 x i8>* %ptr) {
-  store <2 x i8> zeroinitializer, <2 x i8>* %ptr, align 2
+define void @trunc_splat_zero(ptr %ptr) {
+  store <2 x i8> zeroinitializer, ptr %ptr, align 2
   ret void
 }
 
@@ -1694,7 +1693,7 @@ define void @trunc_splat_zero(<2 x i8>* %ptr) {
 ; CHECK: mov [[VAL:w[0-9]+]], #42
 ; CHECK: movk [[VAL]], #42, lsl #16
 ; CHECK: str [[VAL]], [x0]
-define void @trunc_splat(<2 x i16>* %ptr) {
-  store <2 x i16> <i16 42, i16 42>, <2 x i16>* %ptr, align 4
+define void @trunc_splat(ptr %ptr) {
+  store <2 x i16> <i16 42, i16 42>, ptr %ptr, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll b/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
index 4a9aa44ec839b..5534568f72cd3 100644
--- a/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
+++ b/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
@@ -3,10 +3,10 @@
 target datalayout = "e-m:e-i64:64-i128:128-n8:16:32:64-S128"
 target triple = "aarch64--linux-gnu"
 
-declare void @f(i8*, i8*)
-declare void @f2(i8*, i8*)
+declare void @f(ptr, ptr)
+declare void @f2(ptr, ptr)
 declare void @_Z5setupv()
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) #3
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) #3
 
 define i32 @main() local_unnamed_addr #1 {
 ; Make sure the stores happen in the correct order (the exact instructions could change).
@@ -41,30 +41,24 @@ define i32 @main() local_unnamed_addr #1 {
 
 for.body.lr.ph.i.i.i.i.i.i63:
   %b1 = alloca [10 x i32], align 16
-  %x0 = bitcast [10 x i32]* %b1 to i8*
   %b2 = alloca [10 x i32], align 16
-  %x1 = bitcast [10 x i32]* %b2 to i8*
   tail call void @_Z5setupv()
-  %x2 = getelementptr inbounds [10 x i32], [10 x i32]* %b1, i64 0, i64 6
-  %x3 = bitcast i32* %x2 to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 8 %x3, i8 0, i64 16, i1 false)
-  %arraydecay2 = getelementptr inbounds [10 x i32], [10 x i32]* %b1, i64 0, i64 0
-  %x4 = bitcast [10 x i32]* %b1 to <4 x i32>*
-  store <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32>* %x4, align 16
-  %incdec.ptr.i7.i.i.i.i.i.i64.3 = getelementptr inbounds [10 x i32], [10 x i32]* %b1, i64 0, i64 4
-  %x5 = bitcast i32* %incdec.ptr.i7.i.i.i.i.i.i64.3 to <4 x i32>*
-  store <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32>* %x5, align 16
-  %incdec.ptr.i7.i.i.i.i.i.i64.7 = getelementptr inbounds [10 x i32], [10 x i32]* %b1, i64 0, i64 8
-  store i32 1, i32* %incdec.ptr.i7.i.i.i.i.i.i64.7, align 16
-  %x6 = load i32, i32* %arraydecay2, align 16
+  %x2 = getelementptr inbounds [10 x i32], ptr %b1, i64 0, i64 6
+  call void @llvm.memset.p0.i64(ptr align 8 %x2, i8 0, i64 16, i1 false)
+  store <4 x i32> <i32 1, i32 1, i32 1, i32 1>, ptr %b1, align 16
+  %incdec.ptr.i7.i.i.i.i.i.i64.3 = getelementptr inbounds [10 x i32], ptr %b1, i64 0, i64 4
+  store <4 x i32> <i32 1, i32 1, i32 1, i32 1>, ptr %incdec.ptr.i7.i.i.i.i.i.i64.3, align 16
+  %incdec.ptr.i7.i.i.i.i.i.i64.7 = getelementptr inbounds [10 x i32], ptr %b1, i64 0, i64 8
+  store i32 1, ptr %incdec.ptr.i7.i.i.i.i.i.i64.7, align 16
+  %x6 = load i32, ptr %b1, align 16
   %cmp6 = icmp eq i32 %x6, 1
   br i1 %cmp6, label %for.inc, label %if.then
 
 for.inc:
-  call void @f(i8* %x0, i8* %x1)
+  call void @f(ptr %b1, ptr %b2)
   ret i32 0
 
 if.then:
-  call void @f2(i8* %x0, i8* %x1)
+  call void @f2(ptr %b1, ptr %b2)
   ret i32 0
 }

diff  --git a/llvm/test/CodeGen/AArch64/ldst-regoffset.ll b/llvm/test/CodeGen/AArch64/ldst-regoffset.ll
index 85d6db104b668..f6ed952615302 100644
--- a/llvm/test/CodeGen/AArch64/ldst-regoffset.ll
+++ b/llvm/test/CodeGen/AArch64/ldst-regoffset.ll
@@ -9,343 +9,343 @@
 @var_float = global float 0.0
 @var_double = global double 0.0
 
-define void @ldst_8bit(i8* %base, i32 %off32, i64 %off64) minsize {
+define void @ldst_8bit(ptr %base, i32 %off32, i64 %off64) minsize {
 ; CHECK-LABEL: ldst_8bit:
 
-   %addr8_sxtw = getelementptr i8, i8* %base, i32 %off32
-   %val8_sxtw = load volatile i8, i8* %addr8_sxtw
+   %addr8_sxtw = getelementptr i8, ptr %base, i32 %off32
+   %val8_sxtw = load volatile i8, ptr %addr8_sxtw
    %val32_signed = sext i8 %val8_sxtw to i32
-   store volatile i32 %val32_signed, i32* @var_32bit
+   store volatile i32 %val32_signed, ptr @var_32bit
 ; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{[wx][0-9]+}}, sxtw]
 
-  %addr_lsl = getelementptr i8, i8* %base, i64 %off64
-  %val8_lsl = load volatile i8, i8* %addr_lsl
+  %addr_lsl = getelementptr i8, ptr %base, i64 %off64
+  %val8_lsl = load volatile i8, ptr %addr_lsl
   %val32_unsigned = zext i8 %val8_lsl to i32
-  store volatile i32 %val32_unsigned, i32* @var_32bit
+  store volatile i32 %val32_unsigned, ptr @var_32bit
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
 
-  %addrint_uxtw = ptrtoint i8* %base to i64
+  %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = zext i32 %off32 to i64
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
-  %addr_uxtw = inttoptr i64 %addrint1_uxtw to i8*
-  %val8_uxtw = load volatile i8, i8* %addr_uxtw
+  %addr_uxtw = inttoptr i64 %addrint1_uxtw to ptr
+  %val8_uxtw = load volatile i8, ptr %addr_uxtw
   %newval8 = add i8 %val8_uxtw, 1
-  store volatile i8 %newval8, i8* @var_8bit
+  store volatile i8 %newval8, ptr @var_8bit
 ; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
 
    ret void
 }
 
 
-define void @ldst_16bit(i16* %base, i32 %off32, i64 %off64) minsize {
+define void @ldst_16bit(ptr %base, i32 %off32, i64 %off64) minsize {
 ; CHECK-LABEL: ldst_16bit:
 
-   %addr8_sxtwN = getelementptr i16, i16* %base, i32 %off32
-   %val8_sxtwN = load volatile i16, i16* %addr8_sxtwN
+   %addr8_sxtwN = getelementptr i16, ptr %base, i32 %off32
+   %val8_sxtwN = load volatile i16, ptr %addr8_sxtwN
    %val32_signed = sext i16 %val8_sxtwN to i32
-   store volatile i32 %val32_signed, i32* @var_32bit
+   store volatile i32 %val32_signed, ptr @var_32bit
 ; CHECK: ldrsh {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #1]
 
-  %addr_lslN = getelementptr i16, i16* %base, i64 %off64
-  %val8_lslN = load volatile i16, i16* %addr_lslN
+  %addr_lslN = getelementptr i16, ptr %base, i64 %off64
+  %val8_lslN = load volatile i16, ptr %addr_lslN
   %val32_unsigned = zext i16 %val8_lslN to i32
-  store volatile i32 %val32_unsigned, i32* @var_32bit
+  store volatile i32 %val32_unsigned, ptr @var_32bit
 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #1]
 
-  %addrint_uxtw = ptrtoint i16* %base to i64
+  %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = zext i32 %off32 to i64
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
-  %addr_uxtw = inttoptr i64 %addrint1_uxtw to i16*
-  %val8_uxtw = load volatile i16, i16* %addr_uxtw
+  %addr_uxtw = inttoptr i64 %addrint1_uxtw to ptr
+  %val8_uxtw = load volatile i16, ptr %addr_uxtw
   %newval8 = add i16 %val8_uxtw, 1
-  store volatile i16 %newval8, i16* @var_16bit
+  store volatile i16 %newval8, ptr @var_16bit
 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
 
-  %base_sxtw = ptrtoint i16* %base to i64
+  %base_sxtw = ptrtoint ptr %base to i64
   %offset_sxtw = sext i32 %off32 to i64
   %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
-  %addr_sxtw = inttoptr i64 %addrint_sxtw to i16*
-  %val16_sxtw = load volatile i16, i16* %addr_sxtw
+  %addr_sxtw = inttoptr i64 %addrint_sxtw to ptr
+  %val16_sxtw = load volatile i16, ptr %addr_sxtw
   %val64_signed = sext i16 %val16_sxtw to i64
-  store volatile i64 %val64_signed, i64* @var_64bit
+  store volatile i64 %val64_signed, ptr @var_64bit
 ; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{[wx][0-9]+}}, sxtw]
 
 
-  %base_lsl = ptrtoint i16* %base to i64
+  %base_lsl = ptrtoint ptr %base to i64
   %addrint_lsl = add i64 %base_lsl, %off64
-  %addr_lsl = inttoptr i64 %addrint_lsl to i16*
-  %val16_lsl = load volatile i16, i16* %addr_lsl
+  %addr_lsl = inttoptr i64 %addrint_lsl to ptr
+  %val16_lsl = load volatile i16, ptr %addr_lsl
   %val64_unsigned = zext i16 %val16_lsl to i64
-  store volatile i64 %val64_unsigned, i64* @var_64bit
+  store volatile i64 %val64_unsigned, ptr @var_64bit
 ; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
 
-  %base_uxtwN = ptrtoint i16* %base to i64
+  %base_uxtwN = ptrtoint ptr %base to i64
   %offset_uxtwN = zext i32 %off32 to i64
   %offset2_uxtwN = shl i64 %offset_uxtwN, 1
   %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
-  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i16*
-  %val32 = load volatile i32, i32* @var_32bit
+  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to ptr
+  %val32 = load volatile i32, ptr @var_32bit
   %val16_trunc32 = trunc i32 %val32 to i16
-  store volatile i16 %val16_trunc32, i16* %addr_uxtwN
+  store volatile i16 %val16_trunc32, ptr %addr_uxtwN
 ; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #1]
    ret void
 }
 
-define void @ldst_32bit(i32* %base, i32 %off32, i64 %off64) minsize {
+define void @ldst_32bit(ptr %base, i32 %off32, i64 %off64) minsize {
 ; CHECK-LABEL: ldst_32bit:
 
-   %addr_sxtwN = getelementptr i32, i32* %base, i32 %off32
-   %val_sxtwN = load volatile i32, i32* %addr_sxtwN
-   store volatile i32 %val_sxtwN, i32* @var_32bit
+   %addr_sxtwN = getelementptr i32, ptr %base, i32 %off32
+   %val_sxtwN = load volatile i32, ptr %addr_sxtwN
+   store volatile i32 %val_sxtwN, ptr @var_32bit
 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #2]
 
-  %addr_lslN = getelementptr i32, i32* %base, i64 %off64
-  %val_lslN = load volatile i32, i32* %addr_lslN
-  store volatile i32 %val_lslN, i32* @var_32bit
+  %addr_lslN = getelementptr i32, ptr %base, i64 %off64
+  %val_lslN = load volatile i32, ptr %addr_lslN
+  store volatile i32 %val_lslN, ptr @var_32bit
 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #2]
 
-  %addrint_uxtw = ptrtoint i32* %base to i64
+  %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = zext i32 %off32 to i64
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
-  %addr_uxtw = inttoptr i64 %addrint1_uxtw to i32*
-  %val_uxtw = load volatile i32, i32* %addr_uxtw
+  %addr_uxtw = inttoptr i64 %addrint1_uxtw to ptr
+  %val_uxtw = load volatile i32, ptr %addr_uxtw
   %newval8 = add i32 %val_uxtw, 1
-  store volatile i32 %newval8, i32* @var_32bit
+  store volatile i32 %newval8, ptr @var_32bit
 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
 
 
-  %base_sxtw = ptrtoint i32* %base to i64
+  %base_sxtw = ptrtoint ptr %base to i64
   %offset_sxtw = sext i32 %off32 to i64
   %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
-  %addr_sxtw = inttoptr i64 %addrint_sxtw to i32*
-  %val16_sxtw = load volatile i32, i32* %addr_sxtw
+  %addr_sxtw = inttoptr i64 %addrint_sxtw to ptr
+  %val16_sxtw = load volatile i32, ptr %addr_sxtw
   %val64_signed = sext i32 %val16_sxtw to i64
-  store volatile i64 %val64_signed, i64* @var_64bit
+  store volatile i64 %val64_signed, ptr @var_64bit
 ; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw]
 
 
-  %base_lsl = ptrtoint i32* %base to i64
+  %base_lsl = ptrtoint ptr %base to i64
   %addrint_lsl = add i64 %base_lsl, %off64
-  %addr_lsl = inttoptr i64 %addrint_lsl to i32*
-  %val16_lsl = load volatile i32, i32* %addr_lsl
+  %addr_lsl = inttoptr i64 %addrint_lsl to ptr
+  %val16_lsl = load volatile i32, ptr %addr_lsl
   %val64_unsigned = zext i32 %val16_lsl to i64
-  store volatile i64 %val64_unsigned, i64* @var_64bit
+  store volatile i64 %val64_unsigned, ptr @var_64bit
 ; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
 
-  %base_uxtwN = ptrtoint i32* %base to i64
+  %base_uxtwN = ptrtoint ptr %base to i64
   %offset_uxtwN = zext i32 %off32 to i64
   %offset2_uxtwN = shl i64 %offset_uxtwN, 2
   %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
-  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i32*
-  %val32 = load volatile i32, i32* @var_32bit
-  store volatile i32 %val32, i32* %addr_uxtwN
+  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to ptr
+  %val32 = load volatile i32, ptr @var_32bit
+  store volatile i32 %val32, ptr %addr_uxtwN
 ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #2]
    ret void
 }
 
-define void @ldst_64bit(i64* %base, i32 %off32, i64 %off64) minsize {
+define void @ldst_64bit(ptr %base, i32 %off32, i64 %off64) minsize {
 ; CHECK-LABEL: ldst_64bit:
 
-   %addr_sxtwN = getelementptr i64, i64* %base, i32 %off32
-   %val_sxtwN = load volatile i64, i64* %addr_sxtwN
-   store volatile i64 %val_sxtwN, i64* @var_64bit
+   %addr_sxtwN = getelementptr i64, ptr %base, i32 %off32
+   %val_sxtwN = load volatile i64, ptr %addr_sxtwN
+   store volatile i64 %val_sxtwN, ptr @var_64bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #3]
 
-  %addr_lslN = getelementptr i64, i64* %base, i64 %off64
-  %val_lslN = load volatile i64, i64* %addr_lslN
-  store volatile i64 %val_lslN, i64* @var_64bit
+  %addr_lslN = getelementptr i64, ptr %base, i64 %off64
+  %val_lslN = load volatile i64, ptr %addr_lslN
+  store volatile i64 %val_lslN, ptr @var_64bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #3]
 
-  %addrint_uxtw = ptrtoint i64* %base to i64
+  %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = zext i32 %off32 to i64
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
-  %addr_uxtw = inttoptr i64 %addrint1_uxtw to i64*
-  %val8_uxtw = load volatile i64, i64* %addr_uxtw
+  %addr_uxtw = inttoptr i64 %addrint1_uxtw to ptr
+  %val8_uxtw = load volatile i64, ptr %addr_uxtw
   %newval8 = add i64 %val8_uxtw, 1
-  store volatile i64 %newval8, i64* @var_64bit
+  store volatile i64 %newval8, ptr @var_64bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
 
-  %base_sxtw = ptrtoint i64* %base to i64
+  %base_sxtw = ptrtoint ptr %base to i64
   %offset_sxtw = sext i32 %off32 to i64
   %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
-  %addr_sxtw = inttoptr i64 %addrint_sxtw to i64*
-  %val64_sxtw = load volatile i64, i64* %addr_sxtw
-  store volatile i64 %val64_sxtw, i64* @var_64bit
+  %addr_sxtw = inttoptr i64 %addrint_sxtw to ptr
+  %val64_sxtw = load volatile i64, ptr %addr_sxtw
+  store volatile i64 %val64_sxtw, ptr @var_64bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw]
 
-  %base_lsl = ptrtoint i64* %base to i64
+  %base_lsl = ptrtoint ptr %base to i64
   %addrint_lsl = add i64 %base_lsl, %off64
-  %addr_lsl = inttoptr i64 %addrint_lsl to i64*
-  %val64_lsl = load volatile i64, i64* %addr_lsl
-  store volatile i64 %val64_lsl, i64* @var_64bit
+  %addr_lsl = inttoptr i64 %addrint_lsl to ptr
+  %val64_lsl = load volatile i64, ptr %addr_lsl
+  store volatile i64 %val64_lsl, ptr @var_64bit
 ; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
 
-  %base_uxtwN = ptrtoint i64* %base to i64
+  %base_uxtwN = ptrtoint ptr %base to i64
   %offset_uxtwN = zext i32 %off32 to i64
   %offset2_uxtwN = shl i64 %offset_uxtwN, 3
   %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
-  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i64*
-  %val64 = load volatile i64, i64* @var_64bit
-  store volatile i64 %val64, i64* %addr_uxtwN
+  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to ptr
+  %val64 = load volatile i64, ptr @var_64bit
+  store volatile i64 %val64, ptr %addr_uxtwN
 ; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #3]
    ret void
 }
 
-define void @ldst_float(float* %base, i32 %off32, i64 %off64) minsize {
+define void @ldst_float(ptr %base, i32 %off32, i64 %off64) minsize {
 ; CHECK-LABEL: ldst_float:
 
-   %addr_sxtwN = getelementptr float, float* %base, i32 %off32
-   %val_sxtwN = load volatile float, float* %addr_sxtwN
-   store volatile float %val_sxtwN, float* @var_float
+   %addr_sxtwN = getelementptr float, ptr %base, i32 %off32
+   %val_sxtwN = load volatile float, ptr %addr_sxtwN
+   store volatile float %val_sxtwN, ptr @var_float
 ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #2]
 ; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
 
-  %addr_lslN = getelementptr float, float* %base, i64 %off64
-  %val_lslN = load volatile float, float* %addr_lslN
-  store volatile float %val_lslN, float* @var_float
+  %addr_lslN = getelementptr float, ptr %base, i64 %off64
+  %val_lslN = load volatile float, ptr %addr_lslN
+  store volatile float %val_lslN, ptr @var_float
 ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #2]
 ; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
 
-  %addrint_uxtw = ptrtoint float* %base to i64
+  %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = zext i32 %off32 to i64
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
-  %addr_uxtw = inttoptr i64 %addrint1_uxtw to float*
-  %val_uxtw = load volatile float, float* %addr_uxtw
-  store volatile float %val_uxtw, float* @var_float
+  %addr_uxtw = inttoptr i64 %addrint1_uxtw to ptr
+  %val_uxtw = load volatile float, ptr %addr_uxtw
+  store volatile float %val_uxtw, ptr @var_float
 ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
 ; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
 
-  %base_sxtw = ptrtoint float* %base to i64
+  %base_sxtw = ptrtoint ptr %base to i64
   %offset_sxtw = sext i32 %off32 to i64
   %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
-  %addr_sxtw = inttoptr i64 %addrint_sxtw to float*
-  %val64_sxtw = load volatile float, float* %addr_sxtw
-  store volatile float %val64_sxtw, float* @var_float
+  %addr_sxtw = inttoptr i64 %addrint_sxtw to ptr
+  %val64_sxtw = load volatile float, ptr %addr_sxtw
+  store volatile float %val64_sxtw, ptr @var_float
 ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw]
 ; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
 
-  %base_lsl = ptrtoint float* %base to i64
+  %base_lsl = ptrtoint ptr %base to i64
   %addrint_lsl = add i64 %base_lsl, %off64
-  %addr_lsl = inttoptr i64 %addrint_lsl to float*
-  %val64_lsl = load volatile float, float* %addr_lsl
-  store volatile float %val64_lsl, float* @var_float
+  %addr_lsl = inttoptr i64 %addrint_lsl to ptr
+  %val64_lsl = load volatile float, ptr %addr_lsl
+  store volatile float %val64_lsl, ptr @var_float
 ; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
 ; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
 
-  %base_uxtwN = ptrtoint float* %base to i64
+  %base_uxtwN = ptrtoint ptr %base to i64
   %offset_uxtwN = zext i32 %off32 to i64
   %offset2_uxtwN = shl i64 %offset_uxtwN, 2
   %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
-  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to float*
-  %val64 = load volatile float, float* @var_float
-  store volatile float %val64, float* %addr_uxtwN
+  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to ptr
+  %val64 = load volatile float, ptr @var_float
+  store volatile float %val64, ptr %addr_uxtwN
 ; CHECK: str {{s[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #2]
 ; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
    ret void
 }
 
-define void @ldst_double(double* %base, i32 %off32, i64 %off64) minsize {
+define void @ldst_double(ptr %base, i32 %off32, i64 %off64) minsize {
 ; CHECK-LABEL: ldst_double:
 
-   %addr_sxtwN = getelementptr double, double* %base, i32 %off32
-   %val_sxtwN = load volatile double, double* %addr_sxtwN
-   store volatile double %val_sxtwN, double* @var_double
+   %addr_sxtwN = getelementptr double, ptr %base, i32 %off32
+   %val_sxtwN = load volatile double, ptr %addr_sxtwN
+   store volatile double %val_sxtwN, ptr @var_double
 ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #3]
 ; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
 
-  %addr_lslN = getelementptr double, double* %base, i64 %off64
-  %val_lslN = load volatile double, double* %addr_lslN
-  store volatile double %val_lslN, double* @var_double
+  %addr_lslN = getelementptr double, ptr %base, i64 %off64
+  %val_lslN = load volatile double, ptr %addr_lslN
+  store volatile double %val_lslN, ptr @var_double
 ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #3]
 ; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
 
-  %addrint_uxtw = ptrtoint double* %base to i64
+  %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = zext i32 %off32 to i64
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
-  %addr_uxtw = inttoptr i64 %addrint1_uxtw to double*
-  %val_uxtw = load volatile double, double* %addr_uxtw
-  store volatile double %val_uxtw, double* @var_double
+  %addr_uxtw = inttoptr i64 %addrint1_uxtw to ptr
+  %val_uxtw = load volatile double, ptr %addr_uxtw
+  store volatile double %val_uxtw, ptr @var_double
 ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
 ; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
 
-  %base_sxtw = ptrtoint double* %base to i64
+  %base_sxtw = ptrtoint ptr %base to i64
   %offset_sxtw = sext i32 %off32 to i64
   %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
-  %addr_sxtw = inttoptr i64 %addrint_sxtw to double*
-  %val64_sxtw = load volatile double, double* %addr_sxtw
-  store volatile double %val64_sxtw, double* @var_double
+  %addr_sxtw = inttoptr i64 %addrint_sxtw to ptr
+  %val64_sxtw = load volatile double, ptr %addr_sxtw
+  store volatile double %val64_sxtw, ptr @var_double
 ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw]
 ; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
 
-  %base_lsl = ptrtoint double* %base to i64
+  %base_lsl = ptrtoint ptr %base to i64
   %addrint_lsl = add i64 %base_lsl, %off64
-  %addr_lsl = inttoptr i64 %addrint_lsl to double*
-  %val64_lsl = load volatile double, double* %addr_lsl
-  store volatile double %val64_lsl, double* @var_double
+  %addr_lsl = inttoptr i64 %addrint_lsl to ptr
+  %val64_lsl = load volatile double, ptr %addr_lsl
+  store volatile double %val64_lsl, ptr @var_double
 ; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
 ; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
 
-  %base_uxtwN = ptrtoint double* %base to i64
+  %base_uxtwN = ptrtoint ptr %base to i64
   %offset_uxtwN = zext i32 %off32 to i64
   %offset2_uxtwN = shl i64 %offset_uxtwN, 3
   %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
-  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to double*
-  %val64 = load volatile double, double* @var_double
-  store volatile double %val64, double* %addr_uxtwN
+  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to ptr
+  %val64 = load volatile double, ptr @var_double
+  store volatile double %val64, ptr %addr_uxtwN
 ; CHECK: str {{d[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #3]
 ; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
    ret void
 }
 
 
-define void @ldst_128bit(fp128* %base, i32 %off32, i64 %off64) minsize {
+define void @ldst_128bit(ptr %base, i32 %off32, i64 %off64) minsize {
 ; CHECK-LABEL: ldst_128bit:
 
-   %addr_sxtwN = getelementptr fp128, fp128* %base, i32 %off32
-   %val_sxtwN = load volatile fp128, fp128* %addr_sxtwN
-   store volatile fp128 %val_sxtwN, fp128* %base
+   %addr_sxtwN = getelementptr fp128, ptr %base, i32 %off32
+   %val_sxtwN = load volatile fp128, ptr %addr_sxtwN
+   store volatile fp128 %val_sxtwN, ptr %base
 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
 ; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
 
-  %addr_lslN = getelementptr fp128, fp128* %base, i64 %off64
-  %val_lslN = load volatile fp128, fp128* %addr_lslN
-  store volatile fp128 %val_lslN, fp128* %base
+  %addr_lslN = getelementptr fp128, ptr %base, i64 %off64
+  %val_lslN = load volatile fp128, ptr %addr_lslN
+  store volatile fp128 %val_lslN, ptr %base
 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #4]
 ; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
 
-  %addrint_uxtw = ptrtoint fp128* %base to i64
+  %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = zext i32 %off32 to i64
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
-  %addr_uxtw = inttoptr i64 %addrint1_uxtw to fp128*
-  %val_uxtw = load volatile fp128, fp128* %addr_uxtw
-  store volatile fp128 %val_uxtw, fp128* %base
+  %addr_uxtw = inttoptr i64 %addrint1_uxtw to ptr
+  %val_uxtw = load volatile fp128, ptr %addr_uxtw
+  store volatile fp128 %val_uxtw, ptr %base
 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw]
 ; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
 
-  %base_sxtw = ptrtoint fp128* %base to i64
+  %base_sxtw = ptrtoint ptr %base to i64
   %offset_sxtw = sext i32 %off32 to i64
   %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
-  %addr_sxtw = inttoptr i64 %addrint_sxtw to fp128*
-  %val64_sxtw = load volatile fp128, fp128* %addr_sxtw
-  store volatile fp128 %val64_sxtw, fp128* %base
+  %addr_sxtw = inttoptr i64 %addrint_sxtw to ptr
+  %val64_sxtw = load volatile fp128, ptr %addr_sxtw
+  store volatile fp128 %val64_sxtw, ptr %base
 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw]
 ; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
 
-  %base_lsl = ptrtoint fp128* %base to i64
+  %base_lsl = ptrtoint ptr %base to i64
   %addrint_lsl = add i64 %base_lsl, %off64
-  %addr_lsl = inttoptr i64 %addrint_lsl to fp128*
-  %val64_lsl = load volatile fp128, fp128* %addr_lsl
-  store volatile fp128 %val64_lsl, fp128* %base
+  %addr_lsl = inttoptr i64 %addrint_lsl to ptr
+  %val64_lsl = load volatile fp128, ptr %addr_lsl
+  store volatile fp128 %val64_lsl, ptr %base
 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
 ; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
 
-  %base_uxtwN = ptrtoint fp128* %base to i64
+  %base_uxtwN = ptrtoint ptr %base to i64
   %offset_uxtwN = zext i32 %off32 to i64
   %offset2_uxtwN = shl i64 %offset_uxtwN, 4
   %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
-  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to fp128*
-  %val64 = load volatile fp128, fp128* %base
-  store volatile fp128 %val64, fp128* %addr_uxtwN
+  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to ptr
+  %val64 = load volatile fp128, ptr %base
+  store volatile fp128 %val64, ptr %addr_uxtwN
 ; CHECK: str {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, uxtw #4]
 ; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
    ret void

diff  --git a/llvm/test/CodeGen/AArch64/ldst-unscaledimm.ll b/llvm/test/CodeGen/AArch64/ldst-unscaledimm.ll
index 02d36c865fe62..24c22847677ef 100644
--- a/llvm/test/CodeGen/AArch64/ldst-unscaledimm.ll
+++ b/llvm/test/CodeGen/AArch64/ldst-unscaledimm.ll
@@ -9,64 +9,64 @@
 @var_float = dso_local global float 0.0
 @var_double = dso_local global double 0.0
 
-@varptr = dso_local global i8* null
+@varptr = dso_local global ptr null
 
 define dso_local void @ldst_8bit() {
 ; CHECK-LABEL: ldst_8bit:
 
 ; No architectural support for loads to 16-bit or 8-bit since we
 ; promote i8 during lowering.
-  %addr_8bit = load i8*, i8** @varptr
+  %addr_8bit = load ptr, ptr @varptr
 
 ; match a sign-extending load 8-bit -> 32-bit
-   %addr_sext32 = getelementptr i8, i8* %addr_8bit, i64 -256
-   %val8_sext32 = load volatile i8, i8* %addr_sext32
+   %addr_sext32 = getelementptr i8, ptr %addr_8bit, i64 -256
+   %val8_sext32 = load volatile i8, ptr %addr_sext32
    %val32_signed = sext i8 %val8_sext32 to i32
-   store volatile i32 %val32_signed, i32* @var_32bit
+   store volatile i32 %val32_signed, ptr @var_32bit
 ; CHECK: ldursb {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
 
 ; match a zero-extending load volatile 8-bit -> 32-bit
-  %addr_zext32 = getelementptr i8, i8* %addr_8bit, i64 -12
-  %val8_zext32 = load volatile i8, i8* %addr_zext32
+  %addr_zext32 = getelementptr i8, ptr %addr_8bit, i64 -12
+  %val8_zext32 = load volatile i8, ptr %addr_zext32
   %val32_unsigned = zext i8 %val8_zext32 to i32
-  store volatile i32 %val32_unsigned, i32* @var_32bit
+  store volatile i32 %val32_unsigned, ptr @var_32bit
 ; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-12]
 
 ; match an any-extending load volatile 8-bit -> 32-bit
-  %addr_anyext = getelementptr i8, i8* %addr_8bit, i64 -1
-  %val8_anyext = load volatile i8, i8* %addr_anyext
+  %addr_anyext = getelementptr i8, ptr %addr_8bit, i64 -1
+  %val8_anyext = load volatile i8, ptr %addr_anyext
   %newval8 = add i8 %val8_anyext, 1
-  store volatile i8 %newval8, i8* @var_8bit
+  store volatile i8 %newval8, ptr @var_8bit
 ; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
 
 ; match a sign-extending load volatile 8-bit -> 64-bit
-  %addr_sext64 = getelementptr i8, i8* %addr_8bit, i64 -5
-  %val8_sext64 = load volatile i8, i8* %addr_sext64
+  %addr_sext64 = getelementptr i8, ptr %addr_8bit, i64 -5
+  %val8_sext64 = load volatile i8, ptr %addr_sext64
   %val64_signed = sext i8 %val8_sext64 to i64
-  store volatile i64 %val64_signed, i64* @var_64bit
+  store volatile i64 %val64_signed, ptr @var_64bit
 ; CHECK: ldursb {{x[0-9]+}}, [{{x[0-9]+}}, #-5]
 
 ; match a zero-extending load volatile 8-bit -> 64-bit.
 ; This uses the fact that ldrb w0, [x0] will zero out the high 32-bits
 ; of x0 so it's identical to a volatile load to 32-bits.
-  %addr_zext64 = getelementptr i8, i8* %addr_8bit, i64 -9
-  %val8_zext64 = load volatile i8, i8* %addr_zext64
+  %addr_zext64 = getelementptr i8, ptr %addr_8bit, i64 -9
+  %val8_zext64 = load volatile i8, ptr %addr_zext64
   %val64_unsigned = zext i8 %val8_zext64 to i64
-  store volatile i64 %val64_unsigned, i64* @var_64bit
+  store volatile i64 %val64_unsigned, ptr @var_64bit
 ; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-9]
 
 ; truncating store volatile 32-bits to 8-bits
-  %addr_trunc32 = getelementptr i8, i8* %addr_8bit, i64 -256
-  %val32 = load volatile i32, i32* @var_32bit
+  %addr_trunc32 = getelementptr i8, ptr %addr_8bit, i64 -256
+  %val32 = load volatile i32, ptr @var_32bit
   %val8_trunc32 = trunc i32 %val32 to i8
-  store volatile i8 %val8_trunc32, i8* %addr_trunc32
+  store volatile i8 %val8_trunc32, ptr %addr_trunc32
 ; CHECK: sturb {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
 
 ; truncating store volatile 64-bits to 8-bits
-  %addr_trunc64 = getelementptr i8, i8* %addr_8bit, i64 -1
-  %val64 = load volatile i64, i64* @var_64bit
+  %addr_trunc64 = getelementptr i8, ptr %addr_8bit, i64 -1
+  %val64 = load volatile i64, ptr @var_64bit
   %val8_trunc64 = trunc i64 %val64 to i8
-  store volatile i8 %val8_trunc64, i8* %addr_trunc64
+  store volatile i8 %val8_trunc64, ptr %addr_trunc64
 ; CHECK: sturb {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
 
    ret void
@@ -77,64 +77,57 @@ define dso_local void @ldst_16bit() {
 
 ; No architectural support for loads to 16-bit since we
 ; promote i16 during lowering.
-  %addr_8bit = load i8*, i8** @varptr
+  %addr_8bit = load ptr, ptr @varptr
 
 ; match a sign-extending load 16-bit -> 32-bit
-   %addr8_sext32 = getelementptr i8, i8* %addr_8bit, i64 -256
-   %addr_sext32 = bitcast i8* %addr8_sext32 to i16*
-   %val16_sext32 = load volatile i16, i16* %addr_sext32
+   %addr8_sext32 = getelementptr i8, ptr %addr_8bit, i64 -256
+   %val16_sext32 = load volatile i16, ptr %addr8_sext32
    %val32_signed = sext i16 %val16_sext32 to i32
-   store volatile i32 %val32_signed, i32* @var_32bit
+   store volatile i32 %val32_signed, ptr @var_32bit
 ; CHECK: ldursh {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
 
 ; match a zero-extending load volatile 16-bit -> 32-bit. With offset that would be unaligned.
-  %addr8_zext32 = getelementptr i8, i8* %addr_8bit, i64 15
-  %addr_zext32 = bitcast i8* %addr8_zext32 to i16*
-  %val16_zext32 = load volatile i16, i16* %addr_zext32
+  %addr8_zext32 = getelementptr i8, ptr %addr_8bit, i64 15
+  %val16_zext32 = load volatile i16, ptr %addr8_zext32
   %val32_unsigned = zext i16 %val16_zext32 to i32
-  store volatile i32 %val32_unsigned, i32* @var_32bit
+  store volatile i32 %val32_unsigned, ptr @var_32bit
 ; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #15]
 
 ; match an any-extending load volatile 16-bit -> 32-bit
-  %addr8_anyext = getelementptr i8, i8* %addr_8bit, i64 -1
-  %addr_anyext = bitcast i8* %addr8_anyext to i16*
-  %val16_anyext = load volatile i16, i16* %addr_anyext
+  %addr8_anyext = getelementptr i8, ptr %addr_8bit, i64 -1
+  %val16_anyext = load volatile i16, ptr %addr8_anyext
   %newval16 = add i16 %val16_anyext, 1
-  store volatile i16 %newval16, i16* @var_16bit
+  store volatile i16 %newval16, ptr @var_16bit
 ; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
 
 ; match a sign-extending load volatile 16-bit -> 64-bit
-  %addr8_sext64 = getelementptr i8, i8* %addr_8bit, i64 -5
-  %addr_sext64 = bitcast i8* %addr8_sext64 to i16*
-  %val16_sext64 = load volatile i16, i16* %addr_sext64
+  %addr8_sext64 = getelementptr i8, ptr %addr_8bit, i64 -5
+  %val16_sext64 = load volatile i16, ptr %addr8_sext64
   %val64_signed = sext i16 %val16_sext64 to i64
-  store volatile i64 %val64_signed, i64* @var_64bit
+  store volatile i64 %val64_signed, ptr @var_64bit
 ; CHECK: ldursh {{x[0-9]+}}, [{{x[0-9]+}}, #-5]
 
 ; match a zero-extending load volatile 16-bit -> 64-bit.
 ; This uses the fact that ldrh w0, [x0] will zero out the high 32-bits
 ; of x0 so it's identical to a volatile load to 32-bits.
-  %addr8_zext64 = getelementptr i8, i8* %addr_8bit, i64 9
-  %addr_zext64 = bitcast i8* %addr8_zext64 to i16*
-  %val16_zext64 = load volatile i16, i16* %addr_zext64
+  %addr8_zext64 = getelementptr i8, ptr %addr_8bit, i64 9
+  %val16_zext64 = load volatile i16, ptr %addr8_zext64
   %val64_unsigned = zext i16 %val16_zext64 to i64
-  store volatile i64 %val64_unsigned, i64* @var_64bit
+  store volatile i64 %val64_unsigned, ptr @var_64bit
 ; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #9]
 
 ; truncating store volatile 32-bits to 16-bits
-  %addr8_trunc32 = getelementptr i8, i8* %addr_8bit, i64 -256
-  %addr_trunc32 = bitcast i8* %addr8_trunc32 to i16*
-  %val32 = load volatile i32, i32* @var_32bit
+  %addr8_trunc32 = getelementptr i8, ptr %addr_8bit, i64 -256
+  %val32 = load volatile i32, ptr @var_32bit
   %val16_trunc32 = trunc i32 %val32 to i16
-  store volatile i16 %val16_trunc32, i16* %addr_trunc32
+  store volatile i16 %val16_trunc32, ptr %addr8_trunc32
 ; CHECK: sturh {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
 
 ; truncating store volatile 64-bits to 16-bits
-  %addr8_trunc64 = getelementptr i8, i8* %addr_8bit, i64 -1
-  %addr_trunc64 = bitcast i8* %addr8_trunc64 to i16*
-  %val64 = load volatile i64, i64* @var_64bit
+  %addr8_trunc64 = getelementptr i8, ptr %addr_8bit, i64 -1
+  %val64 = load volatile i64, ptr @var_64bit
   %val16_trunc64 = trunc i64 %val64 to i16
-  store volatile i16 %val16_trunc64, i16* %addr_trunc64
+  store volatile i16 %val16_trunc64, ptr %addr8_trunc64
 ; CHECK: sturh {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
 
    ret void
@@ -143,43 +136,38 @@ define dso_local void @ldst_16bit() {
 define dso_local void @ldst_32bit() {
 ; CHECK-LABEL: ldst_32bit:
 
-  %addr_8bit = load i8*, i8** @varptr
+  %addr_8bit = load ptr, ptr @varptr
 
 ; Straight 32-bit load/store
-  %addr32_8_noext = getelementptr i8, i8* %addr_8bit, i64 1
-  %addr32_noext = bitcast i8* %addr32_8_noext to i32*
-  %val32_noext = load volatile i32, i32* %addr32_noext
-  store volatile i32 %val32_noext, i32* %addr32_noext
+  %addr32_8_noext = getelementptr i8, ptr %addr_8bit, i64 1
+  %val32_noext = load volatile i32, ptr %addr32_8_noext
+  store volatile i32 %val32_noext, ptr %addr32_8_noext
 ; CHECK: ldur {{w[0-9]+}}, [{{x[0-9]+}}, #1]
 ; CHECK: stur {{w[0-9]+}}, [{{x[0-9]+}}, #1]
 
 ; Zero-extension to 64-bits
-  %addr32_8_zext = getelementptr i8, i8* %addr_8bit, i64 -256
-  %addr32_zext = bitcast i8* %addr32_8_zext to i32*
-  %val32_zext = load volatile i32, i32* %addr32_zext
+  %addr32_8_zext = getelementptr i8, ptr %addr_8bit, i64 -256
+  %val32_zext = load volatile i32, ptr %addr32_8_zext
   %val64_unsigned = zext i32 %val32_zext to i64
-  store volatile i64 %val64_unsigned, i64* @var_64bit
+  store volatile i64 %val64_unsigned, ptr @var_64bit
 ; CHECK: ldur {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
 ; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
 
 ; Sign-extension to 64-bits
-  %addr32_8_sext = getelementptr i8, i8* %addr_8bit, i64 -12
-  %addr32_sext = bitcast i8* %addr32_8_sext to i32*
-  %val32_sext = load volatile i32, i32* %addr32_sext
+  %addr32_8_sext = getelementptr i8, ptr %addr_8bit, i64 -12
+  %val32_sext = load volatile i32, ptr %addr32_8_sext
   %val64_signed = sext i32 %val32_sext to i64
-  store volatile i64 %val64_signed, i64* @var_64bit
+  store volatile i64 %val64_signed, ptr @var_64bit
 ; CHECK: ldursw {{x[0-9]+}}, [{{x[0-9]+}}, #-12]
 ; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
 
 ; Truncation from 64-bits
-  %addr64_8_trunc = getelementptr i8, i8* %addr_8bit, i64 255
-  %addr64_trunc = bitcast i8* %addr64_8_trunc to i64*
-  %addr32_8_trunc = getelementptr i8, i8* %addr_8bit, i64 -20
-  %addr32_trunc = bitcast i8* %addr32_8_trunc to i32*
+  %addr64_8_trunc = getelementptr i8, ptr %addr_8bit, i64 255
+  %addr32_8_trunc = getelementptr i8, ptr %addr_8bit, i64 -20
 
-  %val64_trunc = load volatile i64, i64* %addr64_trunc
+  %val64_trunc = load volatile i64, ptr %addr64_8_trunc
   %val32_trunc = trunc i64 %val64_trunc to i32
-  store volatile i32 %val32_trunc, i32* %addr32_trunc
+  store volatile i32 %val32_trunc, ptr %addr32_8_trunc
 ; CHECK: ldur {{x[0-9]+}}, [{{x[0-9]+}}, #255]
 ; CHECK: stur {{w[0-9]+}}, [{{x[0-9]+}}, #-20]
 
@@ -189,15 +177,14 @@ define dso_local void @ldst_32bit() {
 define dso_local void @ldst_float() {
 ; CHECK-LABEL: ldst_float:
 
-  %addr_8bit = load i8*, i8** @varptr
-  %addrfp_8 = getelementptr i8, i8* %addr_8bit, i64 -5
-  %addrfp = bitcast i8* %addrfp_8 to float*
+  %addr_8bit = load ptr, ptr @varptr
+  %addrfp_8 = getelementptr i8, ptr %addr_8bit, i64 -5
 
-  %valfp = load volatile float, float* %addrfp
+  %valfp = load volatile float, ptr %addrfp_8
 ; CHECK: ldur {{s[0-9]+}}, [{{x[0-9]+}}, #-5]
 ; CHECK-NOFP-NOT: ldur {{s[0-9]+}},
 
-  store volatile float %valfp, float* %addrfp
+  store volatile float %valfp, ptr %addrfp_8
 ; CHECK: stur {{s[0-9]+}}, [{{x[0-9]+}}, #-5]
 ; CHECK-NOFP-NOT: stur {{s[0-9]+}},
 
@@ -207,15 +194,14 @@ define dso_local void @ldst_float() {
 define dso_local void @ldst_double() {
 ; CHECK-LABEL: ldst_double:
 
-  %addr_8bit = load i8*, i8** @varptr
-  %addrfp_8 = getelementptr i8, i8* %addr_8bit, i64 4
-  %addrfp = bitcast i8* %addrfp_8 to double*
+  %addr_8bit = load ptr, ptr @varptr
+  %addrfp_8 = getelementptr i8, ptr %addr_8bit, i64 4
 
-  %valfp = load volatile double, double* %addrfp
+  %valfp = load volatile double, ptr %addrfp_8
 ; CHECK: ldur {{d[0-9]+}}, [{{x[0-9]+}}, #4]
 ; CHECK-NOFP-NOT: ldur {{d[0-9]+}},
 
-  store volatile double %valfp, double* %addrfp
+  store volatile double %valfp, ptr %addrfp_8
 ; CHECK: stur {{d[0-9]+}}, [{{x[0-9]+}}, #4]
 ; CHECK-NOFP-NOT: stur {{d[0-9]+}},
 

diff  --git a/llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll b/llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll
index 76224e8fd1e55..0ca3483080003 100644
--- a/llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll
+++ b/llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll
@@ -16,7 +16,7 @@ define i32 @ld_s8_32() {
 ; CHECK-NEXT:    adrp x8, var_8bit
 ; CHECK-NEXT:    ldrsb w0, [x8, :lo12:var_8bit]
 ; CHECK-NEXT:    ret
-   %val8_sext32 = load i8, i8* @var_8bit
+   %val8_sext32 = load i8, ptr @var_8bit
    %val32_signed = sext i8 %val8_sext32 to i32
    ret i32 %val32_signed
 }
@@ -27,7 +27,7 @@ define i32 @ld_u8_32() {
 ; CHECK-NEXT:    adrp x8, var_8bit
 ; CHECK-NEXT:    ldrb w0, [x8, :lo12:var_8bit]
 ; CHECK-NEXT:    ret
-  %val8_zext32 = load i8, i8* @var_8bit
+  %val8_zext32 = load i8, ptr @var_8bit
   %val32_unsigned = zext i8 %val8_zext32 to i32
   ret i32 %val32_unsigned
 }
@@ -38,7 +38,7 @@ define i64 @ld_s8_64() {
 ; CHECK-NEXT:    adrp x8, var_8bit
 ; CHECK-NEXT:    ldrsb x0, [x8, :lo12:var_8bit]
 ; CHECK-NEXT:    ret
-  %val8_sext64 = load i8, i8* @var_8bit
+  %val8_sext64 = load i8, ptr @var_8bit
   %val64_signed = sext i8 %val8_sext64 to i64
   ret i64 %val64_signed
 }
@@ -49,7 +49,7 @@ define i64 @ld_u8_64() {
 ; CHECK-NEXT:    adrp x8, var_8bit
 ; CHECK-NEXT:    ldrb w0, [x8, :lo12:var_8bit]
 ; CHECK-NEXT:    ret
-  %val8_zext64 = load i8, i8* @var_8bit
+  %val8_zext64 = load i8, ptr @var_8bit
   %val64_unsigned = zext i8 %val8_zext64 to i64
   ret i64 %val64_unsigned
 }
@@ -61,7 +61,7 @@ define i8 @ld_a8_8() {
 ; CHECK-NEXT:    ldrb w8, [x8, :lo12:var_8bit]
 ; CHECK-NEXT:    add w0, w8, #1
 ; CHECK-NEXT:    ret
-  %val8_anyext = load i8, i8* @var_8bit
+  %val8_anyext = load i8, ptr @var_8bit
   %newval8 = add i8 %val8_anyext, 1
   ret i8 %newval8
 }
@@ -73,7 +73,7 @@ define void @st_i32_8(i32 %val32) {
 ; CHECK-NEXT:    strb w0, [x8, :lo12:var_8bit]
 ; CHECK-NEXT:    ret
   %val8_trunc32 = trunc i32 %val32 to i8
-  store i8 %val8_trunc32, i8* @var_8bit
+  store i8 %val8_trunc32, ptr @var_8bit
   ret void
 }
 
@@ -84,7 +84,7 @@ define void @st_i64_8(i64 %val64) {
 ; CHECK-NEXT:    strb w0, [x8, :lo12:var_8bit]
 ; CHECK-NEXT:    ret
   %val8_trunc64 = trunc i64 %val64 to i8
-  store i8 %val8_trunc64, i8* @var_8bit
+  store i8 %val8_trunc64, ptr @var_8bit
   ret void
 }
 
@@ -95,7 +95,7 @@ define i32 @ld_s16_32() {
 ; CHECK-NEXT:    adrp x8, var_16bit
 ; CHECK-NEXT:    ldrsh w0, [x8, :lo12:var_16bit]
 ; CHECK-NEXT:    ret
-   %val16_sext32 = load i16, i16* @var_16bit
+   %val16_sext32 = load i16, ptr @var_16bit
    %val32_signed = sext i16 %val16_sext32 to i32
    ret i32 %val32_signed
 }
@@ -106,7 +106,7 @@ define i32 @ld_u16_32() {
 ; CHECK-NEXT:    adrp x8, var_16bit
 ; CHECK-NEXT:    ldrh w0, [x8, :lo12:var_16bit]
 ; CHECK-NEXT:    ret
-  %val16_zext32 = load i16, i16* @var_16bit
+  %val16_zext32 = load i16, ptr @var_16bit
   %val32_unsigned = zext i16 %val16_zext32 to i32
   ret i32 %val32_unsigned
 }
@@ -117,7 +117,7 @@ define i64 @ld_s16_64() {
 ; CHECK-NEXT:    adrp x8, var_16bit
 ; CHECK-NEXT:    ldrsh x0, [x8, :lo12:var_16bit]
 ; CHECK-NEXT:    ret
-  %val16_sext64 = load i16, i16* @var_16bit
+  %val16_sext64 = load i16, ptr @var_16bit
   %val64_signed = sext i16 %val16_sext64 to i64
   ret i64 %val64_signed
 }
@@ -128,7 +128,7 @@ define i64 @ld_u16_64() {
 ; CHECK-NEXT:    adrp x8, var_16bit
 ; CHECK-NEXT:    ldrh w0, [x8, :lo12:var_16bit]
 ; CHECK-NEXT:    ret
-  %val16_zext64 = load i16, i16* @var_16bit
+  %val16_zext64 = load i16, ptr @var_16bit
   %val64_unsigned = zext i16 %val16_zext64 to i64
   ret i64 %val64_unsigned
 }
@@ -140,7 +140,7 @@ define i16 @ld_a16_16() {
 ; CHECK-NEXT:    ldrh w8, [x8, :lo12:var_16bit]
 ; CHECK-NEXT:    add w0, w8, #1
 ; CHECK-NEXT:    ret
-  %val16_anyext = load i16, i16* @var_16bit
+  %val16_anyext = load i16, ptr @var_16bit
   %newval16 = add i16 %val16_anyext, 1
   ret i16 %newval16
 }
@@ -152,7 +152,7 @@ define void @st_i32_16(i32 %val32) {
 ; CHECK-NEXT:    strh w0, [x8, :lo12:var_16bit]
 ; CHECK-NEXT:    ret
   %val16_trunc32 = trunc i32 %val32 to i16
-  store i16 %val16_trunc32, i16* @var_16bit
+  store i16 %val16_trunc32, ptr @var_16bit
   ret void
 }
 
@@ -163,7 +163,7 @@ define void @st_i64_16(i64 %val64) {
 ; CHECK-NEXT:    strh w0, [x8, :lo12:var_16bit]
 ; CHECK-NEXT:    ret
   %val16_trunc64 = trunc i64 %val64 to i16
-  store i16 %val16_trunc64, i16* @var_16bit
+  store i16 %val16_trunc64, ptr @var_16bit
   ret void
 }
 
@@ -174,7 +174,7 @@ define i64 @ld_s32_64() {
 ; CHECK-NEXT:    adrp x8, var_32bit
 ; CHECK-NEXT:    ldrsw x0, [x8, :lo12:var_32bit]
 ; CHECK-NEXT:    ret
-  %val32_sext64 = load i32, i32* @var_32bit
+  %val32_sext64 = load i32, ptr @var_32bit
   %val64_signed = sext i32 %val32_sext64 to i64
   ret i64 %val64_signed
 }
@@ -185,7 +185,7 @@ define i64 @ld_u32_64() {
 ; CHECK-NEXT:    adrp x8, var_32bit
 ; CHECK-NEXT:    ldr w0, [x8, :lo12:var_32bit]
 ; CHECK-NEXT:    ret
-  %val32_zext64 = load i32, i32* @var_32bit
+  %val32_zext64 = load i32, ptr @var_32bit
   %val64_unsigned = zext i32 %val32_zext64 to i64
   ret i64 %val64_unsigned
 }
@@ -197,7 +197,7 @@ define i32 @ld_a32_32() {
 ; CHECK-NEXT:    ldr w8, [x8, :lo12:var_32bit]
 ; CHECK-NEXT:    add w0, w8, #1
 ; CHECK-NEXT:    ret
-  %val32_anyext = load i32, i32* @var_32bit
+  %val32_anyext = load i32, ptr @var_32bit
   %newval32 = add i32 %val32_anyext, 1
   ret i32 %newval32
 }
@@ -209,98 +209,98 @@ define void @st_i64_32(i64 %val64) {
 ; CHECK-NEXT:    str w0, [x8, :lo12:var_32bit]
 ; CHECK-NEXT:    ret
   %val32_trunc64 = trunc i64 %val64 to i32
-  store i32 %val32_trunc64, i32* @var_32bit
+  store i32 %val32_trunc64, ptr @var_32bit
   ret void
 }
 
 
-@arr8 = dso_local global i8* null
-@arr16 = dso_local global i16* null
-@arr32 = dso_local global i32* null
-@arr64 = dso_local global i64* null
+@arr8 = dso_local global ptr null
+@arr16 = dso_local global ptr null
+@arr32 = dso_local global ptr null
+@arr64 = dso_local global ptr null
 
 ; Now check that our selection copes with accesses more complex than a
 ; single symbol. Permitted offsets should be folded into the loads and
 ; stores. Since all forms use the same Operand it's only necessary to
 ; check the various access-sizes involved.
 
-define i8 @ld_i8_1(i8* %arr8_addr) {
+define i8 @ld_i8_1(ptr %arr8_addr) {
 ; CHECK-LABEL: ld_i8_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrb w0, [x0, #1]
 ; CHECK-NEXT:    ret
-  %arr8_sub1_addr = getelementptr i8, i8* %arr8_addr, i64 1
-  %arr8_sub1 = load volatile i8, i8* %arr8_sub1_addr
+  %arr8_sub1_addr = getelementptr i8, ptr %arr8_addr, i64 1
+  %arr8_sub1 = load volatile i8, ptr %arr8_sub1_addr
   ret i8 %arr8_sub1
 }
 
-define i8 @ld_i8_4095(i8* %arr8_addr) {
+define i8 @ld_i8_4095(ptr %arr8_addr) {
 ; CHECK-LABEL: ld_i8_4095:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrb w0, [x0, #4095]
 ; CHECK-NEXT:    ret
-  %arr8_sub4095_addr = getelementptr i8, i8* %arr8_addr, i64 4095
-  %arr8_sub4095 = load volatile i8, i8* %arr8_sub4095_addr
+  %arr8_sub4095_addr = getelementptr i8, ptr %arr8_addr, i64 4095
+  %arr8_sub4095 = load volatile i8, ptr %arr8_sub4095_addr
   ret i8 %arr8_sub4095
 }
 
-define i16 @ld_i16_1(i16* %arr16_addr) {
+define i16 @ld_i16_1(ptr %arr16_addr) {
 ; CHECK-LABEL: ld_i16_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w0, [x0, #2]
 ; CHECK-NEXT:    ret
-  %arr16_sub1_addr = getelementptr i16, i16* %arr16_addr, i64 1
-  %arr16_sub1 = load volatile i16, i16* %arr16_sub1_addr
+  %arr16_sub1_addr = getelementptr i16, ptr %arr16_addr, i64 1
+  %arr16_sub1 = load volatile i16, ptr %arr16_sub1_addr
   ret i16 %arr16_sub1
 }
 
-define i16 @ld_i16_4095(i16* %arr16_addr) {
+define i16 @ld_i16_4095(ptr %arr16_addr) {
 ; CHECK-LABEL: ld_i16_4095:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w0, [x0, #8190]
 ; CHECK-NEXT:    ret
-  %arr16_sub4095_addr = getelementptr i16, i16* %arr16_addr, i64 4095
-  %arr16_sub4095 = load volatile i16, i16* %arr16_sub4095_addr
+  %arr16_sub4095_addr = getelementptr i16, ptr %arr16_addr, i64 4095
+  %arr16_sub4095 = load volatile i16, ptr %arr16_sub4095_addr
   ret i16 %arr16_sub4095
 }
 
-define i32 @ld_i32_1(i32* %arr32_addr) {
+define i32 @ld_i32_1(ptr %arr32_addr) {
 ; CHECK-LABEL: ld_i32_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w0, [x0, #4]
 ; CHECK-NEXT:    ret
-  %arr32_sub1_addr = getelementptr i32, i32* %arr32_addr, i64 1
-  %arr32_sub1 = load volatile i32, i32* %arr32_sub1_addr
+  %arr32_sub1_addr = getelementptr i32, ptr %arr32_addr, i64 1
+  %arr32_sub1 = load volatile i32, ptr %arr32_sub1_addr
   ret i32 %arr32_sub1
 }
 
-define i32 @ld_i32_4095(i32* %arr32_addr) {
+define i32 @ld_i32_4095(ptr %arr32_addr) {
 ; CHECK-LABEL: ld_i32_4095:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w0, [x0, #16380]
 ; CHECK-NEXT:    ret
-  %arr32_sub4095_addr = getelementptr i32, i32* %arr32_addr, i64 4095
-  %arr32_sub4095 = load volatile i32, i32* %arr32_sub4095_addr
+  %arr32_sub4095_addr = getelementptr i32, ptr %arr32_addr, i64 4095
+  %arr32_sub4095 = load volatile i32, ptr %arr32_sub4095_addr
   ret i32 %arr32_sub4095
 }
 
-define i64 @ld_i64_1(i64* %arr64_addr) {
+define i64 @ld_i64_1(ptr %arr64_addr) {
 ; CHECK-LABEL: ld_i64_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x0, [x0, #8]
 ; CHECK-NEXT:    ret
-  %arr64_sub1_addr = getelementptr i64, i64* %arr64_addr, i64 1
-  %arr64_sub1 = load volatile i64, i64* %arr64_sub1_addr
+  %arr64_sub1_addr = getelementptr i64, ptr %arr64_addr, i64 1
+  %arr64_sub1 = load volatile i64, ptr %arr64_sub1_addr
   ret i64 %arr64_sub1
 }
 
-define i64 @ld_i64_4095(i64* %arr64_addr) {
+define i64 @ld_i64_4095(ptr %arr64_addr) {
 ; CHECK-LABEL: ld_i64_4095:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x0, [x0, #32760]
 ; CHECK-NEXT:    ret
-  %arr64_sub4095_addr = getelementptr i64, i64* %arr64_addr, i64 4095
-  %arr64_sub4095 = load volatile i64, i64* %arr64_sub4095_addr
+  %arr64_sub4095_addr = getelementptr i64, ptr %arr64_addr, i64 4095
+  %arr64_sub4095 = load volatile i64, ptr %arr64_sub4095_addr
   ret i64 %arr64_sub4095
 }
 
@@ -318,8 +318,8 @@ define dso_local void @ldst_float() {
 ; CHECK-NOFP-NEXT:    ldr w9, [x8, :lo12:var_float]
 ; CHECK-NOFP-NEXT:    str w9, [x8, :lo12:var_float]
 ; CHECK-NOFP-NEXT:    ret
-  %valfp = load volatile float, float* @var_float
-  store volatile float %valfp, float* @var_float
+  %valfp = load volatile float, ptr @var_float
+  store volatile float %valfp, ptr @var_float
   ret void
 }
 
@@ -337,7 +337,7 @@ define dso_local void @ldst_double() {
 ; CHECK-NOFP-NEXT:    ldr x9, [x8, :lo12:var_double]
 ; CHECK-NOFP-NEXT:    str x9, [x8, :lo12:var_double]
 ; CHECK-NOFP-NEXT:    ret
-  %valfp = load volatile double, double* @var_double
-  store volatile double %valfp, double* @var_double
+  %valfp = load volatile double, ptr @var_double
+  store volatile double %valfp, ptr @var_double
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/ldst-zero.ll b/llvm/test/CodeGen/AArch64/ldst-zero.ll
index 0ada6fd84cbf3..1fcf74ae5ac4b 100644
--- a/llvm/test/CodeGen/AArch64/ldst-zero.ll
+++ b/llvm/test/CodeGen/AArch64/ldst-zero.ll
@@ -3,69 +3,65 @@
 ; Tests to check that zero stores which are generated as STP xzr, xzr aren't
 ; scheduled incorrectly due to incorrect alias information
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
-%struct.tree_common = type { i8*, i8*, i32 }
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
+%struct.tree_common = type { ptr, ptr, i32 }
 
 ; Original test case which exhibited the bug
-define void @test1(%struct.tree_common* %t, i32 %code, i8* %type) {
+define void @test1(ptr %t, i32 %code, ptr %type) {
 ; CHECK-LABEL: test1:
 ; CHECK-DAG: stp x2, xzr, [x0, #8]
 ; CHECK-DAG: str w1, [x0, #16]
 ; CHECK-DAG: str xzr, [x0]
 entry:
-  %0 = bitcast %struct.tree_common* %t to i8*
-  tail call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 24, i1 false)
-  %code1 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 2
-  store i32 %code, i32* %code1, align 8
-  %type2 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 1
-  store i8* %type, i8** %type2, align 8
+  tail call void @llvm.memset.p0.i64(ptr align 8 %t, i8 0, i64 24, i1 false)
+  %code1 = getelementptr inbounds %struct.tree_common, ptr %t, i64 0, i32 2
+  store i32 %code, ptr %code1, align 8
+  %type2 = getelementptr inbounds %struct.tree_common, ptr %t, i64 0, i32 1
+  store ptr %type, ptr %type2, align 8
   ret void
 }
 
 ; Store to each struct element instead of using memset
-define void @test2(%struct.tree_common* %t, i32 %code, i8* %type) {
+define void @test2(ptr %t, i32 %code, ptr %type) {
 ; CHECK-LABEL: test2:
 ; CHECK-DAG: str w1, [x0, #16]
 ; CHECK-DAG: stp xzr, x2, [x0]
 entry:
-  %0 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 0
-  %1 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 1
-  %2 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 2
-  store i8* zeroinitializer, i8** %0, align 8
-  store i8* zeroinitializer, i8** %1, align 8
-  store i32 zeroinitializer, i32* %2, align 8
-  store i32 %code, i32* %2, align 8
-  store i8* %type, i8** %1, align 8
+  %0 = getelementptr inbounds %struct.tree_common, ptr %t, i64 0, i32 1
+  %1 = getelementptr inbounds %struct.tree_common, ptr %t, i64 0, i32 2
+  store ptr zeroinitializer, ptr %t, align 8
+  store ptr zeroinitializer, ptr %0, align 8
+  store i32 zeroinitializer, ptr %1, align 8
+  store i32 %code, ptr %1, align 8
+  store ptr %type, ptr %0, align 8
   ret void
 }
 
 ; Vector store instead of memset
-define void @test3(%struct.tree_common* %t, i32 %code, i8* %type) {
+define void @test3(ptr %t, i32 %code, ptr %type) {
 ; CHECK-LABEL: test3:
 ; CHECK-DAG: stp x2, xzr, [x0, #8]
 ; CHECK-DAG: str w1, [x0, #16]
 ; CHECK-DAG: str xzr, [x0]
 entry:
-  %0 = bitcast %struct.tree_common* %t to <3 x i64>*
-  store <3 x i64> zeroinitializer, <3 x i64>* %0, align 8
-  %code1 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 2
-  store i32 %code, i32* %code1, align 8
-  %type2 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 1
-  store i8* %type, i8** %type2, align 8
+  store <3 x i64> zeroinitializer, ptr %t, align 8
+  %code1 = getelementptr inbounds %struct.tree_common, ptr %t, i64 0, i32 2
+  store i32 %code, ptr %code1, align 8
+  %type2 = getelementptr inbounds %struct.tree_common, ptr %t, i64 0, i32 1
+  store ptr %type, ptr %type2, align 8
   ret void
 }
 
 ; Vector store, then store to vector elements
-define void @test4(<3 x i64>* %p, i64 %x, i64 %y) {
+define void @test4(ptr %p, i64 %x, i64 %y) {
 ; CHECK-LABEL: test4:
 ; CHECK-DAG: stp x2, x1, [x0, #8]
 ; CHECK-DAG: str xzr, [x0]
 entry:
-  store <3 x i64> zeroinitializer, <3 x i64>* %p, align 8
-  %0 = bitcast <3 x i64>* %p to i64*
-  %1 = getelementptr inbounds i64, i64* %0, i64 2
-  store i64 %x, i64* %1, align 8
-  %2 = getelementptr inbounds i64, i64* %0, i64 1
-  store i64 %y, i64* %2, align 8
+  store <3 x i64> zeroinitializer, ptr %p, align 8
+  %0 = getelementptr inbounds i64, ptr %p, i64 2
+  store i64 %x, ptr %0, align 8
+  %1 = getelementptr inbounds i64, ptr %p, i64 1
+  store i64 %y, ptr %1, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/literal_pools_float.ll b/llvm/test/CodeGen/AArch64/literal_pools_float.ll
index 6e396ddca52f3..19725ecfa3390 100644
--- a/llvm/test/CodeGen/AArch64/literal_pools_float.ll
+++ b/llvm/test/CodeGen/AArch64/literal_pools_float.ll
@@ -11,7 +11,7 @@
 define dso_local void @floating_lits() optsize {
 ; CHECK-LABEL: floating_lits:
 
-  %floatval = load float, float* @varfloat
+  %floatval = load float, ptr @varfloat
   %newfloat = fadd float %floatval, 511.0
 ; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI[0-9]+_[0-9]+]]
 ; CHECK: ldr [[LIT128:s[0-9]+]], [x[[LITBASE]], {{#?}}:lo12:[[CURLIT]]]
@@ -29,9 +29,9 @@ define dso_local void @floating_lits() optsize {
 ; CHECK-NOFP-LARGE-NOT: ldr {{s[0-9]+}},
 ; CHECK-NOFP-LARGE-NOT: fadd
 
-  store float %newfloat, float* @varfloat
+  store float %newfloat, ptr @varfloat
 
-  %doubleval = load double, double* @vardouble
+  %doubleval = load double, ptr @vardouble
   %newdouble = fadd double %doubleval, 511.0
 ; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI[0-9]+_[0-9]+]]
 ; CHECK: ldr [[LIT129:d[0-9]+]], [x[[LITBASE]], {{#?}}:lo12:[[CURLIT]]]
@@ -49,7 +49,7 @@ define dso_local void @floating_lits() optsize {
 ; CHECK-LARGE: ldr {{d[0-9]+}}, [x[[LITADDR]]]
 ; CHECK-NOFP-LARGE-NOT: ldr {{d[0-9]+}},
 
-  store double %newdouble, double* @vardouble
+  store double %newdouble, ptr @vardouble
 
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/llvm-masked-gather-legal-for-sve.ll b/llvm/test/CodeGen/AArch64/llvm-masked-gather-legal-for-sve.ll
index 8427fdd24c0d5..09f09e2999f2c 100644
--- a/llvm/test/CodeGen/AArch64/llvm-masked-gather-legal-for-sve.ll
+++ b/llvm/test/CodeGen/AArch64/llvm-masked-gather-legal-for-sve.ll
@@ -5,8 +5,8 @@
 
 ; CHECK-LABEL: @masked_gather_nxv4i32(
 ; CHECK: call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32
-define <vscale x 4 x i32> @masked_gather_nxv4i32(<vscale x 4 x i32*> %ld, <vscale x 4 x i1> %masks, <vscale x 4 x i32> %passthru) {
-  %res = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*> %ld, i32 0, <vscale x 4 x i1> %masks, <vscale x 4 x i32> %passthru)
+define <vscale x 4 x i32> @masked_gather_nxv4i32(<vscale x 4 x ptr> %ld, <vscale x 4 x i1> %masks, <vscale x 4 x i32> %passthru) {
+  %res = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ld, i32 0, <vscale x 4 x i1> %masks, <vscale x 4 x i32> %passthru)
   ret <vscale x 4 x i32> %res
 }
 
@@ -15,8 +15,8 @@ define <vscale x 4 x i32> @masked_gather_nxv4i32(<vscale x 4 x i32*> %ld, <vscal
 
 ; CHECK-LABEL: @masked_gather_nxv2f64(
 ; CHECK: call <vscale x 2 x double> @llvm.masked.gather.nxv2f64
-define <vscale x 2 x double> @masked_gather_nxv2f64(<vscale x 2 x double*> %ld, <vscale x 2 x i1> %masks, <vscale x 2 x double> %passthru) {
-  %res = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*> %ld, i32 0, <vscale x 2 x i1> %masks, <vscale x 2 x double> %passthru)
+define <vscale x 2 x double> @masked_gather_nxv2f64(<vscale x 2 x ptr> %ld, <vscale x 2 x i1> %masks, <vscale x 2 x double> %passthru) {
+  %res = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ld, i32 0, <vscale x 2 x i1> %masks, <vscale x 2 x double> %passthru)
   ret <vscale x 2 x double> %res
 }
 
@@ -25,8 +25,8 @@ define <vscale x 2 x double> @masked_gather_nxv2f64(<vscale x 2 x double*> %ld,
 
 ; CHECK-LABEL: @masked_gather_nxv2f16(
 ; CHECK: call <vscale x 2 x half> @llvm.masked.gather.nxv2f16
-define <vscale x 2 x half> @masked_gather_nxv2f16(<vscale x 2 x half*> %ld, <vscale x 2 x i1> %masks, <vscale x 2 x half> %passthru) {
-  %res = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ld, i32 0, <vscale x 2 x i1> %masks, <vscale x 2 x half> %passthru)
+define <vscale x 2 x half> @masked_gather_nxv2f16(<vscale x 2 x ptr> %ld, <vscale x 2 x i1> %masks, <vscale x 2 x half> %passthru) {
+  %res = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ld, i32 0, <vscale x 2 x i1> %masks, <vscale x 2 x half> %passthru)
   ret <vscale x 2 x half> %res
 }
 
@@ -36,8 +36,8 @@ define <vscale x 2 x half> @masked_gather_nxv2f16(<vscale x 2 x half*> %ld, <vsc
 
 ; CHECK-LABEL: @masked_gather_v2f32(
 ; CHECK-NOT: @llvm.masked.gather.v2f32(
-define <2 x float> @masked_gather_v2f32(<2 x float*> %ld, <2 x i1> %masks, <2 x float> %passthru) {
-  %res = call <2 x float> @llvm.masked.gather.v2f32(<2 x float*> %ld, i32 0, <2 x i1> %masks, <2 x float> %passthru)
+define <2 x float> @masked_gather_v2f32(<2 x ptr> %ld, <2 x i1> %masks, <2 x float> %passthru) {
+  %res = call <2 x float> @llvm.masked.gather.v2f32(<2 x ptr> %ld, i32 0, <2 x i1> %masks, <2 x float> %passthru)
   ret <2 x float> %res
 }
 
@@ -47,13 +47,13 @@ define <2 x float> @masked_gather_v2f32(<2 x float*> %ld, <2 x i1> %masks, <2 x
 
 ; CHECK-LABEL: @masked_gather_v4i32(
 ; CHECK-NOT: @llvm.masked.gather.v4i32(
-define <4 x i32> @masked_gather_v4i32(<4 x i32*> %ld, <4 x i1> %masks, <4 x i32> %passthru) {
-  %res = call <4 x i32> @llvm.masked.gather.v4i32(<4 x i32*> %ld, i32 0, <4 x i1> %masks, <4 x i32> %passthru)
+define <4 x i32> @masked_gather_v4i32(<4 x ptr> %ld, <4 x i1> %masks, <4 x i32> %passthru) {
+  %res = call <4 x i32> @llvm.masked.gather.v4i32(<4 x ptr> %ld, i32 0, <4 x i1> %masks, <4 x i32> %passthru)
   ret <4 x i32> %res
 }
 
-declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*> %ptrs, i32 %align, <vscale x 4 x i1> %masks, <vscale x 4 x i32> %passthru)
-declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*> %ptrs, i32 %align, <vscale x 2 x i1> %masks, <vscale x 2 x double> %passthru)
-declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 %align, <vscale x 2 x i1> %masks, <vscale x 2 x half> %passthru)
-declare <2 x float> @llvm.masked.gather.v2f32(<2 x float*> %ptrs, i32 %align, <2 x i1> %masks, <2 x float> %passthru)
-declare <4 x i32> @llvm.masked.gather.v4i32(<4 x i32*> %ptrs, i32 %align, <4 x i1> %masks, <4 x i32> %passthru)
+declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 %align, <vscale x 4 x i1> %masks, <vscale x 4 x i32> %passthru)
+declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 %align, <vscale x 2 x i1> %masks, <vscale x 2 x double> %passthru)
+declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 %align, <vscale x 2 x i1> %masks, <vscale x 2 x half> %passthru)
+declare <2 x float> @llvm.masked.gather.v2f32(<2 x ptr> %ptrs, i32 %align, <2 x i1> %masks, <2 x float> %passthru)
+declare <4 x i32> @llvm.masked.gather.v4i32(<4 x ptr> %ptrs, i32 %align, <4 x i1> %masks, <4 x i32> %passthru)

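The file above applies the same one-line substitution throughout: every <vscale x N x T*> or <M x T*> address operand becomes a vector of ptr, and nothing else about the gather changes. As a minimal standalone sketch (not part of the patch; the function name is a placeholder and the AArch64+SVE target implied by the test's RUN line is assumed), the converted scalable form boils down to:

define <vscale x 4 x i32> @gather_sketch(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %passthru) {
  ; The gather is now typed solely by its result; the <vscale x 4 x ptr>
  ; operand carries no pointee type.
  %res = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %passthru)
  ret <vscale x 4 x i32> %res
}

declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
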
diff --git a/llvm/test/CodeGen/AArch64/llvm-masked-scatter-legal-for-sve.ll b/llvm/test/CodeGen/AArch64/llvm-masked-scatter-legal-for-sve.ll
index fa368718a5524..0a33dac76847e 100644
--- a/llvm/test/CodeGen/AArch64/llvm-masked-scatter-legal-for-sve.ll
+++ b/llvm/test/CodeGen/AArch64/llvm-masked-scatter-legal-for-sve.ll
@@ -5,8 +5,8 @@
 
 ; CHECK-LABEL: @masked_scatter_nxv4i32(
 ; CHECK: call void @llvm.masked.scatter.nxv4i32
-define void @masked_scatter_nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32*> %ptrs, <vscale x 4 x i1> %masks) {
-  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+define void @masked_scatter_nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %masks) {
+  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
@@ -15,8 +15,8 @@ define void @masked_scatter_nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32*
 
 ; CHECK-LABEL: @masked_scatter_nxv2f64(
 ; CHECK: call void @llvm.masked.scatter.nxv2f64
-define void @masked_scatter_nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double*> %ptrs, <vscale x 2 x i1> %masks) {
-  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+define void @masked_scatter_nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %masks) {
+  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
@@ -25,8 +25,8 @@ define void @masked_scatter_nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x d
 
 ; CHECK-LABEL: @masked_scatter_nxv2f16(
 ; CHECK: call void @llvm.masked.scatter.nxv2f16
-define void @masked_scatter_nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, <vscale x 2 x i1> %masks) {
-  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+define void @masked_scatter_nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %masks) {
+  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
@@ -36,8 +36,8 @@ define void @masked_scatter_nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x hal
 
 ; CHECK-LABEL: @masked_scatter_v2f32(
 ; CHECK-NOT: @llvm.masked.scatter.v2f32(
-define void @masked_scatter_v2f32(<2 x float> %data, <2 x float*> %ptrs, <2 x i1> %masks) {
-  call void @llvm.masked.scatter.v2f32(<2 x float> %data, <2 x float*> %ptrs, i32 0, <2 x i1> %masks)
+define void @masked_scatter_v2f32(<2 x float> %data, <2 x ptr> %ptrs, <2 x i1> %masks) {
+  call void @llvm.masked.scatter.v2f32(<2 x float> %data, <2 x ptr> %ptrs, i32 0, <2 x i1> %masks)
   ret void
 }
 
@@ -47,13 +47,13 @@ define void @masked_scatter_v2f32(<2 x float> %data, <2 x float*> %ptrs, <2 x i1
 
 ; CHECK-LABEL: @masked_scatter_v4i32(
 ; CHECK-NOT: @llvm.masked.scatter.v4i32(
-define void @masked_scatter_v4i32(<4 x i32> %data, <4 x i32*> %ptrs, <4 x i1> %masks) {
-  call void @llvm.masked.scatter.v4i32(<4 x i32> %data, <4 x i32*> %ptrs, i32 0, <4 x i1> %masks)
+define void @masked_scatter_v4i32(<4 x i32> %data, <4 x ptr> %ptrs, <4 x i1> %masks) {
+  call void @llvm.masked.scatter.v4i32(<4 x i32> %data, <4 x ptr> %ptrs, i32 0, <4 x i1> %masks)
   ret void
 }
 
-declare void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32*> %ptrs, i32 %align, <vscale x 4 x i1> %masks)
-declare void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double*> %ptrs, i32 %align, <vscale x 2 x i1> %masks)
-declare void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, i32 %align, <vscale x 2 x i1> %masks)
-declare void @llvm.masked.scatter.v2f32(<2 x float> %data, <2 x float*> %ptrs, i32 %align, <2 x i1> %masks)
-declare void @llvm.masked.scatter.v4i32(<4 x i32> %data, <4 x i32*> %ptrs, i32 %align, <4 x i1> %masks)
+declare void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 %align, <vscale x 4 x i1> %masks)
+declare void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %ptrs, i32 %align, <vscale x 2 x i1> %masks)
+declare void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x ptr> %ptrs, i32 %align, <vscale x 2 x i1> %masks)
+declare void @llvm.masked.scatter.v2f32(<2 x float> %data, <2 x ptr> %ptrs, i32 %align, <2 x i1> %masks)
+declare void @llvm.masked.scatter.v4i32(<4 x i32> %data, <4 x ptr> %ptrs, i32 %align, <4 x i1> %masks)

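The scatter file is the store-side mirror of the same rewrite; condensed to one scalable case (again only a sketch with a placeholder name, not text taken from the patch):

define void @scatter_sketch(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mask) {
  ; The stored value keeps its element type; the addresses are an untyped
  ; vector of ptr.
  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %mask)
  ret void
}

declare void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
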
diff --git a/llvm/test/CodeGen/AArch64/load-combine-big-endian.ll b/llvm/test/CodeGen/AArch64/load-combine-big-endian.ll
index bff4f2113df3a..3b86acc7720e3 100644
--- a/llvm/test/CodeGen/AArch64/load-combine-big-endian.ll
+++ b/llvm/test/CodeGen/AArch64/load-combine-big-endian.ll
@@ -1,54 +1,52 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64_be-unknown | FileCheck %s
 
-; i8* p; // p is 4 byte aligned
+; ptr p; // p is 4 byte aligned
 ; ((i32) p[0] << 24) | ((i32) p[1] << 16) | ((i32) p[2] << 8) | (i32) p[3]
-define i32 @load_i32_by_i8_big_endian(i32* %arg) {
+define i32 @load_i32_by_i8_big_endian(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_i8_big_endian:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w0, [x0]
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = load i8, i8* %tmp, align 4
+  %tmp1 = load i8, ptr %arg, align 4
   %tmp2 = zext i8 %tmp1 to i32
   %tmp3 = shl nuw nsw i32 %tmp2, 24
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 16
   %tmp8 = or i32 %tmp7, %tmp3
-  %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
-  %tmp10 = load i8, i8* %tmp9, align 1
+  %tmp9 = getelementptr inbounds i8, ptr %arg, i32 2
+  %tmp10 = load i8, ptr %tmp9, align 1
   %tmp11 = zext i8 %tmp10 to i32
   %tmp12 = shl nuw nsw i32 %tmp11, 8
   %tmp13 = or i32 %tmp8, %tmp12
-  %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 3
-  %tmp15 = load i8, i8* %tmp14, align 1
+  %tmp14 = getelementptr inbounds i8, ptr %arg, i32 3
+  %tmp15 = load i8, ptr %tmp14, align 1
   %tmp16 = zext i8 %tmp15 to i32
   %tmp17 = or i32 %tmp13, %tmp16
   ret i32 %tmp17
 }
 
-; i8* p; // p is 4 byte aligned
+; ptr p; // p is 4 byte aligned
 ; ((i32) (((i16) p[0] << 8) | (i16) p[1]) << 16) | (i32) (((i16) p[3] << 8) | (i16) p[4])
-define i32 @load_i32_by_i16_by_i8_big_endian(i32* %arg) {
+define i32 @load_i32_by_i16_by_i8_big_endian(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_i16_by_i8_big_endian:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w0, [x0]
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = load i8, i8* %tmp, align 4
+  %tmp1 = load i8, ptr %arg, align 4
   %tmp2 = zext i8 %tmp1 to i16
-  %tmp3 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp5 = zext i8 %tmp4 to i16
   %tmp6 = shl nuw nsw i16 %tmp2, 8
   %tmp7 = or i16 %tmp6, %tmp5
-  %tmp8 = getelementptr inbounds i8, i8* %tmp, i32 2
-  %tmp9 = load i8, i8* %tmp8, align 1
+  %tmp8 = getelementptr inbounds i8, ptr %arg, i32 2
+  %tmp9 = load i8, ptr %tmp8, align 1
   %tmp10 = zext i8 %tmp9 to i16
-  %tmp11 = getelementptr inbounds i8, i8* %tmp, i32 3
-  %tmp12 = load i8, i8* %tmp11, align 1
+  %tmp11 = getelementptr inbounds i8, ptr %arg, i32 3
+  %tmp12 = load i8, ptr %tmp11, align 1
   %tmp13 = zext i8 %tmp12 to i16
   %tmp14 = shl nuw nsw i16 %tmp10, 8
   %tmp15 = or i16 %tmp14, %tmp13
@@ -59,258 +57,249 @@ define i32 @load_i32_by_i16_by_i8_big_endian(i32* %arg) {
   ret i32 %tmp19
 }
 
-; i16* p; // p is 4 byte aligned
+; ptr p; // p is 4 byte aligned
 ; ((i32) p[0] << 16) | (i32) p[1]
-define i32 @load_i32_by_i16(i32* %arg) {
+define i32 @load_i32_by_i16(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w0, [x0]
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i16*
-  %tmp1 = load i16, i16* %tmp, align 4
+  %tmp1 = load i16, ptr %arg, align 4
   %tmp2 = zext i16 %tmp1 to i32
-  %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
-  %tmp4 = load i16, i16* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i16, ptr %arg, i32 1
+  %tmp4 = load i16, ptr %tmp3, align 1
   %tmp5 = zext i16 %tmp4 to i32
   %tmp6 = shl nuw nsw i32 %tmp2, 16
   %tmp7 = or i32 %tmp6, %tmp5
   ret i32 %tmp7
 }
 
-; i16* p_16; // p_16 is 4 byte aligned
-; i8* p_8 = (i8*) p_16;
+; ptr p_16; // p_16 is 4 byte aligned
+; ptr p_8 = (ptr) p_16;
 ; (i32) (p_16[0] << 16) | ((i32) p[2] << 8) | (i32) p[3]
-define i32 @load_i32_by_i16_i8(i32* %arg) {
+define i32 @load_i32_by_i16_i8(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_i16_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w0, [x0]
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i16*
-  %tmp1 = bitcast i32* %arg to i8*
-  %tmp2 = load i16, i16* %tmp, align 4
+  %tmp2 = load i16, ptr %arg, align 4
   %tmp3 = zext i16 %tmp2 to i32
   %tmp4 = shl nuw nsw i32 %tmp3, 16
-  %tmp5 = getelementptr inbounds i8, i8* %tmp1, i32 2
-  %tmp6 = load i8, i8* %tmp5, align 1
+  %tmp5 = getelementptr inbounds i8, ptr %arg, i32 2
+  %tmp6 = load i8, ptr %tmp5, align 1
   %tmp7 = zext i8 %tmp6 to i32
   %tmp8 = shl nuw nsw i32 %tmp7, 8
-  %tmp9 = getelementptr inbounds i8, i8* %tmp1, i32 3
-  %tmp10 = load i8, i8* %tmp9, align 1
+  %tmp9 = getelementptr inbounds i8, ptr %arg, i32 3
+  %tmp10 = load i8, ptr %tmp9, align 1
   %tmp11 = zext i8 %tmp10 to i32
   %tmp12 = or i32 %tmp8, %tmp11
   %tmp13 = or i32 %tmp12, %tmp4
   ret i32 %tmp13
 }
 
-; i8* p; // p is 8 byte aligned
+; ptr p; // p is 8 byte aligned
 ; (i64) p[0] | ((i64) p[1] << 8) | ((i64) p[2] << 16) | ((i64) p[3] << 24) | ((i64) p[4] << 32) | ((i64) p[5] << 40) | ((i64) p[6] << 48) | ((i64) p[7] << 56)
-define i64 @load_i64_by_i8_bswap(i64* %arg) {
+define i64 @load_i64_by_i8_bswap(ptr %arg) {
 ; CHECK-LABEL: load_i64_by_i8_bswap:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x0]
 ; CHECK-NEXT:    rev x0, x8
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i64* %arg to i8*
-  %tmp1 = load i8, i8* %tmp, align 8
+  %tmp1 = load i8, ptr %arg, align 8
   %tmp2 = zext i8 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %tmp, i64 1
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %arg, i64 1
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp5 = zext i8 %tmp4 to i64
   %tmp6 = shl nuw nsw i64 %tmp5, 8
   %tmp7 = or i64 %tmp6, %tmp2
-  %tmp8 = getelementptr inbounds i8, i8* %tmp, i64 2
-  %tmp9 = load i8, i8* %tmp8, align 1
+  %tmp8 = getelementptr inbounds i8, ptr %arg, i64 2
+  %tmp9 = load i8, ptr %tmp8, align 1
   %tmp10 = zext i8 %tmp9 to i64
   %tmp11 = shl nuw nsw i64 %tmp10, 16
   %tmp12 = or i64 %tmp7, %tmp11
-  %tmp13 = getelementptr inbounds i8, i8* %tmp, i64 3
-  %tmp14 = load i8, i8* %tmp13, align 1
+  %tmp13 = getelementptr inbounds i8, ptr %arg, i64 3
+  %tmp14 = load i8, ptr %tmp13, align 1
   %tmp15 = zext i8 %tmp14 to i64
   %tmp16 = shl nuw nsw i64 %tmp15, 24
   %tmp17 = or i64 %tmp12, %tmp16
-  %tmp18 = getelementptr inbounds i8, i8* %tmp, i64 4
-  %tmp19 = load i8, i8* %tmp18, align 1
+  %tmp18 = getelementptr inbounds i8, ptr %arg, i64 4
+  %tmp19 = load i8, ptr %tmp18, align 1
   %tmp20 = zext i8 %tmp19 to i64
   %tmp21 = shl nuw nsw i64 %tmp20, 32
   %tmp22 = or i64 %tmp17, %tmp21
-  %tmp23 = getelementptr inbounds i8, i8* %tmp, i64 5
-  %tmp24 = load i8, i8* %tmp23, align 1
+  %tmp23 = getelementptr inbounds i8, ptr %arg, i64 5
+  %tmp24 = load i8, ptr %tmp23, align 1
   %tmp25 = zext i8 %tmp24 to i64
   %tmp26 = shl nuw nsw i64 %tmp25, 40
   %tmp27 = or i64 %tmp22, %tmp26
-  %tmp28 = getelementptr inbounds i8, i8* %tmp, i64 6
-  %tmp29 = load i8, i8* %tmp28, align 1
+  %tmp28 = getelementptr inbounds i8, ptr %arg, i64 6
+  %tmp29 = load i8, ptr %tmp28, align 1
   %tmp30 = zext i8 %tmp29 to i64
   %tmp31 = shl nuw nsw i64 %tmp30, 48
   %tmp32 = or i64 %tmp27, %tmp31
-  %tmp33 = getelementptr inbounds i8, i8* %tmp, i64 7
-  %tmp34 = load i8, i8* %tmp33, align 1
+  %tmp33 = getelementptr inbounds i8, ptr %arg, i64 7
+  %tmp34 = load i8, ptr %tmp33, align 1
   %tmp35 = zext i8 %tmp34 to i64
   %tmp36 = shl nuw i64 %tmp35, 56
   %tmp37 = or i64 %tmp32, %tmp36
   ret i64 %tmp37
 }
 
-; i8* p; // p is 8 byte aligned
+; ptr p; // p is 8 byte aligned
 ; ((i64) p[0] << 56) | ((i64) p[1] << 48) | ((i64) p[2] << 40) | ((i64) p[3] << 32) | ((i64) p[4] << 24) | ((i64) p[5] << 16) | ((i64) p[6] << 8) | (i64) p[7]
-define i64 @load_i64_by_i8(i64* %arg) {
+define i64 @load_i64_by_i8(ptr %arg) {
 ; CHECK-LABEL: load_i64_by_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x0, [x0]
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i64* %arg to i8*
-  %tmp1 = load i8, i8* %tmp, align 8
+  %tmp1 = load i8, ptr %arg, align 8
   %tmp2 = zext i8 %tmp1 to i64
   %tmp3 = shl nuw i64 %tmp2, 56
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i64 1
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i64 1
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i64
   %tmp7 = shl nuw nsw i64 %tmp6, 48
   %tmp8 = or i64 %tmp7, %tmp3
-  %tmp9 = getelementptr inbounds i8, i8* %tmp, i64 2
-  %tmp10 = load i8, i8* %tmp9, align 1
+  %tmp9 = getelementptr inbounds i8, ptr %arg, i64 2
+  %tmp10 = load i8, ptr %tmp9, align 1
   %tmp11 = zext i8 %tmp10 to i64
   %tmp12 = shl nuw nsw i64 %tmp11, 40
   %tmp13 = or i64 %tmp8, %tmp12
-  %tmp14 = getelementptr inbounds i8, i8* %tmp, i64 3
-  %tmp15 = load i8, i8* %tmp14, align 1
+  %tmp14 = getelementptr inbounds i8, ptr %arg, i64 3
+  %tmp15 = load i8, ptr %tmp14, align 1
   %tmp16 = zext i8 %tmp15 to i64
   %tmp17 = shl nuw nsw i64 %tmp16, 32
   %tmp18 = or i64 %tmp13, %tmp17
-  %tmp19 = getelementptr inbounds i8, i8* %tmp, i64 4
-  %tmp20 = load i8, i8* %tmp19, align 1
+  %tmp19 = getelementptr inbounds i8, ptr %arg, i64 4
+  %tmp20 = load i8, ptr %tmp19, align 1
   %tmp21 = zext i8 %tmp20 to i64
   %tmp22 = shl nuw nsw i64 %tmp21, 24
   %tmp23 = or i64 %tmp18, %tmp22
-  %tmp24 = getelementptr inbounds i8, i8* %tmp, i64 5
-  %tmp25 = load i8, i8* %tmp24, align 1
+  %tmp24 = getelementptr inbounds i8, ptr %arg, i64 5
+  %tmp25 = load i8, ptr %tmp24, align 1
   %tmp26 = zext i8 %tmp25 to i64
   %tmp27 = shl nuw nsw i64 %tmp26, 16
   %tmp28 = or i64 %tmp23, %tmp27
-  %tmp29 = getelementptr inbounds i8, i8* %tmp, i64 6
-  %tmp30 = load i8, i8* %tmp29, align 1
+  %tmp29 = getelementptr inbounds i8, ptr %arg, i64 6
+  %tmp30 = load i8, ptr %tmp29, align 1
   %tmp31 = zext i8 %tmp30 to i64
   %tmp32 = shl nuw nsw i64 %tmp31, 8
   %tmp33 = or i64 %tmp28, %tmp32
-  %tmp34 = getelementptr inbounds i8, i8* %tmp, i64 7
-  %tmp35 = load i8, i8* %tmp34, align 1
+  %tmp34 = getelementptr inbounds i8, ptr %arg, i64 7
+  %tmp35 = load i8, ptr %tmp34, align 1
   %tmp36 = zext i8 %tmp35 to i64
   %tmp37 = or i64 %tmp33, %tmp36
   ret i64 %tmp37
 }
 
-; i8* p; // p[1] is 4 byte aligned
+; ptr p; // p[1] is 4 byte aligned
 ; (i32) p[1] | ((i32) p[2] << 8) | ((i32) p[3] << 16) | ((i32) p[4] << 24)
-define i32 @load_i32_by_i8_nonzero_offset(i32* %arg) {
+define i32 @load_i32_by_i8_nonzero_offset(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_i8_nonzero_offset:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur w8, [x0, #1]
 ; CHECK-NEXT:    rev w0, w8
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp2 = load i8, i8* %tmp1, align 4
+  %tmp1 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp2 = load i8, ptr %tmp1, align 4
   %tmp3 = zext i8 %tmp2 to i32
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 2
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 2
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 8
   %tmp8 = or i32 %tmp7, %tmp3
-  %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 3
-  %tmp10 = load i8, i8* %tmp9, align 1
+  %tmp9 = getelementptr inbounds i8, ptr %arg, i32 3
+  %tmp10 = load i8, ptr %tmp9, align 1
   %tmp11 = zext i8 %tmp10 to i32
   %tmp12 = shl nuw nsw i32 %tmp11, 16
   %tmp13 = or i32 %tmp8, %tmp12
-  %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 4
-  %tmp15 = load i8, i8* %tmp14, align 1
+  %tmp14 = getelementptr inbounds i8, ptr %arg, i32 4
+  %tmp15 = load i8, ptr %tmp14, align 1
   %tmp16 = zext i8 %tmp15 to i32
   %tmp17 = shl nuw nsw i32 %tmp16, 24
   %tmp18 = or i32 %tmp13, %tmp17
   ret i32 %tmp18
 }
 
-; i8* p; // p[-4] is 4 byte aligned
+; ptr p; // p[-4] is 4 byte aligned
 ; (i32) p[-4] | ((i32) p[-3] << 8) | ((i32) p[-2] << 16) | ((i32) p[-1] << 24)
-define i32 @load_i32_by_i8_neg_offset(i32* %arg) {
+define i32 @load_i32_by_i8_neg_offset(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_i8_neg_offset:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur w8, [x0, #-4]
 ; CHECK-NEXT:    rev w0, w8
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 -4
-  %tmp2 = load i8, i8* %tmp1, align 4
+  %tmp1 = getelementptr inbounds i8, ptr %arg, i32 -4
+  %tmp2 = load i8, ptr %tmp1, align 4
   %tmp3 = zext i8 %tmp2 to i32
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 -3
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 -3
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 8
   %tmp8 = or i32 %tmp7, %tmp3
-  %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 -2
-  %tmp10 = load i8, i8* %tmp9, align 1
+  %tmp9 = getelementptr inbounds i8, ptr %arg, i32 -2
+  %tmp10 = load i8, ptr %tmp9, align 1
   %tmp11 = zext i8 %tmp10 to i32
   %tmp12 = shl nuw nsw i32 %tmp11, 16
   %tmp13 = or i32 %tmp8, %tmp12
-  %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 -1
-  %tmp15 = load i8, i8* %tmp14, align 1
+  %tmp14 = getelementptr inbounds i8, ptr %arg, i32 -1
+  %tmp15 = load i8, ptr %tmp14, align 1
   %tmp16 = zext i8 %tmp15 to i32
   %tmp17 = shl nuw nsw i32 %tmp16, 24
   %tmp18 = or i32 %tmp13, %tmp17
   ret i32 %tmp18
 }
 
-; i8* p; // p[1] is 4 byte aligned
+; ptr p; // p[1] is 4 byte aligned
 ; (i32) p[4] | ((i32) p[3] << 8) | ((i32) p[2] << 16) | ((i32) p[1] << 24)
-define i32 @load_i32_by_i8_nonzero_offset_bswap(i32* %arg) {
+define i32 @load_i32_by_i8_nonzero_offset_bswap(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_i8_nonzero_offset_bswap:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur w0, [x0, #1]
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 4
-  %tmp2 = load i8, i8* %tmp1, align 1
+  %tmp1 = getelementptr inbounds i8, ptr %arg, i32 4
+  %tmp2 = load i8, ptr %tmp1, align 1
   %tmp3 = zext i8 %tmp2 to i32
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 3
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 3
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 8
   %tmp8 = or i32 %tmp7, %tmp3
-  %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
-  %tmp10 = load i8, i8* %tmp9, align 1
+  %tmp9 = getelementptr inbounds i8, ptr %arg, i32 2
+  %tmp10 = load i8, ptr %tmp9, align 1
   %tmp11 = zext i8 %tmp10 to i32
   %tmp12 = shl nuw nsw i32 %tmp11, 16
   %tmp13 = or i32 %tmp8, %tmp12
-  %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp15 = load i8, i8* %tmp14, align 4
+  %tmp14 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp15 = load i8, ptr %tmp14, align 4
   %tmp16 = zext i8 %tmp15 to i32
   %tmp17 = shl nuw nsw i32 %tmp16, 24
   %tmp18 = or i32 %tmp13, %tmp17
   ret i32 %tmp18
 }
 
-; i8* p; // p[-4] is 4 byte aligned
+; ptr p; // p[-4] is 4 byte aligned
 ; (i32) p[-1] | ((i32) p[-2] << 8) | ((i32) p[-3] << 16) | ((i32) p[-4] << 24)
-define i32 @load_i32_by_i8_neg_offset_bswap(i32* %arg) {
+define i32 @load_i32_by_i8_neg_offset_bswap(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_i8_neg_offset_bswap:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur w0, [x0, #-4]
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 -1
-  %tmp2 = load i8, i8* %tmp1, align 1
+  %tmp1 = getelementptr inbounds i8, ptr %arg, i32 -1
+  %tmp2 = load i8, ptr %tmp1, align 1
   %tmp3 = zext i8 %tmp2 to i32
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 -2
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 -2
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 8
   %tmp8 = or i32 %tmp7, %tmp3
-  %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 -3
-  %tmp10 = load i8, i8* %tmp9, align 1
+  %tmp9 = getelementptr inbounds i8, ptr %arg, i32 -3
+  %tmp10 = load i8, ptr %tmp9, align 1
   %tmp11 = zext i8 %tmp10 to i32
   %tmp12 = shl nuw nsw i32 %tmp11, 16
   %tmp13 = or i32 %tmp8, %tmp12
-  %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 -4
-  %tmp15 = load i8, i8* %tmp14, align 4
+  %tmp14 = getelementptr inbounds i8, ptr %arg, i32 -4
+  %tmp15 = load i8, ptr %tmp14, align 4
   %tmp16 = zext i8 %tmp15 to i32
   %tmp17 = shl nuw nsw i32 %tmp16, 24
   %tmp18 = or i32 %tmp13, %tmp17
@@ -319,20 +308,19 @@ define i32 @load_i32_by_i8_neg_offset_bswap(i32* %arg) {
 
 declare i16 @llvm.bswap.i16(i16)
 
-; i16* p; // p is 4 byte aligned
+; ptr p; // p is 4 byte aligned
 ; (i32) bswap(p[0]) | (i32) bswap(p[1] << 16)
-define i32 @load_i32_by_bswap_i16(i32* %arg) {
+define i32 @load_i32_by_bswap_i16(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_bswap_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    rev w0, w8
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i16*
-  %tmp1 = load i16, i16* %tmp, align 4
+  %tmp1 = load i16, ptr %arg, align 4
   %tmp11 = call i16 @llvm.bswap.i16(i16 %tmp1)
   %tmp2 = zext i16 %tmp11 to i32
-  %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
-  %tmp4 = load i16, i16* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i16, ptr %arg, i32 1
+  %tmp4 = load i16, ptr %tmp3, align 1
   %tmp41 = call i16 @llvm.bswap.i16(i16 %tmp4)
   %tmp5 = zext i16 %tmp41 to i32
   %tmp6 = shl nuw nsw i32 %tmp5, 16
@@ -340,28 +328,27 @@ define i32 @load_i32_by_bswap_i16(i32* %arg) {
   ret i32 %tmp7
 }
 
-; i16* p; // p is 4 byte aligned
+; ptr p; // p is 4 byte aligned
 ; (i32) p[1] | (sext(p[0] << 16) to i32)
-define i32 @load_i32_by_sext_i16(i32* %arg) {
+define i32 @load_i32_by_sext_i16(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_sext_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w0, [x0]
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i16*
-  %tmp1 = load i16, i16* %tmp, align 4
+  %tmp1 = load i16, ptr %arg, align 4
   %tmp2 = sext i16 %tmp1 to i32
-  %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
-  %tmp4 = load i16, i16* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i16, ptr %arg, i32 1
+  %tmp4 = load i16, ptr %tmp3, align 1
   %tmp5 = zext i16 %tmp4 to i32
   %tmp6 = shl nuw nsw i32 %tmp2, 16
   %tmp7 = or i32 %tmp6, %tmp5
   ret i32 %tmp7
 }
 
-; i8* arg; i32 i;
+; ptr arg; i32 i;
 ; p = arg + 12;
 ; (i32) p[i] | ((i32) p[i + 1] << 8) | ((i32) p[i + 2] << 16) | ((i32) p[i + 3] << 24)
-define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) {
+define i32 @load_i32_by_i8_base_offset_index(ptr %arg, i32 %i) {
 ; CHECK-LABEL: load_i32_by_i8_base_offset_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, w1, uxtw
@@ -371,36 +358,36 @@ define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) {
   %tmp = add nuw nsw i32 %i, 3
   %tmp2 = add nuw nsw i32 %i, 2
   %tmp3 = add nuw nsw i32 %i, 1
-  %tmp4 = getelementptr inbounds i8, i8* %arg, i64 12
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i64 12
   %tmp5 = zext i32 %i to i64
-  %tmp6 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp5
-  %tmp7 = load i8, i8* %tmp6, align 4
+  %tmp6 = getelementptr inbounds i8, ptr %tmp4, i64 %tmp5
+  %tmp7 = load i8, ptr %tmp6, align 4
   %tmp8 = zext i8 %tmp7 to i32
   %tmp9 = zext i32 %tmp3 to i64
-  %tmp10 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp9
-  %tmp11 = load i8, i8* %tmp10, align 1
+  %tmp10 = getelementptr inbounds i8, ptr %tmp4, i64 %tmp9
+  %tmp11 = load i8, ptr %tmp10, align 1
   %tmp12 = zext i8 %tmp11 to i32
   %tmp13 = shl nuw nsw i32 %tmp12, 8
   %tmp14 = or i32 %tmp13, %tmp8
   %tmp15 = zext i32 %tmp2 to i64
-  %tmp16 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp15
-  %tmp17 = load i8, i8* %tmp16, align 1
+  %tmp16 = getelementptr inbounds i8, ptr %tmp4, i64 %tmp15
+  %tmp17 = load i8, ptr %tmp16, align 1
   %tmp18 = zext i8 %tmp17 to i32
   %tmp19 = shl nuw nsw i32 %tmp18, 16
   %tmp20 = or i32 %tmp14, %tmp19
   %tmp21 = zext i32 %tmp to i64
-  %tmp22 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp21
-  %tmp23 = load i8, i8* %tmp22, align 1
+  %tmp22 = getelementptr inbounds i8, ptr %tmp4, i64 %tmp21
+  %tmp23 = load i8, ptr %tmp22, align 1
   %tmp24 = zext i8 %tmp23 to i32
   %tmp25 = shl nuw i32 %tmp24, 24
   %tmp26 = or i32 %tmp20, %tmp25
   ret i32 %tmp26
 }
 
-; i8* arg; i32 i;
+; ptr arg; i32 i;
 ; p = arg + 12;
 ; (i32) p[i + 1] | ((i32) p[i + 2] << 8) | ((i32) p[i + 3] << 16) | ((i32) p[i + 4] << 24)
-define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
+define i32 @load_i32_by_i8_base_offset_index_2(ptr %arg, i32 %i) {
 ; CHECK-LABEL: load_i32_by_i8_base_offset_index_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, w1, uxtw
@@ -410,55 +397,53 @@ define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
   %tmp = add nuw nsw i32 %i, 4
   %tmp2 = add nuw nsw i32 %i, 3
   %tmp3 = add nuw nsw i32 %i, 2
-  %tmp4 = getelementptr inbounds i8, i8* %arg, i64 12
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i64 12
   %tmp5 = add nuw nsw i32 %i, 1
   %tmp27 = zext i32 %tmp5 to i64
-  %tmp28 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp27
-  %tmp29 = load i8, i8* %tmp28, align 4
+  %tmp28 = getelementptr inbounds i8, ptr %tmp4, i64 %tmp27
+  %tmp29 = load i8, ptr %tmp28, align 4
   %tmp30 = zext i8 %tmp29 to i32
   %tmp31 = zext i32 %tmp3 to i64
-  %tmp32 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp31
-  %tmp33 = load i8, i8* %tmp32, align 1
+  %tmp32 = getelementptr inbounds i8, ptr %tmp4, i64 %tmp31
+  %tmp33 = load i8, ptr %tmp32, align 1
   %tmp34 = zext i8 %tmp33 to i32
   %tmp35 = shl nuw nsw i32 %tmp34, 8
   %tmp36 = or i32 %tmp35, %tmp30
   %tmp37 = zext i32 %tmp2 to i64
-  %tmp38 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp37
-  %tmp39 = load i8, i8* %tmp38, align 1
+  %tmp38 = getelementptr inbounds i8, ptr %tmp4, i64 %tmp37
+  %tmp39 = load i8, ptr %tmp38, align 1
   %tmp40 = zext i8 %tmp39 to i32
   %tmp41 = shl nuw nsw i32 %tmp40, 16
   %tmp42 = or i32 %tmp36, %tmp41
   %tmp43 = zext i32 %tmp to i64
-  %tmp44 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp43
-  %tmp45 = load i8, i8* %tmp44, align 1
+  %tmp44 = getelementptr inbounds i8, ptr %tmp4, i64 %tmp43
+  %tmp45 = load i8, ptr %tmp44, align 1
   %tmp46 = zext i8 %tmp45 to i32
   %tmp47 = shl nuw i32 %tmp46, 24
   %tmp48 = or i32 %tmp42, %tmp47
   ret i32 %tmp48
 }
-; i8* p; // p is 2 byte aligned
+; ptr p; // p is 2 byte aligned
 ; (i32) p[0] | ((i32) p[1] << 8)
-define i32 @zext_load_i32_by_i8(i32* %arg) {
+define i32 @zext_load_i32_by_i8(ptr %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
 ; CHECK-NEXT:    rev16 w0, w8
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
-  %tmp2 = load i8, i8* %tmp1, align 2
+  %tmp2 = load i8, ptr %arg, align 2
   %tmp3 = zext i8 %tmp2 to i32
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 8
   %tmp8 = or i32 %tmp7, %tmp3
   ret i32 %tmp8
 }
 
-; i8* p; // p is 2 byte aligned
+; ptr p; // p is 2 byte aligned
 ; ((i32) p[0] << 8) | ((i32) p[1] << 16)
-define i32 @zext_load_i32_by_i8_shl_8(i32* %arg) {
+define i32 @zext_load_i32_by_i8_shl_8(ptr %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8_shl_8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -466,22 +451,20 @@ define i32 @zext_load_i32_by_i8_shl_8(i32* %arg) {
 ; CHECK-NEXT:    lsl w8, w8, #8
 ; CHECK-NEXT:    orr w0, w8, w9, lsl #16
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
-  %tmp2 = load i8, i8* %tmp1, align 2
+  %tmp2 = load i8, ptr %arg, align 2
   %tmp3 = zext i8 %tmp2 to i32
   %tmp30 = shl nuw nsw i32 %tmp3, 8
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 16
   %tmp8 = or i32 %tmp7, %tmp30
   ret i32 %tmp8
 }
 
-; i8* p; // p is 2 byte aligned
+; ptr p; // p is 2 byte aligned
 ; ((i32) p[0] << 16) | ((i32) p[1] << 24)
-define i32 @zext_load_i32_by_i8_shl_16(i32* %arg) {
+define i32 @zext_load_i32_by_i8_shl_16(ptr %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8_shl_16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -489,40 +472,36 @@ define i32 @zext_load_i32_by_i8_shl_16(i32* %arg) {
 ; CHECK-NEXT:    lsl w8, w8, #16
 ; CHECK-NEXT:    orr w0, w8, w9, lsl #24
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
-  %tmp2 = load i8, i8* %tmp1, align 2
+  %tmp2 = load i8, ptr %arg, align 2
   %tmp3 = zext i8 %tmp2 to i32
   %tmp30 = shl nuw nsw i32 %tmp3, 16
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 24
   %tmp8 = or i32 %tmp7, %tmp30
   ret i32 %tmp8
 }
-; i8* p; // p is 2 byte aligned
+; ptr p; // p is 2 byte aligned
 ; (i32) p[1] | ((i32) p[0] << 8)
-define i32 @zext_load_i32_by_i8_bswap(i32* %arg) {
+define i32 @zext_load_i32_by_i8_bswap(ptr %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8_bswap:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w0, [x0]
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp2 = load i8, i8* %tmp1, align 1
+  %tmp1 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp2 = load i8, ptr %tmp1, align 1
   %tmp3 = zext i8 %tmp2 to i32
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
-  %tmp5 = load i8, i8* %tmp4, align 2
+  %tmp5 = load i8, ptr %arg, align 2
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 8
   %tmp8 = or i32 %tmp7, %tmp3
   ret i32 %tmp8
 }
 
-; i8* p; // p is 2 byte aligned
+; ptr p; // p is 2 byte aligned
 ; ((i32) p[1] << 8) | ((i32) p[0] << 16)
-define i32 @zext_load_i32_by_i8_bswap_shl_8(i32* %arg) {
+define i32 @zext_load_i32_by_i8_bswap_shl_8(ptr %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x0, #1]
@@ -530,22 +509,20 @@ define i32 @zext_load_i32_by_i8_bswap_shl_8(i32* %arg) {
 ; CHECK-NEXT:    lsl w8, w8, #8
 ; CHECK-NEXT:    orr w0, w8, w9, lsl #16
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp2 = load i8, i8* %tmp1, align 1
+  %tmp1 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp2 = load i8, ptr %tmp1, align 1
   %tmp3 = zext i8 %tmp2 to i32
   %tmp30 = shl nuw nsw i32 %tmp3, 8
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
-  %tmp5 = load i8, i8* %tmp4, align 2
+  %tmp5 = load i8, ptr %arg, align 2
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 16
   %tmp8 = or i32 %tmp7, %tmp30
   ret i32 %tmp8
 }
 
-; i8* p; // p is 2 byte aligned
+; ptr p; // p is 2 byte aligned
 ; ((i32) p[1] << 16) | ((i32) p[0] << 24)
-define i32 @zext_load_i32_by_i8_bswap_shl_16(i32* %arg) {
+define i32 @zext_load_i32_by_i8_bswap_shl_16(ptr %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x0, #1]
@@ -553,36 +530,33 @@ define i32 @zext_load_i32_by_i8_bswap_shl_16(i32* %arg) {
 ; CHECK-NEXT:    lsl w8, w8, #16
 ; CHECK-NEXT:    orr w0, w8, w9, lsl #24
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp2 = load i8, i8* %tmp1, align 1
+  %tmp1 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp2 = load i8, ptr %tmp1, align 1
   %tmp3 = zext i8 %tmp2 to i32
   %tmp30 = shl nuw nsw i32 %tmp3, 16
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
-  %tmp5 = load i8, i8* %tmp4, align 2
+  %tmp5 = load i8, ptr %arg, align 2
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 24
   %tmp8 = or i32 %tmp7, %tmp30
   ret i32 %tmp8
 }
 
-; i8* p;
-; i16* p1.i16 = (i16*) p;
+; ptr p;
+; ptr p1.i16 = (ptr) p;
 ; (p1.i16[0] << 8) | ((i16) p[2])
 ;
 ; This is essentially an i16 load from p[1], but we don't fold the pattern now
 ; because in the original DAG we don't have the p[1] address available
-define i16 @load_i16_from_nonzero_offset(i8* %p) {
+define i16 @load_i16_from_nonzero_offset(ptr %p) {
 ; CHECK-LABEL: load_i16_from_nonzero_offset:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
 ; CHECK-NEXT:    ldrb w9, [x0, #2]
 ; CHECK-NEXT:    orr w0, w9, w8, lsl #8
 ; CHECK-NEXT:    ret
-  %p1.i16 = bitcast i8* %p to i16*
-  %p2.i8 = getelementptr i8, i8* %p, i64 2
-  %v1 = load i16, i16* %p1.i16
-  %v2.i8 = load i8, i8* %p2.i8
+  %p2.i8 = getelementptr i8, ptr %p, i64 2
+  %v1 = load i16, ptr %p
+  %v2.i8 = load i8, ptr %p2.i8
   %v2 = zext i8 %v2.i8 to i16
   %v1.shl = shl i16 %v1, 8
   %res = or i16 %v1.shl, %v2

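Nearly every hunk in load-combine-big-endian.ll (and in load-combine.ll below) is the same mechanical rewrite: the leading bitcast of the typed argument disappears, and the byte-wise GEPs index i8 directly off the incoming ptr. A minimal before/after sketch, with hypothetical function names that do not appear in the patch:

; Typed-pointer form (what the tests looked like before this conversion):
;   define i8 @second_byte_typed(i32* %arg) {
;     %tmp = bitcast i32* %arg to i8*
;     %gep = getelementptr inbounds i8, i8* %tmp, i32 1
;     %val = load i8, i8* %gep, align 1
;     ret i8 %val
;   }

; Opaque-pointer form: the bitcast is gone and %arg is the GEP base itself.
define i8 @second_byte_opaque(ptr %arg) {
  %gep = getelementptr inbounds i8, ptr %arg, i32 1
  %val = load i8, ptr %gep, align 1
  ret i8 %val
}
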
diff --git a/llvm/test/CodeGen/AArch64/load-combine.ll b/llvm/test/CodeGen/AArch64/load-combine.ll
index de1b0f13adf0a..57f61e5303ecf 100644
--- a/llvm/test/CodeGen/AArch64/load-combine.ll
+++ b/llvm/test/CodeGen/AArch64/load-combine.ll
@@ -1,306 +1,295 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-unknown | FileCheck %s
 
-; i8* p; // p is 1 byte aligned
+; ptr p; // p is 1 byte aligned
 ; (i32) p[0] | ((i32) p[1] << 8) | ((i32) p[2] << 16) | ((i32) p[3] << 24)
-define i32 @load_i32_by_i8_unaligned(i32* %arg) {
+define i32 @load_i32_by_i8_unaligned(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_i8_unaligned:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w0, [x0]
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
-  %tmp2 = load i8, i8* %tmp1, align 1
+  %tmp2 = load i8, ptr %arg, align 1
   %tmp3 = zext i8 %tmp2 to i32
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 8
   %tmp8 = or i32 %tmp7, %tmp3
-  %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
-  %tmp10 = load i8, i8* %tmp9, align 1
+  %tmp9 = getelementptr inbounds i8, ptr %arg, i32 2
+  %tmp10 = load i8, ptr %tmp9, align 1
   %tmp11 = zext i8 %tmp10 to i32
   %tmp12 = shl nuw nsw i32 %tmp11, 16
   %tmp13 = or i32 %tmp8, %tmp12
-  %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 3
-  %tmp15 = load i8, i8* %tmp14, align 1
+  %tmp14 = getelementptr inbounds i8, ptr %arg, i32 3
+  %tmp15 = load i8, ptr %tmp14, align 1
   %tmp16 = zext i8 %tmp15 to i32
   %tmp17 = shl nuw nsw i32 %tmp16, 24
   %tmp18 = or i32 %tmp13, %tmp17
   ret i32 %tmp18
 }
 
-; i8* p; // p is 4 byte aligned
+; ptr p; // p is 4 byte aligned
 ; (i32) p[0] | ((i32) p[1] << 8) | ((i32) p[2] << 16) | ((i32) p[3] << 24)
-define i32 @load_i32_by_i8_aligned(i32* %arg) {
+define i32 @load_i32_by_i8_aligned(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_i8_aligned:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w0, [x0]
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
-  %tmp2 = load i8, i8* %tmp1, align 4
+  %tmp2 = load i8, ptr %arg, align 4
   %tmp3 = zext i8 %tmp2 to i32
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 8
   %tmp8 = or i32 %tmp7, %tmp3
-  %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
-  %tmp10 = load i8, i8* %tmp9, align 1
+  %tmp9 = getelementptr inbounds i8, ptr %arg, i32 2
+  %tmp10 = load i8, ptr %tmp9, align 1
   %tmp11 = zext i8 %tmp10 to i32
   %tmp12 = shl nuw nsw i32 %tmp11, 16
   %tmp13 = or i32 %tmp8, %tmp12
-  %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 3
-  %tmp15 = load i8, i8* %tmp14, align 1
+  %tmp14 = getelementptr inbounds i8, ptr %arg, i32 3
+  %tmp15 = load i8, ptr %tmp14, align 1
   %tmp16 = zext i8 %tmp15 to i32
   %tmp17 = shl nuw nsw i32 %tmp16, 24
   %tmp18 = or i32 %tmp13, %tmp17
   ret i32 %tmp18
 }
 
-; i8* p; // p is 4 byte aligned
+; ptr p; // p is 4 byte aligned
 ; ((i32) p[0] << 24) | ((i32) p[1] << 16) | ((i32) p[2] << 8) | (i32) p[3]
-define i32 @load_i32_by_i8_bswap(i32* %arg) {
+define i32 @load_i32_by_i8_bswap(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_i8_bswap:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    rev w0, w8
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = load i8, i8* %tmp, align 4
+  %tmp1 = load i8, ptr %arg, align 4
   %tmp2 = zext i8 %tmp1 to i32
   %tmp3 = shl nuw nsw i32 %tmp2, 24
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 16
   %tmp8 = or i32 %tmp7, %tmp3
-  %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
-  %tmp10 = load i8, i8* %tmp9, align 1
+  %tmp9 = getelementptr inbounds i8, ptr %arg, i32 2
+  %tmp10 = load i8, ptr %tmp9, align 1
   %tmp11 = zext i8 %tmp10 to i32
   %tmp12 = shl nuw nsw i32 %tmp11, 8
   %tmp13 = or i32 %tmp8, %tmp12
-  %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 3
-  %tmp15 = load i8, i8* %tmp14, align 1
+  %tmp14 = getelementptr inbounds i8, ptr %arg, i32 3
+  %tmp15 = load i8, ptr %tmp14, align 1
   %tmp16 = zext i8 %tmp15 to i32
   %tmp17 = or i32 %tmp13, %tmp16
   ret i32 %tmp17
 }
 
-; i8* p; // p is 8 byte aligned
+; ptr p; // p is 8 byte aligned
 ; (i64) p[0] | ((i64) p[1] << 8) | ((i64) p[2] << 16) | ((i64) p[3] << 24) | ((i64) p[4] << 32) | ((i64) p[5] << 40) | ((i64) p[6] << 48) | ((i64) p[7] << 56)
-define i64 @load_i64_by_i8(i64* %arg) {
+define i64 @load_i64_by_i8(ptr %arg) {
 ; CHECK-LABEL: load_i64_by_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x0, [x0]
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i64* %arg to i8*
-  %tmp1 = load i8, i8* %tmp, align 8
+  %tmp1 = load i8, ptr %arg, align 8
   %tmp2 = zext i8 %tmp1 to i64
-  %tmp3 = getelementptr inbounds i8, i8* %tmp, i64 1
-  %tmp4 = load i8, i8* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr %arg, i64 1
+  %tmp4 = load i8, ptr %tmp3, align 1
   %tmp5 = zext i8 %tmp4 to i64
   %tmp6 = shl nuw nsw i64 %tmp5, 8
   %tmp7 = or i64 %tmp6, %tmp2
-  %tmp8 = getelementptr inbounds i8, i8* %tmp, i64 2
-  %tmp9 = load i8, i8* %tmp8, align 1
+  %tmp8 = getelementptr inbounds i8, ptr %arg, i64 2
+  %tmp9 = load i8, ptr %tmp8, align 1
   %tmp10 = zext i8 %tmp9 to i64
   %tmp11 = shl nuw nsw i64 %tmp10, 16
   %tmp12 = or i64 %tmp7, %tmp11
-  %tmp13 = getelementptr inbounds i8, i8* %tmp, i64 3
-  %tmp14 = load i8, i8* %tmp13, align 1
+  %tmp13 = getelementptr inbounds i8, ptr %arg, i64 3
+  %tmp14 = load i8, ptr %tmp13, align 1
   %tmp15 = zext i8 %tmp14 to i64
   %tmp16 = shl nuw nsw i64 %tmp15, 24
   %tmp17 = or i64 %tmp12, %tmp16
-  %tmp18 = getelementptr inbounds i8, i8* %tmp, i64 4
-  %tmp19 = load i8, i8* %tmp18, align 1
+  %tmp18 = getelementptr inbounds i8, ptr %arg, i64 4
+  %tmp19 = load i8, ptr %tmp18, align 1
   %tmp20 = zext i8 %tmp19 to i64
   %tmp21 = shl nuw nsw i64 %tmp20, 32
   %tmp22 = or i64 %tmp17, %tmp21
-  %tmp23 = getelementptr inbounds i8, i8* %tmp, i64 5
-  %tmp24 = load i8, i8* %tmp23, align 1
+  %tmp23 = getelementptr inbounds i8, ptr %arg, i64 5
+  %tmp24 = load i8, ptr %tmp23, align 1
   %tmp25 = zext i8 %tmp24 to i64
   %tmp26 = shl nuw nsw i64 %tmp25, 40
   %tmp27 = or i64 %tmp22, %tmp26
-  %tmp28 = getelementptr inbounds i8, i8* %tmp, i64 6
-  %tmp29 = load i8, i8* %tmp28, align 1
+  %tmp28 = getelementptr inbounds i8, ptr %arg, i64 6
+  %tmp29 = load i8, ptr %tmp28, align 1
   %tmp30 = zext i8 %tmp29 to i64
   %tmp31 = shl nuw nsw i64 %tmp30, 48
   %tmp32 = or i64 %tmp27, %tmp31
-  %tmp33 = getelementptr inbounds i8, i8* %tmp, i64 7
-  %tmp34 = load i8, i8* %tmp33, align 1
+  %tmp33 = getelementptr inbounds i8, ptr %arg, i64 7
+  %tmp34 = load i8, ptr %tmp33, align 1
   %tmp35 = zext i8 %tmp34 to i64
   %tmp36 = shl nuw i64 %tmp35, 56
   %tmp37 = or i64 %tmp32, %tmp36
   ret i64 %tmp37
 }
 
-; i8* p; // p is 8 byte aligned
+; ptr p; // p is 8 byte aligned
 ; ((i64) p[0] << 56) | ((i64) p[1] << 48) | ((i64) p[2] << 40) | ((i64) p[3] << 32) | ((i64) p[4] << 24) | ((i64) p[5] << 16) | ((i64) p[6] << 8) | (i64) p[7]
-define i64 @load_i64_by_i8_bswap(i64* %arg) {
+define i64 @load_i64_by_i8_bswap(ptr %arg) {
 ; CHECK-LABEL: load_i64_by_i8_bswap:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x0]
 ; CHECK-NEXT:    rev x0, x8
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i64* %arg to i8*
-  %tmp1 = load i8, i8* %tmp, align 8
+  %tmp1 = load i8, ptr %arg, align 8
   %tmp2 = zext i8 %tmp1 to i64
   %tmp3 = shl nuw i64 %tmp2, 56
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i64 1
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i64 1
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i64
   %tmp7 = shl nuw nsw i64 %tmp6, 48
   %tmp8 = or i64 %tmp7, %tmp3
-  %tmp9 = getelementptr inbounds i8, i8* %tmp, i64 2
-  %tmp10 = load i8, i8* %tmp9, align 1
+  %tmp9 = getelementptr inbounds i8, ptr %arg, i64 2
+  %tmp10 = load i8, ptr %tmp9, align 1
   %tmp11 = zext i8 %tmp10 to i64
   %tmp12 = shl nuw nsw i64 %tmp11, 40
   %tmp13 = or i64 %tmp8, %tmp12
-  %tmp14 = getelementptr inbounds i8, i8* %tmp, i64 3
-  %tmp15 = load i8, i8* %tmp14, align 1
+  %tmp14 = getelementptr inbounds i8, ptr %arg, i64 3
+  %tmp15 = load i8, ptr %tmp14, align 1
   %tmp16 = zext i8 %tmp15 to i64
   %tmp17 = shl nuw nsw i64 %tmp16, 32
   %tmp18 = or i64 %tmp13, %tmp17
-  %tmp19 = getelementptr inbounds i8, i8* %tmp, i64 4
-  %tmp20 = load i8, i8* %tmp19, align 1
+  %tmp19 = getelementptr inbounds i8, ptr %arg, i64 4
+  %tmp20 = load i8, ptr %tmp19, align 1
   %tmp21 = zext i8 %tmp20 to i64
   %tmp22 = shl nuw nsw i64 %tmp21, 24
   %tmp23 = or i64 %tmp18, %tmp22
-  %tmp24 = getelementptr inbounds i8, i8* %tmp, i64 5
-  %tmp25 = load i8, i8* %tmp24, align 1
+  %tmp24 = getelementptr inbounds i8, ptr %arg, i64 5
+  %tmp25 = load i8, ptr %tmp24, align 1
   %tmp26 = zext i8 %tmp25 to i64
   %tmp27 = shl nuw nsw i64 %tmp26, 16
   %tmp28 = or i64 %tmp23, %tmp27
-  %tmp29 = getelementptr inbounds i8, i8* %tmp, i64 6
-  %tmp30 = load i8, i8* %tmp29, align 1
+  %tmp29 = getelementptr inbounds i8, ptr %arg, i64 6
+  %tmp30 = load i8, ptr %tmp29, align 1
   %tmp31 = zext i8 %tmp30 to i64
   %tmp32 = shl nuw nsw i64 %tmp31, 8
   %tmp33 = or i64 %tmp28, %tmp32
-  %tmp34 = getelementptr inbounds i8, i8* %tmp, i64 7
-  %tmp35 = load i8, i8* %tmp34, align 1
+  %tmp34 = getelementptr inbounds i8, ptr %arg, i64 7
+  %tmp35 = load i8, ptr %tmp34, align 1
   %tmp36 = zext i8 %tmp35 to i64
   %tmp37 = or i64 %tmp33, %tmp36
   ret i64 %tmp37
 }
 
-; i8* p; // p[1] is 4 byte aligned
+; ptr p; // p[1] is 4 byte aligned
 ; (i32) p[1] | ((i32) p[2] << 8) | ((i32) p[3] << 16) | ((i32) p[4] << 24)
-define i32 @load_i32_by_i8_nonzero_offset(i32* %arg) {
+define i32 @load_i32_by_i8_nonzero_offset(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_i8_nonzero_offset:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur w0, [x0, #1]
 ; CHECK-NEXT:    ret
 
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp2 = load i8, i8* %tmp1, align 4
+  %tmp1 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp2 = load i8, ptr %tmp1, align 4
   %tmp3 = zext i8 %tmp2 to i32
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 2
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 2
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 8
   %tmp8 = or i32 %tmp7, %tmp3
-  %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 3
-  %tmp10 = load i8, i8* %tmp9, align 1
+  %tmp9 = getelementptr inbounds i8, ptr %arg, i32 3
+  %tmp10 = load i8, ptr %tmp9, align 1
   %tmp11 = zext i8 %tmp10 to i32
   %tmp12 = shl nuw nsw i32 %tmp11, 16
   %tmp13 = or i32 %tmp8, %tmp12
-  %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 4
-  %tmp15 = load i8, i8* %tmp14, align 1
+  %tmp14 = getelementptr inbounds i8, ptr %arg, i32 4
+  %tmp15 = load i8, ptr %tmp14, align 1
   %tmp16 = zext i8 %tmp15 to i32
   %tmp17 = shl nuw nsw i32 %tmp16, 24
   %tmp18 = or i32 %tmp13, %tmp17
   ret i32 %tmp18
 }
 
-; i8* p; // p[-4] is 4 byte aligned
+; ptr p; // p[-4] is 4 byte aligned
 ; (i32) p[-4] | ((i32) p[-3] << 8) | ((i32) p[-2] << 16) | ((i32) p[-1] << 24)
-define i32 @load_i32_by_i8_neg_offset(i32* %arg) {
+define i32 @load_i32_by_i8_neg_offset(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_i8_neg_offset:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur w0, [x0, #-4]
 ; CHECK-NEXT:    ret
 
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 -4
-  %tmp2 = load i8, i8* %tmp1, align 4
+  %tmp1 = getelementptr inbounds i8, ptr %arg, i32 -4
+  %tmp2 = load i8, ptr %tmp1, align 4
   %tmp3 = zext i8 %tmp2 to i32
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 -3
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 -3
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 8
   %tmp8 = or i32 %tmp7, %tmp3
-  %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 -2
-  %tmp10 = load i8, i8* %tmp9, align 1
+  %tmp9 = getelementptr inbounds i8, ptr %arg, i32 -2
+  %tmp10 = load i8, ptr %tmp9, align 1
   %tmp11 = zext i8 %tmp10 to i32
   %tmp12 = shl nuw nsw i32 %tmp11, 16
   %tmp13 = or i32 %tmp8, %tmp12
-  %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 -1
-  %tmp15 = load i8, i8* %tmp14, align 1
+  %tmp14 = getelementptr inbounds i8, ptr %arg, i32 -1
+  %tmp15 = load i8, ptr %tmp14, align 1
   %tmp16 = zext i8 %tmp15 to i32
   %tmp17 = shl nuw nsw i32 %tmp16, 24
   %tmp18 = or i32 %tmp13, %tmp17
   ret i32 %tmp18
 }
 
-; i8* p; // p[1] is 4 byte aligned
+; ptr p; // p[1] is 4 byte aligned
 ; (i32) p[4] | ((i32) p[3] << 8) | ((i32) p[2] << 16) | ((i32) p[1] << 24)
-define i32 @load_i32_by_i8_nonzero_offset_bswap(i32* %arg) {
+define i32 @load_i32_by_i8_nonzero_offset_bswap(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_i8_nonzero_offset_bswap:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur w8, [x0, #1]
 ; CHECK-NEXT:    rev w0, w8
 ; CHECK-NEXT:    ret
 
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 4
-  %tmp2 = load i8, i8* %tmp1, align 1
+  %tmp1 = getelementptr inbounds i8, ptr %arg, i32 4
+  %tmp2 = load i8, ptr %tmp1, align 1
   %tmp3 = zext i8 %tmp2 to i32
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 3
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 3
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 8
   %tmp8 = or i32 %tmp7, %tmp3
-  %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 2
-  %tmp10 = load i8, i8* %tmp9, align 1
+  %tmp9 = getelementptr inbounds i8, ptr %arg, i32 2
+  %tmp10 = load i8, ptr %tmp9, align 1
   %tmp11 = zext i8 %tmp10 to i32
   %tmp12 = shl nuw nsw i32 %tmp11, 16
   %tmp13 = or i32 %tmp8, %tmp12
-  %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp15 = load i8, i8* %tmp14, align 4
+  %tmp14 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp15 = load i8, ptr %tmp14, align 4
   %tmp16 = zext i8 %tmp15 to i32
   %tmp17 = shl nuw nsw i32 %tmp16, 24
   %tmp18 = or i32 %tmp13, %tmp17
   ret i32 %tmp18
 }
 
-; i8* p; // p[-4] is 4 byte aligned
+; ptr p; // p[-4] is 4 byte aligned
 ; (i32) p[-1] | ((i32) p[-2] << 8) | ((i32) p[-3] << 16) | ((i32) p[-4] << 24)
-define i32 @load_i32_by_i8_neg_offset_bswap(i32* %arg) {
+define i32 @load_i32_by_i8_neg_offset_bswap(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_i8_neg_offset_bswap:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldur w8, [x0, #-4]
 ; CHECK-NEXT:    rev w0, w8
 ; CHECK-NEXT:    ret
 
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 -1
-  %tmp2 = load i8, i8* %tmp1, align 1
+  %tmp1 = getelementptr inbounds i8, ptr %arg, i32 -1
+  %tmp2 = load i8, ptr %tmp1, align 1
   %tmp3 = zext i8 %tmp2 to i32
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 -2
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 -2
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 8
   %tmp8 = or i32 %tmp7, %tmp3
-  %tmp9 = getelementptr inbounds i8, i8* %tmp, i32 -3
-  %tmp10 = load i8, i8* %tmp9, align 1
+  %tmp9 = getelementptr inbounds i8, ptr %arg, i32 -3
+  %tmp10 = load i8, ptr %tmp9, align 1
   %tmp11 = zext i8 %tmp10 to i32
   %tmp12 = shl nuw nsw i32 %tmp11, 16
   %tmp13 = or i32 %tmp8, %tmp12
-  %tmp14 = getelementptr inbounds i8, i8* %tmp, i32 -4
-  %tmp15 = load i8, i8* %tmp14, align 4
+  %tmp14 = getelementptr inbounds i8, ptr %arg, i32 -4
+  %tmp15 = load i8, ptr %tmp14, align 4
   %tmp16 = zext i8 %tmp15 to i32
   %tmp17 = shl nuw nsw i32 %tmp16, 24
   %tmp18 = or i32 %tmp13, %tmp17
@@ -309,21 +298,20 @@ define i32 @load_i32_by_i8_neg_offset_bswap(i32* %arg) {
 
 declare i16 @llvm.bswap.i16(i16)
 
-; i16* p; // p is 4 byte aligned
+; ptr p; // p is 4 byte aligned
 ; (i32) bswap(p[1]) | (i32) bswap(p[0] << 16)
-define i32 @load_i32_by_bswap_i16(i32* %arg) {
+define i32 @load_i32_by_bswap_i16(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_bswap_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    rev w0, w8
 ; CHECK-NEXT:    ret
 
-  %tmp = bitcast i32* %arg to i16*
-  %tmp1 = load i16, i16* %tmp, align 4
+  %tmp1 = load i16, ptr %arg, align 4
   %tmp11 = call i16 @llvm.bswap.i16(i16 %tmp1)
   %tmp2 = zext i16 %tmp11 to i32
-  %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
-  %tmp4 = load i16, i16* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i16, ptr %arg, i32 1
+  %tmp4 = load i16, ptr %tmp3, align 1
   %tmp41 = call i16 @llvm.bswap.i16(i16 %tmp4)
   %tmp5 = zext i16 %tmp41 to i32
   %tmp6 = shl nuw nsw i32 %tmp2, 16
@@ -331,28 +319,27 @@ define i32 @load_i32_by_bswap_i16(i32* %arg) {
   ret i32 %tmp7
 }
 
-; i16* p; // p is 4 byte aligned
+; ptr p; // p is 4 byte aligned
 ; (i32) p[0] | (sext(p[1] << 16) to i32)
-define i32 @load_i32_by_sext_i16(i32* %arg) {
+define i32 @load_i32_by_sext_i16(ptr %arg) {
 ; CHECK-LABEL: load_i32_by_sext_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w0, [x0]
 ; CHECK-NEXT:    ret
-  %tmp = bitcast i32* %arg to i16*
-  %tmp1 = load i16, i16* %tmp, align 4
+  %tmp1 = load i16, ptr %arg, align 4
   %tmp2 = zext i16 %tmp1 to i32
-  %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
-  %tmp4 = load i16, i16* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i16, ptr %arg, i32 1
+  %tmp4 = load i16, ptr %tmp3, align 1
   %tmp5 = sext i16 %tmp4 to i32
   %tmp6 = shl nuw nsw i32 %tmp5, 16
   %tmp7 = or i32 %tmp6, %tmp2
   ret i32 %tmp7
 }
 
-; i8* arg; i32 i;
+; ptr arg; i32 i;
 ; p = arg + 12;
 ; (i32) p[i] | ((i32) p[i + 1] << 8) | ((i32) p[i + 2] << 16) | ((i32) p[i + 3] << 24)
-define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) {
+define i32 @load_i32_by_i8_base_offset_index(ptr %arg, i32 %i) {
 ; CHECK-LABEL: load_i32_by_i8_base_offset_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, w1, uxtw
@@ -361,36 +348,36 @@ define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) {
   %tmp = add nuw nsw i32 %i, 3
   %tmp2 = add nuw nsw i32 %i, 2
   %tmp3 = add nuw nsw i32 %i, 1
-  %tmp4 = getelementptr inbounds i8, i8* %arg, i64 12
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i64 12
   %tmp5 = zext i32 %i to i64
-  %tmp6 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp5
-  %tmp7 = load i8, i8* %tmp6, align 4
+  %tmp6 = getelementptr inbounds i8, ptr %tmp4, i64 %tmp5
+  %tmp7 = load i8, ptr %tmp6, align 4
   %tmp8 = zext i8 %tmp7 to i32
   %tmp9 = zext i32 %tmp3 to i64
-  %tmp10 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp9
-  %tmp11 = load i8, i8* %tmp10, align 1
+  %tmp10 = getelementptr inbounds i8, ptr %tmp4, i64 %tmp9
+  %tmp11 = load i8, ptr %tmp10, align 1
   %tmp12 = zext i8 %tmp11 to i32
   %tmp13 = shl nuw nsw i32 %tmp12, 8
   %tmp14 = or i32 %tmp13, %tmp8
   %tmp15 = zext i32 %tmp2 to i64
-  %tmp16 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp15
-  %tmp17 = load i8, i8* %tmp16, align 1
+  %tmp16 = getelementptr inbounds i8, ptr %tmp4, i64 %tmp15
+  %tmp17 = load i8, ptr %tmp16, align 1
   %tmp18 = zext i8 %tmp17 to i32
   %tmp19 = shl nuw nsw i32 %tmp18, 16
   %tmp20 = or i32 %tmp14, %tmp19
   %tmp21 = zext i32 %tmp to i64
-  %tmp22 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp21
-  %tmp23 = load i8, i8* %tmp22, align 1
+  %tmp22 = getelementptr inbounds i8, ptr %tmp4, i64 %tmp21
+  %tmp23 = load i8, ptr %tmp22, align 1
   %tmp24 = zext i8 %tmp23 to i32
   %tmp25 = shl nuw i32 %tmp24, 24
   %tmp26 = or i32 %tmp20, %tmp25
   ret i32 %tmp26
 }
 
-; i8* arg; i32 i;
+; ptr arg; i32 i;
 ; p = arg + 12;
 ; (i32) p[i + 1] | ((i32) p[i + 2] << 8) | ((i32) p[i + 3] << 16) | ((i32) p[i + 4] << 24)
-define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
+define i32 @load_i32_by_i8_base_offset_index_2(ptr %arg, i32 %i) {
 ; CHECK-LABEL: load_i32_by_i8_base_offset_index_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, w1, uxtw
@@ -399,56 +386,54 @@ define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
   %tmp = add nuw nsw i32 %i, 4
   %tmp2 = add nuw nsw i32 %i, 3
   %tmp3 = add nuw nsw i32 %i, 2
-  %tmp4 = getelementptr inbounds i8, i8* %arg, i64 12
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i64 12
   %tmp5 = add nuw nsw i32 %i, 1
   %tmp27 = zext i32 %tmp5 to i64
-  %tmp28 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp27
-  %tmp29 = load i8, i8* %tmp28, align 4
+  %tmp28 = getelementptr inbounds i8, ptr %tmp4, i64 %tmp27
+  %tmp29 = load i8, ptr %tmp28, align 4
   %tmp30 = zext i8 %tmp29 to i32
   %tmp31 = zext i32 %tmp3 to i64
-  %tmp32 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp31
-  %tmp33 = load i8, i8* %tmp32, align 1
+  %tmp32 = getelementptr inbounds i8, ptr %tmp4, i64 %tmp31
+  %tmp33 = load i8, ptr %tmp32, align 1
   %tmp34 = zext i8 %tmp33 to i32
   %tmp35 = shl nuw nsw i32 %tmp34, 8
   %tmp36 = or i32 %tmp35, %tmp30
   %tmp37 = zext i32 %tmp2 to i64
-  %tmp38 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp37
-  %tmp39 = load i8, i8* %tmp38, align 1
+  %tmp38 = getelementptr inbounds i8, ptr %tmp4, i64 %tmp37
+  %tmp39 = load i8, ptr %tmp38, align 1
   %tmp40 = zext i8 %tmp39 to i32
   %tmp41 = shl nuw nsw i32 %tmp40, 16
   %tmp42 = or i32 %tmp36, %tmp41
   %tmp43 = zext i32 %tmp to i64
-  %tmp44 = getelementptr inbounds i8, i8* %tmp4, i64 %tmp43
-  %tmp45 = load i8, i8* %tmp44, align 1
+  %tmp44 = getelementptr inbounds i8, ptr %tmp4, i64 %tmp43
+  %tmp45 = load i8, ptr %tmp44, align 1
   %tmp46 = zext i8 %tmp45 to i32
   %tmp47 = shl nuw i32 %tmp46, 24
   %tmp48 = or i32 %tmp42, %tmp47
   ret i32 %tmp48
 }
 
-; i8* p; // p is 2 byte aligned
+; ptr p; // p is 2 byte aligned
 ; (i32) p[0] | ((i32) p[1] << 8)
-define i32 @zext_load_i32_by_i8(i32* %arg) {
+define i32 @zext_load_i32_by_i8(ptr %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w0, [x0]
 ; CHECK-NEXT:    ret
 
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
-  %tmp2 = load i8, i8* %tmp1, align 2
+  %tmp2 = load i8, ptr %arg, align 2
   %tmp3 = zext i8 %tmp2 to i32
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 8
   %tmp8 = or i32 %tmp7, %tmp3
   ret i32 %tmp8
 }
 
-; i8* p; // p is 2 byte aligned
+; ptr p; // p is 2 byte aligned
 ; ((i32) p[0] << 8) | ((i32) p[1] << 16)
-define i32 @zext_load_i32_by_i8_shl_8(i32* %arg) {
+define i32 @zext_load_i32_by_i8_shl_8(ptr %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8_shl_8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -457,22 +442,20 @@ define i32 @zext_load_i32_by_i8_shl_8(i32* %arg) {
 ; CHECK-NEXT:    orr w0, w8, w9, lsl #16
 ; CHECK-NEXT:    ret
 
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
-  %tmp2 = load i8, i8* %tmp1, align 2
+  %tmp2 = load i8, ptr %arg, align 2
   %tmp3 = zext i8 %tmp2 to i32
   %tmp30 = shl nuw nsw i32 %tmp3, 8
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 16
   %tmp8 = or i32 %tmp7, %tmp30
   ret i32 %tmp8
 }
 
-; i8* p; // p is 2 byte aligned
+; ptr p; // p is 2 byte aligned
 ; ((i32) p[0] << 16) | ((i32) p[1] << 24)
-define i32 @zext_load_i32_by_i8_shl_16(i32* %arg) {
+define i32 @zext_load_i32_by_i8_shl_16(ptr %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8_shl_16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -481,42 +464,38 @@ define i32 @zext_load_i32_by_i8_shl_16(i32* %arg) {
 ; CHECK-NEXT:    orr w0, w8, w9, lsl #24
 ; CHECK-NEXT:    ret
 
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 0
-  %tmp2 = load i8, i8* %tmp1, align 2
+  %tmp2 = load i8, ptr %arg, align 2
   %tmp3 = zext i8 %tmp2 to i32
   %tmp30 = shl nuw nsw i32 %tmp3, 16
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp5 = load i8, i8* %tmp4, align 1
+  %tmp4 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp5 = load i8, ptr %tmp4, align 1
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 24
   %tmp8 = or i32 %tmp7, %tmp30
   ret i32 %tmp8
 }
-; i8* p; // p is 2 byte aligned
+; ptr p; // p is 2 byte aligned
 ; (i32) p[1] | ((i32) p[0] << 8)
-define i32 @zext_load_i32_by_i8_bswap(i32* %arg) {
+define i32 @zext_load_i32_by_i8_bswap(ptr %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8_bswap:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
 ; CHECK-NEXT:    rev16 w0, w8
 ; CHECK-NEXT:    ret
 
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp2 = load i8, i8* %tmp1, align 1
+  %tmp1 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp2 = load i8, ptr %tmp1, align 1
   %tmp3 = zext i8 %tmp2 to i32
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
-  %tmp5 = load i8, i8* %tmp4, align 2
+  %tmp5 = load i8, ptr %arg, align 2
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 8
   %tmp8 = or i32 %tmp7, %tmp3
   ret i32 %tmp8
 }
 
-; i8* p; // p is 2 byte aligned
+; ptr p; // p is 2 byte aligned
 ; ((i32) p[1] << 8) | ((i32) p[0] << 16)
-define i32 @zext_load_i32_by_i8_bswap_shl_8(i32* %arg) {
+define i32 @zext_load_i32_by_i8_bswap_shl_8(ptr %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x0, #1]
@@ -525,22 +504,20 @@ define i32 @zext_load_i32_by_i8_bswap_shl_8(i32* %arg) {
 ; CHECK-NEXT:    orr w0, w8, w9, lsl #16
 ; CHECK-NEXT:    ret
 
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp2 = load i8, i8* %tmp1, align 1
+  %tmp1 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp2 = load i8, ptr %tmp1, align 1
   %tmp3 = zext i8 %tmp2 to i32
   %tmp30 = shl nuw nsw i32 %tmp3, 8
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
-  %tmp5 = load i8, i8* %tmp4, align 2
+  %tmp5 = load i8, ptr %arg, align 2
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 16
   %tmp8 = or i32 %tmp7, %tmp30
   ret i32 %tmp8
 }
 
-; i8* p; // p is 2 byte aligned
+; ptr p; // p is 2 byte aligned
 ; ((i32) p[1] << 16) | ((i32) p[0] << 24)
-define i32 @zext_load_i32_by_i8_bswap_shl_16(i32* %arg) {
+define i32 @zext_load_i32_by_i8_bswap_shl_16(ptr %arg) {
 ; CHECK-LABEL: zext_load_i32_by_i8_bswap_shl_16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x0, #1]
@@ -549,13 +526,11 @@ define i32 @zext_load_i32_by_i8_bswap_shl_16(i32* %arg) {
 ; CHECK-NEXT:    orr w0, w8, w9, lsl #24
 ; CHECK-NEXT:    ret
 
-  %tmp = bitcast i32* %arg to i8*
-  %tmp1 = getelementptr inbounds i8, i8* %tmp, i32 1
-  %tmp2 = load i8, i8* %tmp1, align 1
+  %tmp1 = getelementptr inbounds i8, ptr %arg, i32 1
+  %tmp2 = load i8, ptr %tmp1, align 1
   %tmp3 = zext i8 %tmp2 to i32
   %tmp30 = shl nuw nsw i32 %tmp3, 16
-  %tmp4 = getelementptr inbounds i8, i8* %tmp, i32 0
-  %tmp5 = load i8, i8* %tmp4, align 2
+  %tmp5 = load i8, ptr %arg, align 2
   %tmp6 = zext i8 %tmp5 to i32
   %tmp7 = shl nuw nsw i32 %tmp6, 24
   %tmp8 = or i32 %tmp7, %tmp30
@@ -563,13 +538,13 @@ define i32 @zext_load_i32_by_i8_bswap_shl_16(i32* %arg) {
 }
 
 ; x1 = x0
-define void @short_vector_to_i32(<4 x i8>* %in, i32* %out, i32* %p) {
+define void @short_vector_to_i32(ptr %in, ptr %out, ptr %p) {
 ; CHECK-LABEL: short_vector_to_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    str w8, [x1]
 ; CHECK-NEXT:    ret
-  %ld = load <4 x i8>, <4 x i8>* %in, align 4
+  %ld = load <4 x i8>, ptr %in, align 4
 
   %e1 = extractelement <4 x i8> %ld, i32 0
   %e2 = extractelement <4 x i8> %ld, i32 1
@@ -589,11 +564,11 @@ define void @short_vector_to_i32(<4 x i8>* %in, i32* %out, i32* %p) {
   %i2 = or i32 %i1, %s2
   %i3 = or i32 %i2, %s3
 
-  store i32 %i3, i32* %out
+  store i32 %i3, ptr %out
   ret void
 }
 
-define void @short_vector_to_i32_unused_low_i8(<4 x i8>* %in, i32* %out, i32* %p) {
+define void @short_vector_to_i32_unused_low_i8(ptr %in, ptr %out, ptr %p) {
 ; CHECK-LABEL: short_vector_to_i32_unused_low_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
@@ -606,7 +581,7 @@ define void @short_vector_to_i32_unused_low_i8(<4 x i8>* %in, i32* %out, i32* %p
 ; CHECK-NEXT:    orr w8, w8, w10, lsl #24
 ; CHECK-NEXT:    str w8, [x1]
 ; CHECK-NEXT:    ret
-  %ld = load <4 x i8>, <4 x i8>* %in, align 4
+  %ld = load <4 x i8>, ptr %in, align 4
 
   %e2 = extractelement <4 x i8> %ld, i32 1
   %e3 = extractelement <4 x i8> %ld, i32 2
@@ -623,11 +598,11 @@ define void @short_vector_to_i32_unused_low_i8(<4 x i8>* %in, i32* %out, i32* %p
   %i2 = or i32 %s1, %s2
   %i3 = or i32 %i2, %s3
 
-  store i32 %i3, i32* %out
+  store i32 %i3, ptr %out
   ret void
 }
 
-define void @short_vector_to_i32_unused_high_i8(<4 x i8>* %in, i32* %out, i32* %p) {
+define void @short_vector_to_i32_unused_high_i8(ptr %in, ptr %out, ptr %p) {
 ; CHECK-LABEL: short_vector_to_i32_unused_high_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
@@ -637,7 +612,7 @@ define void @short_vector_to_i32_unused_high_i8(<4 x i8>* %in, i32* %out, i32* %
 ; CHECK-NEXT:    orr w8, w9, w8, lsl #16
 ; CHECK-NEXT:    str w8, [x1]
 ; CHECK-NEXT:    ret
-  %ld = load <4 x i8>, <4 x i8>* %in, align 4
+  %ld = load <4 x i8>, ptr %in, align 4
 
   %e1 = extractelement <4 x i8> %ld, i32 0
   %e2 = extractelement <4 x i8> %ld, i32 1
@@ -653,11 +628,11 @@ define void @short_vector_to_i32_unused_high_i8(<4 x i8>* %in, i32* %out, i32* %
   %i1 = or i32 %s1, %z0
   %i2 = or i32 %i1, %s2
 
-  store i32 %i2, i32* %out
+  store i32 %i2, ptr %out
   ret void
 }
 
-define void @short_vector_to_i32_unused_low_i16(<4 x i8>* %in, i32* %out, i32* %p) {
+define void @short_vector_to_i32_unused_low_i16(ptr %in, ptr %out, ptr %p) {
 ; CHECK-LABEL: short_vector_to_i32_unused_low_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
@@ -668,7 +643,7 @@ define void @short_vector_to_i32_unused_low_i16(<4 x i8>* %in, i32* %out, i32* %
 ; CHECK-NEXT:    orr w8, w8, w9, lsl #16
 ; CHECK-NEXT:    str w8, [x1]
 ; CHECK-NEXT:    ret
-  %ld = load <4 x i8>, <4 x i8>* %in, align 4
+  %ld = load <4 x i8>, ptr %in, align 4
 
   %e3 = extractelement <4 x i8> %ld, i32 2
   %e4 = extractelement <4 x i8> %ld, i32 3
@@ -681,18 +656,18 @@ define void @short_vector_to_i32_unused_low_i16(<4 x i8>* %in, i32* %out, i32* %
 
   %i3 = or i32 %s2, %s3
 
-  store i32 %i3, i32* %out
+  store i32 %i3, ptr %out
   ret void
 }
 
 ; x1 = x0[0:1]
-define void @short_vector_to_i32_unused_high_i16(<4 x i8>* %in, i32* %out, i32* %p) {
+define void @short_vector_to_i32_unused_high_i16(ptr %in, ptr %out, ptr %p) {
 ; CHECK-LABEL: short_vector_to_i32_unused_high_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
 ; CHECK-NEXT:    str w8, [x1]
 ; CHECK-NEXT:    ret
-  %ld = load <4 x i8>, <4 x i8>* %in, align 4
+  %ld = load <4 x i8>, ptr %in, align 4
 
   %e1 = extractelement <4 x i8> %ld, i32 0
   %e2 = extractelement <4 x i8> %ld, i32 1
@@ -704,18 +679,18 @@ define void @short_vector_to_i32_unused_high_i16(<4 x i8>* %in, i32* %out, i32*
 
   %i1 = or i32 %s1, %z0
 
-  store i32 %i1, i32* %out
+  store i32 %i1, ptr %out
   ret void
 }
 
 ; x1 = x0
-define void @short_vector_to_i64(<4 x i8>* %in, i64* %out, i64* %p) {
+define void @short_vector_to_i64(ptr %in, ptr %out, ptr %p) {
 ; CHECK-LABEL: short_vector_to_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret
-  %ld = load <4 x i8>, <4 x i8>* %in, align 4
+  %ld = load <4 x i8>, ptr %in, align 4
 
   %e1 = extractelement <4 x i8> %ld, i32 0
   %e2 = extractelement <4 x i8> %ld, i32 1
@@ -735,6 +710,6 @@ define void @short_vector_to_i64(<4 x i8>* %in, i64* %out, i64* %p) {
   %i2 = or i64 %i1, %s2
   %i3 = or i64 %i2, %s3
 
-  store i64 %i3, i64* %out
+  store i64 %i3, ptr %out
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/load-store-forwarding.ll b/llvm/test/CodeGen/AArch64/load-store-forwarding.ll
index 94940a8c4a9d2..02efbe9b409de 100644
--- a/llvm/test/CodeGen/AArch64/load-store-forwarding.ll
+++ b/llvm/test/CodeGen/AArch64/load-store-forwarding.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=aarch64_be -o - %s | FileCheck %s --check-prefix CHECK-BE
 ; RUN: llc -mtriple=aarch64 -o - %s | FileCheck %s --check-prefix CHECK-LE
 
-define i8 @test1(i32 %a, i8* %pa) {
+define i8 @test1(i32 %a, ptr %pa) {
 ; CHECK-BE-LABEL: test1:
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    mov w8, w0
@@ -14,14 +14,12 @@ define i8 @test1(i32 %a, i8* %pa) {
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    str w0, [x1]
 ; CHECK-LE-NEXT:    ret
-  %p32 = bitcast i8* %pa to i32*
-  %p8 = getelementptr i8, i8* %pa, i32 0
-  store i32 %a, i32* %p32
-  %res = load i8, i8* %p8
+  store i32 %a, ptr %pa
+  %res = load i8, ptr %pa
   ret i8 %res
 }
 
-define i8 @test2(i32 %a, i8* %pa) {
+define i8 @test2(i32 %a, ptr %pa) {
 ; CHECK-BE-LABEL: test2:
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    str w0, [x1]
@@ -33,14 +31,13 @@ define i8 @test2(i32 %a, i8* %pa) {
 ; CHECK-LE-NEXT:    str w0, [x1]
 ; CHECK-LE-NEXT:    ubfx w0, w0, #8, #8
 ; CHECK-LE-NEXT:    ret
-  %p32 = bitcast i8* %pa to i32*
-  %p8 = getelementptr i8, i8* %pa, i32 1
-  store i32 %a, i32* %p32
-  %res = load i8, i8* %p8
+  %p8 = getelementptr i8, ptr %pa, i32 1
+  store i32 %a, ptr %pa
+  %res = load i8, ptr %p8
   ret i8 %res
 }
 
-define i8 @test3(i32 %a, i8* %pa) {
+define i8 @test3(i32 %a, ptr %pa) {
 ; CHECK-BE-LABEL: test3:
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    str w0, [x1]
@@ -52,14 +49,13 @@ define i8 @test3(i32 %a, i8* %pa) {
 ; CHECK-LE-NEXT:    str w0, [x1]
 ; CHECK-LE-NEXT:    ubfx w0, w0, #16, #8
 ; CHECK-LE-NEXT:    ret
-  %p32 = bitcast i8* %pa to i32*
-  %p8 = getelementptr i8, i8* %pa, i32 2
-  store i32 %a, i32* %p32
-  %res = load i8, i8* %p8
+  %p8 = getelementptr i8, ptr %pa, i32 2
+  store i32 %a, ptr %pa
+  %res = load i8, ptr %p8
   ret i8 %res
 }
 
-define i8 @test4(i32 %a, i8* %pa) {
+define i8 @test4(i32 %a, ptr %pa) {
 ; CHECK-BE-LABEL: test4:
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    str w0, [x1]
@@ -70,9 +66,8 @@ define i8 @test4(i32 %a, i8* %pa) {
 ; CHECK-LE-NEXT:    str w0, [x1]
 ; CHECK-LE-NEXT:    lsr w0, w0, #24
 ; CHECK-LE-NEXT:    ret
-  %p32 = bitcast i8* %pa to i32*
-  %p8 = getelementptr i8, i8* %pa, i32 3
-  store i32 %a, i32* %p32
-  %res = load i8, i8* %p8
+  %p8 = getelementptr i8, ptr %pa, i32 3
+  store i32 %a, ptr %pa
+  %res = load i8, ptr %p8
   ret i8 %res
 }

diff  --git a/llvm/test/CodeGen/AArch64/local_vars.ll b/llvm/test/CodeGen/AArch64/local_vars.ll
index 0a53ce5609467..e321b35a5425d 100644
--- a/llvm/test/CodeGen/AArch64/local_vars.ll
+++ b/llvm/test/CodeGen/AArch64/local_vars.ll
@@ -11,7 +11,7 @@
 ; implemented.
 
 @var = global i64 0
-@local_addr = global i64* null
+@local_addr = global ptr null
 
 declare void @foo()
 
@@ -60,11 +60,11 @@ define void @stack_local() {
 ; CHECK-LABEL: stack_local:
 ; CHECK: sub sp, sp, #16
 
-  %val = load i64, i64* @var
-  store i64 %val, i64* %local_var
+  %val = load i64, ptr @var
+  store i64 %val, ptr %local_var
 ; CHECK-DAG: str {{x[0-9]+}}, [sp, #{{[0-9]+}}]
 
-  store i64* %local_var, i64** @local_addr
+  store ptr %local_var, ptr @local_addr
 ; CHECK-DAG: add {{x[0-9]+}}, sp, #{{[0-9]+}}
 
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/logical-imm.ll b/llvm/test/CodeGen/AArch64/logical-imm.ll
index 6f562230d9374..a1cdf336e47f1 100644
--- a/llvm/test/CodeGen/AArch64/logical-imm.ll
+++ b/llvm/test/CodeGen/AArch64/logical-imm.ll
@@ -7,19 +7,19 @@ define void @test_and(i32 %in32, i64 %in64) {
 ; CHECK-LABEL: test_and:
 
   %val0 = and i32 %in32, 2863311530
-  store volatile i32 %val0, i32* @var32
+  store volatile i32 %val0, ptr @var32
 ; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, #0xaaaaaaaa
 
   %val1 = and i32 %in32, 4293984240
-  store volatile i32 %val1, i32* @var32
+  store volatile i32 %val1, ptr @var32
 ; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, #0xfff0fff0
 
   %val2 = and i64 %in64, 9331882296111890817
-  store volatile i64 %val2, i64* @var64
+  store volatile i64 %val2, ptr @var64
 ; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0x8181818181818181
 
   %val3 = and i64 %in64, 18429855317404942275
-  store volatile i64 %val3, i64* @var64
+  store volatile i64 %val3, ptr @var64
 ; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xffc3ffc3ffc3ffc3
 
   ret void
@@ -29,19 +29,19 @@ define void @test_orr(i32 %in32, i64 %in64) {
 ; CHECK-LABEL: test_orr:
 
   %val0 = or i32 %in32, 2863311530
-  store volatile i32 %val0, i32* @var32
+  store volatile i32 %val0, ptr @var32
 ; CHECK: orr {{w[0-9]+}}, {{w[0-9]+}}, #0xaaaaaaaa
 
   %val1 = or i32 %in32, 4293984240
-  store volatile i32 %val1, i32* @var32
+  store volatile i32 %val1, ptr @var32
 ; CHECK: orr {{w[0-9]+}}, {{w[0-9]+}}, #0xfff0fff0
 
   %val2 = or i64 %in64, 9331882296111890817
-  store volatile i64 %val2, i64* @var64
+  store volatile i64 %val2, ptr @var64
 ; CHECK: orr {{x[0-9]+}}, {{x[0-9]+}}, #0x8181818181818181
 
   %val3 = or i64 %in64, 18429855317404942275
-  store volatile i64 %val3, i64* @var64
+  store volatile i64 %val3, ptr @var64
 ; CHECK: orr {{x[0-9]+}}, {{x[0-9]+}}, #0xffc3ffc3ffc3ffc3
 
   ret void
@@ -51,19 +51,19 @@ define void @test_eor(i32 %in32, i64 %in64) {
 ; CHECK-LABEL: test_eor:
 
   %val0 = xor i32 %in32, 2863311530
-  store volatile i32 %val0, i32* @var32
+  store volatile i32 %val0, ptr @var32
 ; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, #0xaaaaaaaa
 
   %val1 = xor i32 %in32, 4293984240
-  store volatile i32 %val1, i32* @var32
+  store volatile i32 %val1, ptr @var32
 ; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, #0xfff0fff0
 
   %val2 = xor i64 %in64, 9331882296111890817
-  store volatile i64 %val2, i64* @var64
+  store volatile i64 %val2, ptr @var64
 ; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, #0x8181818181818181
 
   %val3 = xor i64 %in64, 18429855317404942275
-  store volatile i64 %val3, i64* @var64
+  store volatile i64 %val3, ptr @var64
 ; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, #0xffc3ffc3ffc3ffc3
 
   ret void
@@ -72,11 +72,11 @@ define void @test_eor(i32 %in32, i64 %in64) {
 define void @test_mov(i32 %in32, i64 %in64) {
 ; CHECK-LABEL: test_mov:
   %val0 = add i32 %in32, 2863311530
-  store i32 %val0, i32* @var32
+  store i32 %val0, ptr @var32
 ; CHECK: mov {{w[0-9]+}}, #-1431655766
 
   %val1 = add i64 %in64, 11068046444225730969
-  store i64 %val1, i64* @var64
+  store i64 %val1, ptr @var64
 ; CHECK: mov {{x[0-9]+}}, #-7378697629483820647
 
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
index 4078863301748..42775e5689449 100644
--- a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
+++ b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
@@ -53,62 +53,62 @@ define void @logical_32bit() minsize {
 ; CHECK-NEXT:    str w12, [x8]
 ; CHECK-NEXT:    str w9, [x8]
 ; CHECK-NEXT:    ret
-  %val1 = load i32, i32* @var1_32
-  %val2 = load i32, i32* @var2_32
+  %val1 = load i32, ptr @var1_32
+  %val2 = load i32, ptr @var2_32
 
   ; First check basic and/bic/or/orn/eor/eon patterns with no shift
   %neg_val2 = xor i32 -1, %val2
 
   %and_noshift = and i32 %val1, %val2
-  store volatile i32 %and_noshift, i32* @var1_32
+  store volatile i32 %and_noshift, ptr @var1_32
   %bic_noshift = and i32 %neg_val2, %val1
-  store volatile i32 %bic_noshift, i32* @var1_32
+  store volatile i32 %bic_noshift, ptr @var1_32
 
   %or_noshift = or i32 %val1, %val2
-  store volatile i32 %or_noshift, i32* @var1_32
+  store volatile i32 %or_noshift, ptr @var1_32
   %orn_noshift = or i32 %neg_val2, %val1
-  store volatile i32 %orn_noshift, i32* @var1_32
+  store volatile i32 %orn_noshift, ptr @var1_32
 
   %xor_noshift = xor i32 %val1, %val2
-  store volatile i32 %xor_noshift, i32* @var1_32
+  store volatile i32 %xor_noshift, ptr @var1_32
   %xorn_noshift = xor i32 %neg_val2, %val1
-  store volatile i32 %xorn_noshift, i32* @var1_32
+  store volatile i32 %xorn_noshift, ptr @var1_32
 
   ; Check the maximum shift on each
   %operand_lsl31 = shl i32 %val2, 31
   %neg_operand_lsl31 = xor i32 -1, %operand_lsl31
 
   %and_lsl31 = and i32 %val1, %operand_lsl31
-  store volatile i32 %and_lsl31, i32* @var1_32
+  store volatile i32 %and_lsl31, ptr @var1_32
   %bic_lsl31 = and i32 %val1, %neg_operand_lsl31
-  store volatile i32 %bic_lsl31, i32* @var1_32
+  store volatile i32 %bic_lsl31, ptr @var1_32
 
   %or_lsl31 = or i32 %val1, %operand_lsl31
-  store volatile i32 %or_lsl31, i32* @var1_32
+  store volatile i32 %or_lsl31, ptr @var1_32
   %orn_lsl31 = or i32 %val1, %neg_operand_lsl31
-  store volatile i32 %orn_lsl31, i32* @var1_32
+  store volatile i32 %orn_lsl31, ptr @var1_32
 
   %xor_lsl31 = xor i32 %val1, %operand_lsl31
-  store volatile i32 %xor_lsl31, i32* @var1_32
+  store volatile i32 %xor_lsl31, ptr @var1_32
   %xorn_lsl31 = xor i32 %val1, %neg_operand_lsl31
-  store volatile i32 %xorn_lsl31, i32* @var1_32
+  store volatile i32 %xorn_lsl31, ptr @var1_32
 
   ; Check other shifts on a subset
   %operand_asr10 = ashr i32 %val2, 10
   %neg_operand_asr10 = xor i32 -1, %operand_asr10
 
   %bic_asr10 = and i32 %val1, %neg_operand_asr10
-  store volatile i32 %bic_asr10, i32* @var1_32
+  store volatile i32 %bic_asr10, ptr @var1_32
   %xor_asr10 = xor i32 %val1, %operand_asr10
-  store volatile i32 %xor_asr10, i32* @var1_32
+  store volatile i32 %xor_asr10, ptr @var1_32
 
   %operand_lsr1 = lshr i32 %val2, 1
   %neg_operand_lsr1 = xor i32 -1, %operand_lsr1
 
   %orn_lsr1 = or i32 %val1, %neg_operand_lsr1
-  store volatile i32 %orn_lsr1, i32* @var1_32
+  store volatile i32 %orn_lsr1, ptr @var1_32
   %xor_lsr1 = xor i32 %val1, %operand_lsr1
-  store volatile i32 %xor_lsr1, i32* @var1_32
+  store volatile i32 %xor_lsr1, ptr @var1_32
 
   %operand_ror20_big = shl i32 %val2, 12
   %operand_ror20_small = lshr i32 %val2, 20
@@ -116,9 +116,9 @@ define void @logical_32bit() minsize {
   %neg_operand_ror20 = xor i32 -1, %operand_ror20
 
   %xorn_ror20 = xor i32 %val1, %neg_operand_ror20
-  store volatile i32 %xorn_ror20, i32* @var1_32
+  store volatile i32 %xorn_ror20, ptr @var1_32
   %and_ror20 = and i32 %val1, %operand_ror20
-  store volatile i32 %and_ror20, i32* @var1_32
+  store volatile i32 %and_ror20, ptr @var1_32
 
   ret void
 }
@@ -169,62 +169,62 @@ define void @logical_64bit() minsize {
 ; CHECK-NEXT:    str x12, [x8]
 ; CHECK-NEXT:    str x9, [x8]
 ; CHECK-NEXT:    ret
-  %val1 = load i64, i64* @var1_64
-  %val2 = load i64, i64* @var2_64
+  %val1 = load i64, ptr @var1_64
+  %val2 = load i64, ptr @var2_64
 
   ; First check basic and/bic/or/orn/eor/eon patterns with no shift
   %neg_val2 = xor i64 -1, %val2
 
   %and_noshift = and i64 %val1, %val2
-  store volatile i64 %and_noshift, i64* @var1_64
+  store volatile i64 %and_noshift, ptr @var1_64
   %bic_noshift = and i64 %neg_val2, %val1
-  store volatile i64 %bic_noshift, i64* @var1_64
+  store volatile i64 %bic_noshift, ptr @var1_64
 
   %or_noshift = or i64 %val1, %val2
-  store volatile i64 %or_noshift, i64* @var1_64
+  store volatile i64 %or_noshift, ptr @var1_64
   %orn_noshift = or i64 %neg_val2, %val1
-  store volatile i64 %orn_noshift, i64* @var1_64
+  store volatile i64 %orn_noshift, ptr @var1_64
 
   %xor_noshift = xor i64 %val1, %val2
-  store volatile i64 %xor_noshift, i64* @var1_64
+  store volatile i64 %xor_noshift, ptr @var1_64
   %xorn_noshift = xor i64 %neg_val2, %val1
-  store volatile i64 %xorn_noshift, i64* @var1_64
+  store volatile i64 %xorn_noshift, ptr @var1_64
 
   ; Check the maximum shift on each
   %operand_lsl63 = shl i64 %val2, 63
   %neg_operand_lsl63 = xor i64 -1, %operand_lsl63
 
   %and_lsl63 = and i64 %val1, %operand_lsl63
-  store volatile i64 %and_lsl63, i64* @var1_64
+  store volatile i64 %and_lsl63, ptr @var1_64
   %bic_lsl63 = and i64 %val1, %neg_operand_lsl63
-  store volatile i64 %bic_lsl63, i64* @var1_64
+  store volatile i64 %bic_lsl63, ptr @var1_64
 
   %or_lsl63 = or i64 %val1, %operand_lsl63
-  store volatile i64 %or_lsl63, i64* @var1_64
+  store volatile i64 %or_lsl63, ptr @var1_64
   %orn_lsl63 = or i64 %val1, %neg_operand_lsl63
-  store volatile i64 %orn_lsl63, i64* @var1_64
+  store volatile i64 %orn_lsl63, ptr @var1_64
 
   %xor_lsl63 = xor i64 %val1, %operand_lsl63
-  store volatile i64 %xor_lsl63, i64* @var1_64
+  store volatile i64 %xor_lsl63, ptr @var1_64
   %xorn_lsl63 = xor i64 %val1, %neg_operand_lsl63
-  store volatile i64 %xorn_lsl63, i64* @var1_64
+  store volatile i64 %xorn_lsl63, ptr @var1_64
 
   ; Check other shifts on a subset
   %operand_asr10 = ashr i64 %val2, 10
   %neg_operand_asr10 = xor i64 -1, %operand_asr10
 
   %bic_asr10 = and i64 %val1, %neg_operand_asr10
-  store volatile i64 %bic_asr10, i64* @var1_64
+  store volatile i64 %bic_asr10, ptr @var1_64
   %xor_asr10 = xor i64 %val1, %operand_asr10
-  store volatile i64 %xor_asr10, i64* @var1_64
+  store volatile i64 %xor_asr10, ptr @var1_64
 
   %operand_lsr1 = lshr i64 %val2, 1
   %neg_operand_lsr1 = xor i64 -1, %operand_lsr1
 
   %orn_lsr1 = or i64 %val1, %neg_operand_lsr1
-  store volatile i64 %orn_lsr1, i64* @var1_64
+  store volatile i64 %orn_lsr1, ptr @var1_64
   %xor_lsr1 = xor i64 %val1, %operand_lsr1
-  store volatile i64 %xor_lsr1, i64* @var1_64
+  store volatile i64 %xor_lsr1, ptr @var1_64
 
   ; Construct a rotate-right from a bunch of other logical
   ; operations. DAGCombiner should ensure we form the ROTR during
@@ -235,9 +235,9 @@ define void @logical_64bit() minsize {
   %neg_operand_ror20 = xor i64 -1, %operand_ror20
 
   %xorn_ror20 = xor i64 %val1, %neg_operand_ror20
-  store volatile i64 %xorn_ror20, i64* @var1_64
+  store volatile i64 %xorn_ror20, ptr @var1_64
   %and_ror20 = and i64 %val1, %operand_ror20
-  store volatile i64 %and_ror20, i64* @var1_64
+  store volatile i64 %and_ror20, ptr @var1_64
 
   ret void
 }
@@ -263,8 +263,8 @@ define void @flag_setting() {
 ; CHECK-NEXT:  .LBB2_3: // %other_exit
 ; CHECK-NEXT:    str x9, [x8]
 ; CHECK-NEXT:    ret
-  %val1 = load i64, i64* @var1_64
-  %val2 = load i64, i64* @var2_64
+  %val1 = load i64, ptr @var1_64
+  %val2 = load i64, ptr @var2_64
 
   %simple_and = and i64 %val1, %val2
   %tst1 = icmp sgt i64 %simple_and, 0
@@ -283,7 +283,7 @@ test3:
   br i1 %tst3, label %ret, label %other_exit, !prof !1
 
 other_exit:
-  store volatile i64 %val1, i64* @var1_64
+  store volatile i64 %val1, ptr @var1_64
   ret void
 ret:
   ret void
@@ -301,7 +301,7 @@ define i64 @add_swap_rhs_lhs_i64(i64 %0, i64 %1) {
   ret i64 %5
 }
 
-define i64 @add_swap_no_op_i64(i64 %0, i64 %1, i64* %2) {
+define i64 @add_swap_no_op_i64(i64 %0, i64 %1, ptr %2) {
 ; CHECK-LABEL: add_swap_no_op_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl x8, x1, #3
@@ -310,7 +310,7 @@ define i64 @add_swap_no_op_i64(i64 %0, i64 %1, i64* %2) {
 ; CHECK-NEXT:    ret
   %4 = shl i64 %0, 8
   %5 = shl i64 %1, 3
-  store i64 %5, i64* %2
+  store i64 %5, ptr %2
   %6 = add i64 %5, %4
   ret i64 %6
 }
@@ -327,7 +327,7 @@ define i32 @add_swap_rhs_lhs_i32(i32 %0, i32 %1) {
   ret i32 %5
 }
 
-define i32 @add_swap_no_op_i32(i32 %0, i32 %1, i32* %2) {
+define i32 @add_swap_no_op_i32(i32 %0, i32 %1, ptr %2) {
 ; CHECK-LABEL: add_swap_no_op_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl w8, w1, #3
@@ -336,7 +336,7 @@ define i32 @add_swap_no_op_i32(i32 %0, i32 %1, i32* %2) {
 ; CHECK-NEXT:    ret
   %4 = shl i32 %0, 8
   %5 = shl i32 %1, 3
-  store i32 %5, i32* %2
+  store i32 %5, ptr %2
   %6 = add i32 %5, %4
   ret i32 %6
 }

diff  --git a/llvm/test/CodeGen/AArch64/loop-micro-op-buffer-size-t99.ll b/llvm/test/CodeGen/AArch64/loop-micro-op-buffer-size-t99.ll
index 73aae534299be..75786728cd0db 100644
--- a/llvm/test/CodeGen/AArch64/loop-micro-op-buffer-size-t99.ll
+++ b/llvm/test/CodeGen/AArch64/loop-micro-op-buffer-size-t99.ll
@@ -28,7 +28,7 @@ target triple = "aarch64-unknown-linux-gnu"
 ; CHECK-NOT: %val = add i32 %counter, 10
 ; CHECK: %counter.2 = phi i32 [ 0, %exit.0 ], [ %inc.2.3, %loop.2.inc.3 ]
 
-define void @foo(i32 * %out) {
+define void @foo(ptr %out) {
 entry:
   %0 = alloca [1024 x i32]
   %x0 = alloca [1024 x i32]
@@ -45,26 +45,26 @@ loop.header:
   br label %loop.body
 
 loop.body:
-  %ptr = getelementptr [1024 x i32], [1024 x i32]* %0, i32 0, i32 %counter
-  store i32 %counter, i32* %ptr
+  %ptr = getelementptr [1024 x i32], ptr %0, i32 0, i32 %counter
+  store i32 %counter, ptr %ptr
   %val = add i32 %counter, 5
-  %xptr = getelementptr [1024 x i32], [1024 x i32]* %x0, i32 0, i32 %counter
-  store i32 %val, i32* %xptr
+  %xptr = getelementptr [1024 x i32], ptr %x0, i32 0, i32 %counter
+  store i32 %val, ptr %xptr
   %val1 = add i32 %counter, 6
-  %xptr1 = getelementptr [1024 x i32], [1024 x i32]* %x01, i32 0, i32 %counter
-  store i32 %val1, i32* %xptr1
+  %xptr1 = getelementptr [1024 x i32], ptr %x01, i32 0, i32 %counter
+  store i32 %val1, ptr %xptr1
   %val2 = add i32 %counter, 7
-  %xptr2 = getelementptr [1024 x i32], [1024 x i32]* %x02, i32 0, i32 %counter
-  store i32 %val2, i32* %xptr2
+  %xptr2 = getelementptr [1024 x i32], ptr %x02, i32 0, i32 %counter
+  store i32 %val2, ptr %xptr2
   %val3 = add i32 %counter, 8
-  %xptr3 = getelementptr [1024 x i32], [1024 x i32]* %x03, i32 0, i32 %counter
-  store i32 %val3, i32* %xptr3
+  %xptr3 = getelementptr [1024 x i32], ptr %x03, i32 0, i32 %counter
+  store i32 %val3, ptr %xptr3
   %val4 = add i32 %counter, 9
-  %xptr4 = getelementptr [1024 x i32], [1024 x i32]* %x04, i32 0, i32 %counter
-  store i32 %val4, i32* %xptr4
+  %xptr4 = getelementptr [1024 x i32], ptr %x04, i32 0, i32 %counter
+  store i32 %val4, ptr %xptr4
   %val5 = add i32 %counter, 10
-  %xptr5 = getelementptr [1024 x i32], [1024 x i32]* %x05, i32 0, i32 %counter
-  store i32 %val5, i32* %xptr5
+  %xptr5 = getelementptr [1024 x i32], ptr %x05, i32 0, i32 %counter
+  store i32 %val5, ptr %xptr5
   br label %loop.inc
 
 loop.inc:
@@ -73,9 +73,9 @@ loop.inc:
   br i1 %1, label  %exit.0, label %loop.header
 
 exit.0:
-  %2 = getelementptr [1024 x i32], [1024 x i32]* %0, i32 0, i32 5
-  %3 = load i32, i32* %2
-  store i32 %3, i32 * %out
+  %2 = getelementptr [1024 x i32], ptr %0, i32 0, i32 5
+  %3 = load i32, ptr %2
+  store i32 %3, ptr %out
   br label %loop.2.header
 
 
@@ -84,28 +84,28 @@ loop.2.header:
   br label %loop.2.body
 
 loop.2.body:
-  %ptr.2 = getelementptr [1024 x i32], [1024 x i32]* %0, i32 0, i32 %counter.2
-  store i32 %counter.2, i32* %ptr.2
+  %ptr.2 = getelementptr [1024 x i32], ptr %0, i32 0, i32 %counter.2
+  store i32 %counter.2, ptr %ptr.2
   %val.2 = add i32 %counter.2, 5
-  %xptr.2 = getelementptr [1024 x i32], [1024 x i32]* %x0, i32 0, i32 %counter.2
-  store i32 %val.2, i32* %xptr.2
+  %xptr.2 = getelementptr [1024 x i32], ptr %x0, i32 0, i32 %counter.2
+  store i32 %val.2, ptr %xptr.2
   %val1.2 = add i32 %counter.2, 6
-  %xptr1.2 = getelementptr [1024 x i32], [1024 x i32]* %x01, i32 0, i32 %counter.2
-  store i32 %val1, i32* %xptr1.2
+  %xptr1.2 = getelementptr [1024 x i32], ptr %x01, i32 0, i32 %counter.2
+  store i32 %val1, ptr %xptr1.2
   %val2.2 = add i32 %counter.2, 7
-  %xptr2.2 = getelementptr [1024 x i32], [1024 x i32]* %x02, i32 0, i32 %counter.2
-  store i32 %val2, i32* %xptr2.2
+  %xptr2.2 = getelementptr [1024 x i32], ptr %x02, i32 0, i32 %counter.2
+  store i32 %val2, ptr %xptr2.2
   %val3.2 = add i32 %counter.2, 8
-  %xptr3.2 = getelementptr [1024 x i32], [1024 x i32]* %x03, i32 0, i32 %counter.2
-  store i32 %val3.2, i32* %xptr3.2
+  %xptr3.2 = getelementptr [1024 x i32], ptr %x03, i32 0, i32 %counter.2
+  store i32 %val3.2, ptr %xptr3.2
   %val4.2 = add i32 %counter.2, 9
-  %xptr4.2 = getelementptr [1024 x i32], [1024 x i32]* %x04, i32 0, i32 %counter.2
-  store i32 %val4.2, i32* %xptr4.2
+  %xptr4.2 = getelementptr [1024 x i32], ptr %x04, i32 0, i32 %counter.2
+  store i32 %val4.2, ptr %xptr4.2
   %val5.2 = add i32 %counter.2, 10
-  %xptr5.2 = getelementptr [1024 x i32], [1024 x i32]* %x05, i32 0, i32 %counter.2
-  store i32 %val5.2, i32* %xptr5.2
-  %xptr6.2 = getelementptr [1024 x i32], [1024 x i32]* %x06, i32 0, i32 %counter.2
-  store i32 %val5.2, i32* %xptr6.2
+  %xptr5.2 = getelementptr [1024 x i32], ptr %x05, i32 0, i32 %counter.2
+  store i32 %val5.2, ptr %xptr5.2
+  %xptr6.2 = getelementptr [1024 x i32], ptr %x06, i32 0, i32 %counter.2
+  store i32 %val5.2, ptr %xptr6.2
   br label %loop.2.inc
 
 loop.2.inc:
@@ -114,9 +114,9 @@ loop.2.inc:
   br i1 %4, label  %exit.2, label %loop.2.header
 
 exit.2:
-  %x2 = getelementptr [1024 x i32], [1024 x i32]* %0, i32 0, i32 6
-  %x3 = load i32, i32* %x2
-  %out2 = getelementptr i32, i32 * %out, i32 1
-  store i32 %3, i32 * %out2
+  %x2 = getelementptr [1024 x i32], ptr %0, i32 0, i32 6
+  %x3 = load i32, ptr %x2
+  %out2 = getelementptr i32, ptr %out, i32 1
+  store i32 %3, ptr %out2
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/lower-ptrmask.ll b/llvm/test/CodeGen/AArch64/lower-ptrmask.ll
index 12cce0677aefa..aceabf27d083f 100644
--- a/llvm/test/CodeGen/AArch64/lower-ptrmask.ll
+++ b/llvm/test/CodeGen/AArch64/lower-ptrmask.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=arm64-apple-iphoneos -stop-after=finalize-isel %s -o - | FileCheck %s
 
-declare i8* @llvm.ptrmask.p0i8.i64(i8* , i64)
+declare ptr @llvm.ptrmask.p0.i64(ptr , i64)
 
 ; CHECK-LABEL: name: test1
 ; CHECK:         %0:gpr64 = COPY $x0
@@ -8,12 +8,12 @@ declare i8* @llvm.ptrmask.p0i8.i64(i8* , i64)
 ; CHECK-NEXT:    $x0 = COPY %1
 ; CHECK-NEXT:    RET_ReallyLR implicit $x0
 
-define i8* @test1(i8* %src) {
-  %ptr = call i8* @llvm.ptrmask.p0i8.i64(i8* %src, i64 72057594037927928)
-  ret i8* %ptr
+define ptr @test1(ptr %src) {
+  %ptr = call ptr @llvm.ptrmask.p0.i64(ptr %src, i64 72057594037927928)
+  ret ptr %ptr
 }
 
-declare i8* @llvm.ptrmask.p0i8.i32(i8*, i32)
+declare ptr @llvm.ptrmask.p0.i32(ptr, i32)
 
 ; CHECK-LABEL: name: test2
 ; CHECK:         %0:gpr64 = COPY $x0
@@ -23,7 +23,7 @@ declare i8* @llvm.ptrmask.p0i8.i32(i8*, i32)
 ; CHECK-NEXT:    $x0 = COPY %3
 ; CHECK-NEXT:    RET_ReallyLR implicit $x0
 
-define i8* @test2(i8* %src) {
-  %ptr = call i8* @llvm.ptrmask.p0i8.i32(i8* %src, i32 10000)
-  ret i8* %ptr
+define ptr @test2(ptr %src) {
+  %ptr = call ptr @llvm.ptrmask.p0.i32(ptr %src, i32 10000)
+  ret ptr %ptr
 }

diff  --git a/llvm/test/CodeGen/AArch64/lowerMUL-newload.ll b/llvm/test/CodeGen/AArch64/lowerMUL-newload.ll
index b72422be759fb..8bce4c553755e 100644
--- a/llvm/test/CodeGen/AArch64/lowerMUL-newload.ll
+++ b/llvm/test/CodeGen/AArch64/lowerMUL-newload.ll
@@ -36,7 +36,7 @@ entry:
   ret <4 x i32> %v5
 }
 
-define void @mlai16_loadstore(i16* %a, i16* %b, i16* %c) {
+define void @mlai16_loadstore(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: mlai16_loadstore:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0, #16]
@@ -48,24 +48,20 @@ define void @mlai16_loadstore(i16* %a, i16* %b, i16* %c) {
 ; CHECK-NEXT:    str d0, [x0, #16]
 ; CHECK-NEXT:    ret
 entry:
-  %scevgep0 = getelementptr i16, i16* %a, i32 8
-  %vector_ptr0 = bitcast i16* %scevgep0 to <4 x i16>*
-  %vec0 = load <4 x i16>, <4 x i16>* %vector_ptr0, align 8
+  %scevgep0 = getelementptr i16, ptr %a, i32 8
+  %vec0 = load <4 x i16>, ptr %scevgep0, align 8
   %v0 = sext <4 x i16> %vec0 to <4 x i32>
-  %scevgep1 = getelementptr i16, i16* %b, i32 8
-  %vector_ptr1 = bitcast i16* %scevgep1 to <4 x i16>*
-  %vec1 = load <4 x i16>, <4 x i16>* %vector_ptr1, align 8
+  %scevgep1 = getelementptr i16, ptr %b, i32 8
+  %vec1 = load <4 x i16>, ptr %scevgep1, align 8
   %v1 = sext <4 x i16> %vec1 to <4 x i32>
-  %scevgep2 = getelementptr i16, i16* %c, i32 8
-  %vector_ptr2 = bitcast i16* %scevgep2 to <4 x i16>*
-  %vec2 = load <4 x i16>, <4 x i16>* %vector_ptr2, align 8
+  %scevgep2 = getelementptr i16, ptr %c, i32 8
+  %vec2 = load <4 x i16>, ptr %scevgep2, align 8
   %v2 = sext <4 x i16> %vec2 to <4 x i32>
   %v3 = mul <4 x i32> %v1, %v0
   %v4 = add <4 x i32> %v3, %v2
   %v5 = trunc <4 x i32> %v4 to <4 x i16>
-  %scevgep3 = getelementptr i16, i16* %a, i32 8
-  %vector_ptr3 = bitcast i16* %scevgep3 to <4 x i16>*
-  store <4 x i16> %v5, <4 x i16>* %vector_ptr3, align 8
+  %scevgep3 = getelementptr i16, ptr %a, i32 8
+  store <4 x i16> %v5, ptr %scevgep3, align 8
   ret void
 }
 
@@ -104,7 +100,7 @@ entry:
   ret <4 x i32> %v5
 }
 
-define void @addmuli16_loadstore(i16* %a, i16* %b, i16* %c) {
+define void @addmuli16_loadstore(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: addmuli16_loadstore:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x1, #16]
@@ -116,24 +112,20 @@ define void @addmuli16_loadstore(i16* %a, i16* %b, i16* %c) {
 ; CHECK-NEXT:    str d0, [x0, #16]
 ; CHECK-NEXT:    ret
 entry:
-  %scevgep0 = getelementptr i16, i16* %a, i32 8
-  %vector_ptr0 = bitcast i16* %scevgep0 to <4 x i16>*
-  %vec0 = load <4 x i16>, <4 x i16>* %vector_ptr0, align 8
+  %scevgep0 = getelementptr i16, ptr %a, i32 8
+  %vec0 = load <4 x i16>, ptr %scevgep0, align 8
   %v0 = sext <4 x i16> %vec0 to <4 x i32>
-  %scevgep1 = getelementptr i16, i16* %b, i32 8
-  %vector_ptr1 = bitcast i16* %scevgep1 to <4 x i16>*
-  %vec1 = load <4 x i16>, <4 x i16>* %vector_ptr1, align 8
+  %scevgep1 = getelementptr i16, ptr %b, i32 8
+  %vec1 = load <4 x i16>, ptr %scevgep1, align 8
   %v1 = sext <4 x i16> %vec1 to <4 x i32>
-  %scevgep2 = getelementptr i16, i16* %c, i32 8
-  %vector_ptr2 = bitcast i16* %scevgep2 to <4 x i16>*
-  %vec2 = load <4 x i16>, <4 x i16>* %vector_ptr2, align 8
+  %scevgep2 = getelementptr i16, ptr %c, i32 8
+  %vec2 = load <4 x i16>, ptr %scevgep2, align 8
   %v2 = sext <4 x i16> %vec2 to <4 x i32>
   %v3 = add <4 x i32> %v1, %v0
   %v4 = mul <4 x i32> %v3, %v2
   %v5 = trunc <4 x i32> %v4 to <4 x i16>
-  %scevgep3 = getelementptr i16, i16* %a, i32 8
-  %vector_ptr3 = bitcast i16* %scevgep3 to <4 x i16>*
-  store <4 x i16> %v5, <4 x i16>* %vector_ptr3, align 8
+  %scevgep3 = getelementptr i16, ptr %a, i32 8
+  store <4 x i16> %v5, ptr %scevgep3, align 8
   ret void
 }
 
@@ -172,7 +164,7 @@ entry:
   ret <2 x i64> %v5
 }
 
-define void @mlai32_loadstore(i32* %a, i32* %b, i32* %c) {
+define void @mlai32_loadstore(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: mlai32_loadstore:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x0, #32]
@@ -184,24 +176,20 @@ define void @mlai32_loadstore(i32* %a, i32* %b, i32* %c) {
 ; CHECK-NEXT:    str d0, [x0, #32]
 ; CHECK-NEXT:    ret
 entry:
-  %scevgep0 = getelementptr i32, i32* %a, i32 8
-  %vector_ptr0 = bitcast i32* %scevgep0 to <2 x i32>*
-  %vec0 = load <2 x i32>, <2 x i32>* %vector_ptr0, align 8
+  %scevgep0 = getelementptr i32, ptr %a, i32 8
+  %vec0 = load <2 x i32>, ptr %scevgep0, align 8
   %v0 = sext <2 x i32> %vec0 to <2 x i64>
-  %scevgep1 = getelementptr i32, i32* %b, i32 8
-  %vector_ptr1 = bitcast i32* %scevgep1 to <2 x i32>*
-  %vec1 = load <2 x i32>, <2 x i32>* %vector_ptr1, align 8
+  %scevgep1 = getelementptr i32, ptr %b, i32 8
+  %vec1 = load <2 x i32>, ptr %scevgep1, align 8
   %v1 = sext <2 x i32> %vec1 to <2 x i64>
-  %scevgep2 = getelementptr i32, i32* %c, i32 8
-  %vector_ptr2 = bitcast i32* %scevgep2 to <2 x i32>*
-  %vec2 = load <2 x i32>, <2 x i32>* %vector_ptr2, align 8
+  %scevgep2 = getelementptr i32, ptr %c, i32 8
+  %vec2 = load <2 x i32>, ptr %scevgep2, align 8
   %v2 = sext <2 x i32> %vec2 to <2 x i64>
   %v3 = mul <2 x i64> %v1, %v0
   %v4 = add <2 x i64> %v3, %v2
   %v5 = trunc <2 x i64> %v4 to <2 x i32>
-  %scevgep3 = getelementptr i32, i32* %a, i32 8
-  %vector_ptr3 = bitcast i32* %scevgep3 to <2 x i32>*
-  store <2 x i32> %v5, <2 x i32>* %vector_ptr3, align 8
+  %scevgep3 = getelementptr i32, ptr %a, i32 8
+  store <2 x i32> %v5, ptr %scevgep3, align 8
   ret void
 }
 
@@ -240,7 +228,7 @@ entry:
   ret <2 x i64> %v5
 }
 
-define void @addmuli32_loadstore(i32* %a, i32* %b, i32* %c) {
+define void @addmuli32_loadstore(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: addmuli32_loadstore:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x1, #32]
@@ -252,28 +240,24 @@ define void @addmuli32_loadstore(i32* %a, i32* %b, i32* %c) {
 ; CHECK-NEXT:    str d0, [x0, #32]
 ; CHECK-NEXT:    ret
 entry:
-  %scevgep0 = getelementptr i32, i32* %a, i32 8
-  %vector_ptr0 = bitcast i32* %scevgep0 to <2 x i32>*
-  %vec0 = load <2 x i32>, <2 x i32>* %vector_ptr0, align 8
+  %scevgep0 = getelementptr i32, ptr %a, i32 8
+  %vec0 = load <2 x i32>, ptr %scevgep0, align 8
   %v0 = sext <2 x i32> %vec0 to <2 x i64>
-  %scevgep1 = getelementptr i32, i32* %b, i32 8
-  %vector_ptr1 = bitcast i32* %scevgep1 to <2 x i32>*
-  %vec1 = load <2 x i32>, <2 x i32>* %vector_ptr1, align 8
+  %scevgep1 = getelementptr i32, ptr %b, i32 8
+  %vec1 = load <2 x i32>, ptr %scevgep1, align 8
   %v1 = sext <2 x i32> %vec1 to <2 x i64>
-  %scevgep2 = getelementptr i32, i32* %c, i32 8
-  %vector_ptr2 = bitcast i32* %scevgep2 to <2 x i32>*
-  %vec2 = load <2 x i32>, <2 x i32>* %vector_ptr2, align 8
+  %scevgep2 = getelementptr i32, ptr %c, i32 8
+  %vec2 = load <2 x i32>, ptr %scevgep2, align 8
   %v2 = sext <2 x i32> %vec2 to <2 x i64>
   %v3 = add <2 x i64> %v1, %v0
   %v4 = mul <2 x i64> %v3, %v2
   %v5 = trunc <2 x i64> %v4 to <2 x i32>
-  %scevgep3 = getelementptr i32, i32* %a, i32 8
-  %vector_ptr3 = bitcast i32* %scevgep3 to <2 x i32>*
-  store <2 x i32> %v5, <2 x i32>* %vector_ptr3, align 8
+  %scevgep3 = getelementptr i32, ptr %a, i32 8
+  store <2 x i32> %v5, ptr %scevgep3, align 8
   ret void
 }
 
-define void @func1(i16* %a, i16* %b, i16* %c) {
+define void @func1(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: func1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x2, #16]
@@ -296,9 +280,9 @@ define void @func1(i16* %a, i16* %b, i16* %c) {
 entry:
 ; The test case trying to vectorize the pseudo code below.
 ; a[i] = b[i] + c[i];
-; b[i] = a[i] * c[i];
-; a[i] = b[i] + a[i] * c[i];
-; Checking that vector load a[i] for "a[i] = b[i] + a[i] * c[i]" is
+; b[i] = a[i] * c[i];
+; a[i] = b[i] + a[i] * c[i];
+; Checking that vector load a[i] for "a[i] = b[i] + a[i] * c[i]" is
 ; scheduled before the first vector store to "a[i] = b[i] + c[i]".
 ; Checking that there is no vector load a[i] scheduled between the vector
 ; stores to a[i], otherwise the load of a[i] will be polluted by the first
@@ -307,42 +291,34 @@ entry:
 ; lowerMUL for the new created Load SDNode.
 
 
-  %scevgep0 = getelementptr i16, i16* %a, i32 8
-  %vector_ptr0 = bitcast i16* %scevgep0 to <4 x i16>*
-  %vec0 = load <4 x i16>, <4 x i16>* %vector_ptr0, align 8
-  %scevgep1 = getelementptr i16, i16* %b, i32 8
-  %vector_ptr1 = bitcast i16* %scevgep1 to <4 x i16>*
-  %vec1 = load <4 x i16>, <4 x i16>* %vector_ptr1, align 8
+  %scevgep0 = getelementptr i16, ptr %a, i32 8
+  %vec0 = load <4 x i16>, ptr %scevgep0, align 8
+  %scevgep1 = getelementptr i16, ptr %b, i32 8
+  %vec1 = load <4 x i16>, ptr %scevgep1, align 8
   %0 = zext <4 x i16> %vec1 to <4 x i32>
-  %scevgep2 = getelementptr i16, i16* %c, i32 8
-  %vector_ptr2 = bitcast i16* %scevgep2 to <4 x i16>*
-  %vec2 = load <4 x i16>, <4 x i16>* %vector_ptr2, align 8
+  %scevgep2 = getelementptr i16, ptr %c, i32 8
+  %vec2 = load <4 x i16>, ptr %scevgep2, align 8
   %1 = sext <4 x i16> %vec2 to <4 x i32>
   %vec3 = add <4 x i32> %1, %0
   %2 = trunc <4 x i32> %vec3 to <4 x i16>
-  %scevgep3 = getelementptr i16, i16* %a, i32 8
-  %vector_ptr3 = bitcast i16* %scevgep3 to <4 x i16>*
-  store <4 x i16> %2, <4 x i16>* %vector_ptr3, align 8
-  %vector_ptr4 = bitcast i16* %scevgep2 to <4 x i16>*
-  %vec4 = load <4 x i16>, <4 x i16>* %vector_ptr4, align 8
+  %scevgep3 = getelementptr i16, ptr %a, i32 8
+  store <4 x i16> %2, ptr %scevgep3, align 8
+  %vec4 = load <4 x i16>, ptr %scevgep2, align 8
   %3 = sext <4 x i16> %vec4 to <4 x i32>
   %vec5 = mul <4 x i32> %3, %vec3
   %4 = trunc <4 x i32> %vec5 to <4 x i16>
-  %vector_ptr5 = bitcast i16* %scevgep1 to <4 x i16>*
-  store <4 x i16> %4, <4 x i16>* %vector_ptr5, align 8
+  store <4 x i16> %4, ptr %scevgep1, align 8
   %5 = sext <4 x i16> %vec0 to <4 x i32>
-  %vector_ptr6 = bitcast i16* %scevgep2 to <4 x i16>*
-  %vec6 = load <4 x i16>, <4 x i16>* %vector_ptr6, align 8
+  %vec6 = load <4 x i16>, ptr %scevgep2, align 8
   %6 = sext <4 x i16> %vec6 to <4 x i32>
   %vec7 = mul <4 x i32> %6, %5
   %vec8 = add <4 x i32> %vec7, %vec5
   %7 = trunc <4 x i32> %vec8 to <4 x i16>
-  %vector_ptr7 = bitcast i16* %scevgep3 to <4 x i16>*
-  store <4 x i16> %7, <4 x i16>* %vector_ptr7, align 8
+  store <4 x i16> %7, ptr %scevgep3, align 8
   ret void
 }
 
-define void @func2(i16* %a, i16* %b, i16* %c) {
+define void @func2(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: func2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr d0, [x2, #16]
@@ -366,9 +342,9 @@ define void @func2(i16* %a, i16* %b, i16* %c) {
 entry:
 ; The test case trying to vectorize the pseudo code below.
 ; a[i] = b[i] + c[i];
-; b[i] = a[i] * c[i];
-; a[i] = b[i] + a[i] * c[i] + a[i];
-; Checking that vector load a[i] for "a[i] = b[i] + a[i] * c[i] + a[i]"
+; b[i] = a[i] * c[i];
+; a[i] = b[i] + a[i] * c[i] + a[i];
+; Checking that vector load a[i] for "a[i] = b[i] + a[i] * c[i] + a[i]"
 ; is scheduled before the first vector store to "a[i] = b[i] + c[i]".
 ; Checking that there is no vector load a[i] scheduled between the first
 ; vector store to a[i] and the vector add of a[i], otherwise the load of
@@ -377,38 +353,30 @@ entry:
 ; Load SDNode are updated during lowerMUL.
 
 
-  %scevgep0 = getelementptr i16, i16* %a, i32 8
-  %vector_ptr0 = bitcast i16* %scevgep0 to <4 x i16>*
-  %vec0 = load <4 x i16>, <4 x i16>* %vector_ptr0, align 8
-  %scevgep1 = getelementptr i16, i16* %b, i32 8
-  %vector_ptr1 = bitcast i16* %scevgep1 to <4 x i16>*
-  %vec1 = load <4 x i16>, <4 x i16>* %vector_ptr1, align 8
+  %scevgep0 = getelementptr i16, ptr %a, i32 8
+  %vec0 = load <4 x i16>, ptr %scevgep0, align 8
+  %scevgep1 = getelementptr i16, ptr %b, i32 8
+  %vec1 = load <4 x i16>, ptr %scevgep1, align 8
   %0 = zext <4 x i16> %vec1 to <4 x i32>
-  %scevgep2 = getelementptr i16, i16* %c, i32 8
-  %vector_ptr2 = bitcast i16* %scevgep2 to <4 x i16>*
-  %vec2 = load <4 x i16>, <4 x i16>* %vector_ptr2, align 8
+  %scevgep2 = getelementptr i16, ptr %c, i32 8
+  %vec2 = load <4 x i16>, ptr %scevgep2, align 8
   %1 = sext <4 x i16> %vec2 to <4 x i32>
   %vec3 = add <4 x i32> %1, %0
   %2 = trunc <4 x i32> %vec3 to <4 x i16>
-  %scevgep3 = getelementptr i16, i16* %a, i32 8
-  %vector_ptr3 = bitcast i16* %scevgep3 to <4 x i16>*
-  store <4 x i16> %2, <4 x i16>* %vector_ptr3, align 8
-  %vector_ptr4 = bitcast i16* %scevgep2 to <4 x i16>*
-  %vec4 = load <4 x i16>, <4 x i16>* %vector_ptr4, align 8
+  %scevgep3 = getelementptr i16, ptr %a, i32 8
+  store <4 x i16> %2, ptr %scevgep3, align 8
+  %vec4 = load <4 x i16>, ptr %scevgep2, align 8
   %3 = sext <4 x i16> %vec4 to <4 x i32>
   %vec5 = mul <4 x i32> %3, %vec3
   %4 = trunc <4 x i32> %vec5 to <4 x i16>
-  %vector_ptr5 = bitcast i16* %scevgep1 to <4 x i16>*
-  store <4 x i16> %4, <4 x i16>* %vector_ptr5, align 8
+  store <4 x i16> %4, ptr %scevgep1, align 8
   %5 = sext <4 x i16> %vec0 to <4 x i32>
-  %vector_ptr6 = bitcast i16* %scevgep2 to <4 x i16>*
-  %vec6 = load <4 x i16>, <4 x i16>* %vector_ptr6, align 8
+  %vec6 = load <4 x i16>, ptr %scevgep2, align 8
   %6 = sext <4 x i16> %vec6 to <4 x i32>
   %vec7 = mul <4 x i32> %6, %5
   %vec8 = add <4 x i32> %vec7, %vec5
   %vec9 = add <4 x i32> %vec8, %5
   %7 = trunc <4 x i32> %vec9 to <4 x i16>
-  %vector_ptr7 = bitcast i16* %scevgep3 to <4 x i16>*
-  store <4 x i16> %7, <4 x i16>* %vector_ptr7, align 8
+  store <4 x i16> %7, ptr %scevgep3, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/ls64-inline-asm.ll b/llvm/test/CodeGen/AArch64/ls64-inline-asm.ll
index fd6f43fbc1dc7..ed91f4adb8d3f 100644
--- a/llvm/test/CodeGen/AArch64/ls64-inline-asm.ll
+++ b/llvm/test/CodeGen/AArch64/ls64-inline-asm.ll
@@ -3,7 +3,7 @@
 
 %struct.foo = type { [8 x i64] }
 
-define void @load(%struct.foo* %output, i8* %addr) {
+define void @load(ptr %output, ptr %addr) {
 ; CHECK-LABEL: load:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    //APP
@@ -15,13 +15,12 @@ define void @load(%struct.foo* %output, i8* %addr) {
 ; CHECK-NEXT:    stp x2, x3, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %val = call i512 asm sideeffect "ld64b $0,[$1]", "=r,r,~{memory}"(i8* %addr)
-  %outcast = bitcast %struct.foo* %output to i512*
-  store i512 %val, i512* %outcast, align 8
+  %val = call i512 asm sideeffect "ld64b $0,[$1]", "=r,r,~{memory}"(ptr %addr)
+  store i512 %val, ptr %output, align 8
   ret void
 }
 
-define void @store(%struct.foo* %input, i8* %addr) {
+define void @store(ptr %input, ptr %addr) {
 ; CHECK-LABEL: store:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldp x8, x9, [x0, #48]
@@ -33,13 +32,12 @@ define void @store(%struct.foo* %input, i8* %addr) {
 ; CHECK-NEXT:    //NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %incast = bitcast %struct.foo* %input to i512*
-  %val = load i512, i512* %incast, align 8
-  call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 %val, i8* %addr)
+  %val = load i512, ptr %input, align 8
+  call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 %val, ptr %addr)
   ret void
 }
 
-define void @store2(i32* %in, i8* %addr) {
+define void @store2(ptr %in, ptr %addr) {
 ; CHECK-LABEL: store2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #64
@@ -57,28 +55,28 @@ define void @store2(i32* %in, i8* %addr) {
 ; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i32, i32* %in, align 4
+  %0 = load i32, ptr %in, align 4
   %conv = sext i32 %0 to i64
-  %arrayidx1 = getelementptr inbounds i32, i32* %in, i64 1
-  %1 = load i32, i32* %arrayidx1, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %in, i64 1
+  %1 = load i32, ptr %arrayidx1, align 4
   %conv2 = sext i32 %1 to i64
-  %arrayidx4 = getelementptr inbounds i32, i32* %in, i64 4
-  %2 = load i32, i32* %arrayidx4, align 4
+  %arrayidx4 = getelementptr inbounds i32, ptr %in, i64 4
+  %2 = load i32, ptr %arrayidx4, align 4
   %conv5 = sext i32 %2 to i64
-  %arrayidx7 = getelementptr inbounds i32, i32* %in, i64 16
-  %3 = load i32, i32* %arrayidx7, align 4
+  %arrayidx7 = getelementptr inbounds i32, ptr %in, i64 16
+  %3 = load i32, ptr %arrayidx7, align 4
   %conv8 = sext i32 %3 to i64
-  %arrayidx10 = getelementptr inbounds i32, i32* %in, i64 25
-  %4 = load i32, i32* %arrayidx10, align 4
+  %arrayidx10 = getelementptr inbounds i32, ptr %in, i64 25
+  %4 = load i32, ptr %arrayidx10, align 4
   %conv11 = sext i32 %4 to i64
-  %arrayidx13 = getelementptr inbounds i32, i32* %in, i64 36
-  %5 = load i32, i32* %arrayidx13, align 4
+  %arrayidx13 = getelementptr inbounds i32, ptr %in, i64 36
+  %5 = load i32, ptr %arrayidx13, align 4
   %conv14 = sext i32 %5 to i64
-  %arrayidx16 = getelementptr inbounds i32, i32* %in, i64 49
-  %6 = load i32, i32* %arrayidx16, align 4
+  %arrayidx16 = getelementptr inbounds i32, ptr %in, i64 49
+  %6 = load i32, ptr %arrayidx16, align 4
   %conv17 = sext i32 %6 to i64
-  %arrayidx19 = getelementptr inbounds i32, i32* %in, i64 64
-  %7 = load i32, i32* %arrayidx19, align 4
+  %arrayidx19 = getelementptr inbounds i32, ptr %in, i64 64
+  %7 = load i32, ptr %arrayidx19, align 4
   %conv20 = sext i32 %7 to i64
   %s.sroa.10.0.insert.ext = zext i64 %conv20 to i512
   %s.sroa.10.0.insert.shift = shl nuw i512 %s.sroa.10.0.insert.ext, 448
@@ -102,6 +100,6 @@ entry:
   %s.sroa.0.0.insert.ext = zext i64 %conv to i512
   %s.sroa.0.0.insert.mask = or i512 %s.sroa.4.0.insert.mask, %s.sroa.4.0.insert.shift
   %s.sroa.0.0.insert.insert = or i512 %s.sroa.0.0.insert.mask, %s.sroa.0.0.insert.ext
-  call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 %s.sroa.0.0.insert.insert, i8* %addr)
+  call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 %s.sroa.0.0.insert.insert, ptr %addr)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/ls64-intrinsics.ll b/llvm/test/CodeGen/AArch64/ls64-intrinsics.ll
index 45772dd9b515b..0f6f2d1221d6c 100644
--- a/llvm/test/CodeGen/AArch64/ls64-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/ls64-intrinsics.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=aarch64 -mattr=+ls64 -verify-machineinstrs -o - %s | FileCheck %s
 ; RUN: llc -mtriple=aarch64_be -mattr=+ls64 -verify-machineinstrs -o - %s | FileCheck %s
 
-define void @test_ld64b({ i64, i64, i64, i64, i64, i64, i64, i64 }* %out, i8* %addr) {
+define void @test_ld64b(ptr %out, ptr %addr) {
 ; CHECK-LABEL: test_ld64b:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ld64b x2, [x1]
@@ -12,12 +12,12 @@ define void @test_ld64b({ i64, i64, i64, i64, i64, i64, i64, i64 }* %out, i8* %a
 ; CHECK-NEXT:    stp x2, x3, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %val = tail call { i64, i64, i64, i64, i64, i64, i64, i64 } @llvm.aarch64.ld64b(i8* %addr)
-  store { i64, i64, i64, i64, i64, i64, i64, i64 } %val, { i64, i64, i64, i64, i64, i64, i64, i64 }* %out, align 8
+  %val = tail call { i64, i64, i64, i64, i64, i64, i64, i64 } @llvm.aarch64.ld64b(ptr %addr)
+  store { i64, i64, i64, i64, i64, i64, i64, i64 } %val, ptr %out, align 8
   ret void
 }
 
-define void @test_st64b({ i64, i64, i64, i64, i64, i64, i64, i64 }* %in, i8* %addr) {
+define void @test_st64b(ptr %in, ptr %addr) {
 ; CHECK-LABEL: test_st64b:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldp x8, x9, [x0, #48]
@@ -27,7 +27,7 @@ define void @test_st64b({ i64, i64, i64, i64, i64, i64, i64, i64 }* %in, i8* %ad
 ; CHECK-NEXT:    st64b x2, [x1]
 ; CHECK-NEXT:    ret
 entry:
-  %val = load { i64, i64, i64, i64, i64, i64, i64, i64 }, { i64, i64, i64, i64, i64, i64, i64, i64 }* %in, align 8
+  %val = load { i64, i64, i64, i64, i64, i64, i64, i64 }, ptr %in, align 8
   %v0 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 0
   %v1 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 1
   %v2 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 2
@@ -36,11 +36,11 @@ entry:
   %v5 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 5
   %v6 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 6
   %v7 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 7
-  tail call void @llvm.aarch64.st64b(i8* %addr, i64 %v0, i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7)
+  tail call void @llvm.aarch64.st64b(ptr %addr, i64 %v0, i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7)
   ret void
 }
 
-define i64 @test_st64bv({ i64, i64, i64, i64, i64, i64, i64, i64 }* %in, i8* %addr) {
+define i64 @test_st64bv(ptr %in, ptr %addr) {
 ; CHECK-LABEL: test_st64bv:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldp x8, x9, [x0, #48]
@@ -50,7 +50,7 @@ define i64 @test_st64bv({ i64, i64, i64, i64, i64, i64, i64, i64 }* %in, i8* %ad
 ; CHECK-NEXT:    st64bv x0, x2, [x1]
 ; CHECK-NEXT:    ret
 entry:
-  %val = load { i64, i64, i64, i64, i64, i64, i64, i64 }, { i64, i64, i64, i64, i64, i64, i64, i64 }* %in, align 8
+  %val = load { i64, i64, i64, i64, i64, i64, i64, i64 }, ptr %in, align 8
   %v0 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 0
   %v1 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 1
   %v2 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 2
@@ -59,11 +59,11 @@ entry:
   %v5 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 5
   %v6 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 6
   %v7 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 7
-  %status = tail call i64 @llvm.aarch64.st64bv(i8* %addr, i64 %v0, i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7)
+  %status = tail call i64 @llvm.aarch64.st64bv(ptr %addr, i64 %v0, i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7)
   ret i64 %status
 }
 
-define i64 @test_st64bv0({ i64, i64, i64, i64, i64, i64, i64, i64 }* %in, i8* %addr) {
+define i64 @test_st64bv0(ptr %in, ptr %addr) {
 ; CHECK-LABEL: test_st64bv0:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldp x8, x9, [x0, #48]
@@ -73,7 +73,7 @@ define i64 @test_st64bv0({ i64, i64, i64, i64, i64, i64, i64, i64 }* %in, i8* %a
 ; CHECK-NEXT:    st64bv0 x0, x2, [x1]
 ; CHECK-NEXT:    ret
 entry:
-  %val = load { i64, i64, i64, i64, i64, i64, i64, i64 }, { i64, i64, i64, i64, i64, i64, i64, i64 }* %in, align 8
+  %val = load { i64, i64, i64, i64, i64, i64, i64, i64 }, ptr %in, align 8
   %v0 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 0
   %v1 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 1
   %v2 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 2
@@ -82,11 +82,11 @@ entry:
   %v5 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 5
   %v6 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 6
   %v7 = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } %val, 7
-  %status = tail call i64 @llvm.aarch64.st64bv0(i8* %addr, i64 %v0, i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7)
+  %status = tail call i64 @llvm.aarch64.st64bv0(ptr %addr, i64 %v0, i64 %v1, i64 %v2, i64 %v3, i64 %v4, i64 %v5, i64 %v6, i64 %v7)
   ret i64 %status
 }
 
-declare { i64, i64, i64, i64, i64, i64, i64, i64 } @llvm.aarch64.ld64b(i8*)
-declare void @llvm.aarch64.st64b(i8*, i64, i64, i64, i64, i64, i64, i64, i64)
-declare i64 @llvm.aarch64.st64bv(i8*, i64, i64, i64, i64, i64, i64, i64, i64)
-declare i64 @llvm.aarch64.st64bv0(i8*, i64, i64, i64, i64, i64, i64, i64, i64)
+declare { i64, i64, i64, i64, i64, i64, i64, i64 } @llvm.aarch64.ld64b(ptr)
+declare void @llvm.aarch64.st64b(ptr, i64, i64, i64, i64, i64, i64, i64, i64)
+declare i64 @llvm.aarch64.st64bv(ptr, i64, i64, i64, i64, i64, i64, i64, i64)
+declare i64 @llvm.aarch64.st64bv0(ptr, i64, i64, i64, i64, i64, i64, i64, i64)

diff  --git a/llvm/test/CodeGen/AArch64/machine-combiner-madd.ll b/llvm/test/CodeGen/AArch64/machine-combiner-madd.ll
index 005bf860fdf2c..1c1ec9f7944fd 100644
--- a/llvm/test/CodeGen/AArch64/machine-combiner-madd.ll
+++ b/llvm/test/CodeGen/AArch64/machine-combiner-madd.ll
@@ -20,20 +20,18 @@
 
 %class.D = type { %class.basic_string.base, [4 x i8] }
 %class.basic_string.base = type <{ i64, i64, i32 }>
-@a = global %class.D* zeroinitializer, align 8
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
+@a = global ptr zeroinitializer, align 8
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
 define internal void @fun() section ".text.startup" {
 entry:
   %tmp.i.i = alloca %class.D, align 8
-  %y = bitcast %class.D* %tmp.i.i to i8*
   br label %loop
 loop:
   %conv11.i.i = phi i64 [ 0, %entry ], [ %inc.i.i, %loop ]
   %i = phi i64 [ undef, %entry ], [ %inc.i.i, %loop ]
-  %x = load %class.D*, %class.D** getelementptr inbounds (%class.D*, %class.D** @a, i64 0), align 8
-  %arrayidx.i.i.i = getelementptr inbounds %class.D, %class.D* %x, i64 %conv11.i.i
-  %d = bitcast %class.D* %arrayidx.i.i.i to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 nonnull %y, i8* align 8 %d, i64 24, i1 false)
+  %x = load ptr, ptr @a, align 8
+  %arrayidx.i.i.i = getelementptr inbounds %class.D, ptr %x, i64 %conv11.i.i
+  call void @llvm.memcpy.p0.p0.i64(ptr align 8 nonnull %tmp.i.i, ptr align 8 %arrayidx.i.i.i, i64 24, i1 false)
   %inc.i.i = add i64 %i, 1
   %cmp.i.i = icmp slt i64 %inc.i.i, 0
   br i1 %cmp.i.i, label %loop, label %exit

diff  --git a/llvm/test/CodeGen/AArch64/machine-copy-prop.ll b/llvm/test/CodeGen/AArch64/machine-copy-prop.ll
index 2ac87f0004849..cb1a40aabea11 100644
--- a/llvm/test/CodeGen/AArch64/machine-copy-prop.ll
+++ b/llvm/test/CodeGen/AArch64/machine-copy-prop.ll
@@ -14,7 +14,7 @@
 ; CHECK-LABEL: foo:
 ; CHECK: ld2
 ; CHECK-NOT: // kill: def D{{[0-9]+}} killed D{{[0-9]+}}
-define void @foo(<2 x i32> %shuffle251, <8 x i8> %vtbl1.i, i8* %t2, <2 x i32> %vrsubhn_v2.i1364) {
+define void @foo(<2 x i32> %shuffle251, <8 x i8> %vtbl1.i, ptr %t2, <2 x i32> %vrsubhn_v2.i1364) {
 entry:
   %val0 = alloca [2 x i64], align 8
   %val1 = alloca <2 x i64>, align 16
@@ -24,7 +24,7 @@ entry:
   br i1 %cmp, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  store i1 true, i1* @failed, align 1
+  store i1 true, ptr @failed, align 1
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
@@ -36,18 +36,16 @@ if.end:                                           ; preds = %if.then, %entry
   %sub = add <2 x i32> %0, <i32 1, i32 0>
   %sext = sext <2 x i32> %sub to <2 x i64>
   %vset_lane603 = shufflevector <2 x i64> %sext, <2 x i64> undef, <1 x i32> zeroinitializer
-  %t1 = bitcast [2 x i64]* %val0 to i8*
-  call void @llvm.aarch64.neon.st2lane.v2i64.p0i8(<2 x i64> zeroinitializer, <2 x i64> zeroinitializer, i64 1, i8* %t1)
-  call void @llvm.aarch64.neon.st2lane.v1i64.p0i8(<1 x i64> <i64 4096>, <1 x i64> <i64 -1>, i64 0, i8* %t2)
-  %vld2_lane = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0i8(<1 x i64> <i64 11>, <1 x i64> <i64 11>, i64 0, i8* %t2)
+  call void @llvm.aarch64.neon.st2lane.v2i64.p0(<2 x i64> zeroinitializer, <2 x i64> zeroinitializer, i64 1, ptr %val0)
+  call void @llvm.aarch64.neon.st2lane.v1i64.p0(<1 x i64> <i64 4096>, <1 x i64> <i64 -1>, i64 0, ptr %t2)
+  %vld2_lane = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0(<1 x i64> <i64 11>, <1 x i64> <i64 11>, i64 0, ptr %t2)
   %vld2_lane.0.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2_lane, 0
   %vld2_lane.1.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2_lane, 1
-  %vld2_lane1 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0i8(<1 x i64> %vld2_lane.0.extract, <1 x i64> %vld2_lane.1.extract, i64 0, i8* %t1)
+  %vld2_lane1 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0(<1 x i64> %vld2_lane.0.extract, <1 x i64> %vld2_lane.1.extract, i64 0, ptr %val0)
   %vld2_lane1.0.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2_lane1, 0
   %vld2_lane1.1.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2_lane1, 1
-  %t3 = bitcast <2 x i64>* %val1 to i8*
-  call void @llvm.aarch64.neon.st2.v1i64.p0i8(<1 x i64> %vld2_lane1.0.extract, <1 x i64> %vld2_lane1.1.extract, i8* %t3)
-  %t4 = load <2 x i64>, <2 x i64>* %val1, align 16
+  call void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64> %vld2_lane1.0.extract, <1 x i64> %vld2_lane1.1.extract, ptr %val1)
+  %t4 = load <2 x i64>, ptr %val1, align 16
   %vsubhn = sub <2 x i64> <i64 11, i64 0>, %t4
   %vsubhn1 = lshr <2 x i64> %vsubhn, <i64 32, i64 32>
   %vsubhn2 = trunc <2 x i64> %vsubhn1 to <2 x i32>
@@ -65,7 +63,7 @@ if.end:                                           ; preds = %if.then, %entry
   br i1 %cmp2, label %if.end2, label %if.then2
 
 if.then2:                                       ; preds = %if.end
-  store i1 true, i1* @failed, align 1
+  store i1 true, ptr @failed, align 1
   br label %if.end2
 
 if.end2:                                        ; preds = %if.then682, %if.end
@@ -84,13 +82,13 @@ declare void @f2()
 
 declare <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16>, <4 x i16>)
 
-declare void @llvm.aarch64.neon.st2lane.v2i64.p0i8(<2 x i64>, <2 x i64>, i64, i8* nocapture)
+declare void @llvm.aarch64.neon.st2lane.v2i64.p0(<2 x i64>, <2 x i64>, i64, ptr nocapture)
 
-declare void @llvm.aarch64.neon.st2lane.v1i64.p0i8(<1 x i64>, <1 x i64>, i64, i8* nocapture)
+declare void @llvm.aarch64.neon.st2lane.v1i64.p0(<1 x i64>, <1 x i64>, i64, ptr nocapture)
 
-declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0i8(<1 x i64>, <1 x i64>, i64, i8*)
+declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0(<1 x i64>, <1 x i64>, i64, ptr)
 
-declare void @llvm.aarch64.neon.st2.v1i64.p0i8(<1 x i64>, <1 x i64>, i8* nocapture)
+declare void @llvm.aarch64.neon.st2.v1i64.p0(<1 x i64>, <1 x i64>, ptr nocapture)
 
 declare <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64>, <1 x i64>)
 

diff  --git a/llvm/test/CodeGen/AArch64/machine-copy-remove.ll b/llvm/test/CodeGen/AArch64/machine-copy-remove.ll
index 75954f83c19c8..48333c27829c1 100644
--- a/llvm/test/CodeGen/AArch64/machine-copy-remove.ll
+++ b/llvm/test/CodeGen/AArch64/machine-copy-remove.ll
@@ -4,13 +4,13 @@
 ; CHECK: cbz x[[REG:[0-9]+]], [[BB:.LBB.*]]
 ; CHECK: [[BB]]:
 ; CHECK-NOT: mov x[[REG]], xzr
-define i64 @f_XX(i64 %n, i64* nocapture readonly %P) {
+define i64 @f_XX(i64 %n, ptr nocapture readonly %P) {
 entry:
   %tobool = icmp eq i64 %n, 0
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  %0 = load i64, i64* %P
+  %0 = load i64, ptr %P
   br label %if.end
 
 if.end:                                           ; preds = %entry, %if.then
@@ -22,13 +22,13 @@ if.end:                                           ; preds = %entry, %if.then
 ; CHECK: cbz w[[REG:[0-9]+]], [[BB:.LBB.*]]
 ; CHECK: [[BB]]:
 ; CHECK-NOT: mov w[[REG]], wzr
-define i32 @f_WW(i32 %n, i32* nocapture readonly %P) {
+define i32 @f_WW(i32 %n, ptr nocapture readonly %P) {
 entry:
   %tobool = icmp eq i32 %n, 0
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  %0 = load i32, i32* %P
+  %0 = load i32, ptr %P
   br label %if.end
 
 if.end:                                           ; preds = %entry, %if.then
@@ -40,13 +40,13 @@ if.end:                                           ; preds = %entry, %if.then
 ; CHECK: cbz x[[REG:[0-9]+]], [[BB:.LBB.*]]
 ; CHECK: [[BB]]:
 ; CHECK-NOT: mov w[[REG]], wzr
-define i32 @f_XW(i64 %n, i32* nocapture readonly %P) {
+define i32 @f_XW(i64 %n, ptr nocapture readonly %P) {
 entry:
   %tobool = icmp eq i64 %n, 0
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  %0 = load i32, i32* %P
+  %0 = load i32, ptr %P
   br label %if.end
 
 if.end:                                           ; preds = %entry, %if.then
@@ -60,13 +60,13 @@ if.end:                                           ; preds = %entry, %if.then
 ; CHECK: mov x[[REG]], xzr
 ; Do not remove the mov in this case because we do not know if the upper bits
 ; of the X register are zero.
-define i64 @f_WX(i32 %n, i64* nocapture readonly %P) {
+define i64 @f_WX(i32 %n, ptr nocapture readonly %P) {
 entry:
   %tobool = icmp eq i32 %n, 0
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  %0 = load i64, i64* %P
+  %0 = load i64, ptr %P
   br label %if.end
 
 if.end:                                           ; preds = %entry, %if.then
@@ -81,7 +81,7 @@ if.end:                                           ; preds = %entry, %if.then
 ; CHECK-NOT: mov w[[REG]], wzr
 ; Because we returned w0 but x0 was marked live-in to the block, we didn't
 ; remove the <kill> on the str leading to a verification failure.
-define i32 @test_superreg(i64 %in, i64* %dest) {
+define i32 @test_superreg(i64 %in, ptr %dest) {
   %tst = icmp eq i64 %in, 0
   br i1 %tst, label %true, label %false
 
@@ -89,6 +89,6 @@ false:
   ret i32 42
 
 true:
-  store volatile i64 %in, i64* %dest
+  store volatile i64 %in, ptr %dest
   ret i32 0
 }

diff  --git a/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll b/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
index 3af1c6c5ca461..60c73384107b9 100644
--- a/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
+++ b/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
@@ -42,7 +42,7 @@ entry:
   br i1 %cmp63, label %for.body.preheader, label %for.cond.cleanup
 
 for.body.preheader:
-  %0 = load i32, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @A, i64 0, i64 0), align 4
+  %0 = load i32, ptr @A, align 4
   br label %for.body
 
 for.cond.cleanup:
@@ -98,7 +98,7 @@ entry:
   br i1 %cmp63, label %for.body.preheader, label %for.cond.cleanup
 
 for.body.preheader:
-  %0 = load i32, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @A, i64 0, i64 0), align 4
+  %0 = load i32, ptr @A, align 4
   %call0 = tail call i32 @_Z3usei(i32 %n)
   br label %for.body
 
@@ -116,7 +116,7 @@ for.body:
   br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
 }
 
-define i32 @cant_sink_successive_store(i32* nocapture readnone %store, i32 %n) {
+define i32 @cant_sink_successive_store(ptr nocapture readnone %store, i32 %n) {
 ; CHECK-LABEL: cant_sink_successive_store:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
@@ -155,8 +155,8 @@ entry:
   br i1 %cmp63, label %for.body.preheader, label %for.cond.cleanup
 
 for.body.preheader:
-  %0 = load i32, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @A, i64 0, i64 0), align 4
-  store i32 42, i32* %store, align 4
+  %0 = load i32, ptr @A, align 4
+  store i32 42, ptr %store, align 4
   br label %for.body
 
 for.cond.cleanup:

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-noredzone.ll b/llvm/test/CodeGen/AArch64/machine-outliner-noredzone.ll
index 36d860a6b85ea..aeedb838904cc 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-noredzone.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-noredzone.ll
@@ -18,17 +18,17 @@ define cc 10 void @bar() #0 {
   ; CHECK: bl OUTLINED_FUNCTION
   ; REDZONE-LABEL: bar
   ; REDZONE: bl OUTLINED_FUNCTION
-  %1 = load i32, i32* @x, align 4
+  %1 = load i32, ptr @x, align 4
   %2 = add nsw i32 %1, 1
-  store i32 %2, i32* @x, align 4
+  store i32 %2, ptr @x, align 4
   call void @baz()
-  %3 = load i32, i32* @x, align 4
+  %3 = load i32, ptr @x, align 4
   %4 = add nsw i32 %3, 1
-  store i32 %4, i32* @x, align 4
+  store i32 %4, ptr @x, align 4
   call void @baz()
-  %5 = load i32, i32* @x, align 4
+  %5 = load i32, ptr @x, align 4
   %6 = add nsw i32 %5, 1
-  store i32 %6, i32* @x, align 4
+  store i32 %6, ptr @x, align 4
   ret void
 }
 
@@ -43,34 +43,34 @@ define void @foo() #0 {
   %2 = alloca i32, align 4
   %3 = alloca i32, align 4
   %4 = alloca i32, align 4
-  store i32 0, i32* %1, align 4
-  store i32 0, i32* %2, align 4
-  store i32 0, i32* %3, align 4
-  store i32 0, i32* %4, align 4
-  %5 = load i32, i32* %1, align 4
+  store i32 0, ptr %1, align 4
+  store i32 0, ptr %2, align 4
+  store i32 0, ptr %3, align 4
+  store i32 0, ptr %4, align 4
+  %5 = load i32, ptr %1, align 4
   %6 = add nsw i32 %5, 1
-  store i32 %6, i32* %1, align 4
-  %7 = load i32, i32* %3, align 4
+  store i32 %6, ptr %1, align 4
+  %7 = load i32, ptr %3, align 4
   %8 = add nsw i32 %7, 1
-  store i32 %8, i32* %3, align 4
-  %9 = load i32, i32* %4, align 4
+  store i32 %8, ptr %3, align 4
+  %9 = load i32, ptr %4, align 4
   %10 = add nsw i32 %9, 1
-  store i32 %10, i32* %4, align 4
-  %11 = load i32, i32* %2, align 4
+  store i32 %10, ptr %4, align 4
+  %11 = load i32, ptr %2, align 4
   %12 = add nsw i32 %11, 1
-  store i32 %12, i32* %2, align 4
-  %13 = load i32, i32* %1, align 4
+  store i32 %12, ptr %2, align 4
+  %13 = load i32, ptr %1, align 4
   %14 = add nsw i32 %13, 1
-  store i32 %14, i32* %1, align 4
-  %15 = load i32, i32* %3, align 4
+  store i32 %14, ptr %1, align 4
+  %15 = load i32, ptr %3, align 4
   %16 = add nsw i32 %15, 1
-  store i32 %16, i32* %3, align 4
-  %17 = load i32, i32* %4, align 4
+  store i32 %16, ptr %3, align 4
+  %17 = load i32, ptr %4, align 4
   %18 = add nsw i32 %17, 1
-  store i32 %18, i32* %4, align 4
-  %19 = load i32, i32* %2, align 4
+  store i32 %18, ptr %4, align 4
+  %19 = load i32, ptr %2, align 4
   %20 = add nsw i32 %19, -1
-  store i32 %20, i32* %2, align 4
+  store i32 %20, ptr %2, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-outline-bti.ll b/llvm/test/CodeGen/AArch64/machine-outliner-outline-bti.ll
index c30d31fa91b26..7761e197c3972 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-outline-bti.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-outline-bti.ll
@@ -9,7 +9,7 @@ define hidden void @foo() minsize "branch-target-enforcement"="true" {
 entry:
 ; CHECK: hint #34
 ; CHECK: b       OUTLINED_FUNCTION_0
-  store volatile i32 1, i32* @g, align 4
+  store volatile i32 1, ptr @g, align 4
   ret void
 }
 
@@ -17,6 +17,6 @@ define hidden void @bar() minsize "branch-target-enforcement"="true" {
 entry:
 ; CHECK: hint #34
 ; CHECK: b       OUTLINED_FUNCTION_0
-  store volatile i32 1, i32* @g, align 4
+  store volatile i32 1, ptr @g, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-remarks.ll b/llvm/test/CodeGen/AArch64/machine-outliner-remarks.ll
index 5adefc7cb7a33..a9fc850ecd816 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-remarks.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-remarks.ll
@@ -56,8 +56,8 @@ define void @dog() #0 !dbg !8 {
 entry:
   %x = alloca i32, align 4
   %y = alloca i32, align 4
-  store i32 0, i32* %x, align 4
-  store i32 1, i32* %y, align 4, !dbg !12
+  store i32 0, ptr %x, align 4
+  store i32 1, ptr %y, align 4, !dbg !12
   ret void
 }
 
@@ -65,8 +65,8 @@ define void @cat() #0 !dbg !14 {
 entry:
   %x = alloca i32, align 4
   %y = alloca i32, align 4
-  store i32 0, i32* %x, align 4
-  store i32 1, i32* %y, align 4, !dbg !16
+  store i32 0, ptr %x, align 4
+  store i32 1, ptr %y, align 4, !dbg !16
   ret void
 }
 
@@ -77,12 +77,12 @@ define void @foo() #0 !dbg !18 {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4, !dbg !24
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4, !dbg !26
+  store i32 1, ptr %1, align 4, !dbg !24
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4, !dbg !26
   ret void
 }
 
@@ -93,12 +93,12 @@ define void @bar() #0 !dbg !27 {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4, !dbg !33
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4, !dbg !35
+  store i32 1, ptr %1, align 4, !dbg !33
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4, !dbg !35
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-cfi.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-cfi.ll
index 74d2795db14c1..1c3b0441e6bde 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-cfi.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-cfi.ll
@@ -19,12 +19,12 @@ define void @a() "sign-return-address"="all" "sign-return-address-key"="b_key" {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; CHECK-NOT:          bl OUTLINED_FUNCTION_{{[0-9]+}}
 ; V8A:                hint #31
 ; V83A:               autibsp
@@ -44,12 +44,12 @@ define void @b() "sign-return-address"="all" "sign-return-address-key"="b_key" n
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; CHECK:                bl [[OUTLINED_FUNC:OUTLINED_FUNCTION_[0-9]+]]
 ; V8A:                  hint #31
 ; V83A:                 autibsp
@@ -68,12 +68,12 @@ define void @c() "sign-return-address"="all" "sign-return-address-key"="b_key" n
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; CHECK:                bl [[OUTLINED_FUNC]]
 ; V8A:                  hint #31
 ; V83A:                 autibsp

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll
index 4efbc3af5e953..07d561abc1b5e 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll
@@ -15,12 +15,12 @@ define void @a() "sign-return-address"="all" {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; V8A:            hint #29
 ; V83A:           autiasp
   ret void
@@ -38,12 +38,12 @@ define void @b() "sign-return-address"="non-leaf" {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; V8A-NOT:          hint #29
 ; V83A-NOT:         autiasp
   ret void
@@ -61,12 +61,12 @@ define void @c() "sign-return-address"="all" {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; V8A:            hint #29
 ; V83A:           autiasp
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-non-leaf.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-non-leaf.ll
index 9b281d19d486b..3e85142ae32ab 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-non-leaf.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-non-leaf.ll
@@ -16,12 +16,12 @@ define i64 @a(i64 %x) "sign-return-address"="non-leaf" "sign-return-address-key"
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
   call void asm sideeffect "mov x30, $0", "r,~{lr}"(i64 %x) #1
   ret i64 %x
 }
@@ -38,12 +38,12 @@ define i64 @b(i64 %x) "sign-return-address"="non-leaf" "sign-return-address-key"
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
   call void asm sideeffect "mov x30, $0", "r,~{lr}"(i64 %x) #1
   ret i64 %x
 }
@@ -60,12 +60,12 @@ define i64 @c(i64 %x) "sign-return-address"="non-leaf" "sign-return-address-key"
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
   call void asm sideeffect "mov x30, $0", "r,~{lr}"(i64 %x) #1
   ret i64 %x
 }

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-diff-key.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-diff-key.ll
index 5138430331c01..a127895597355 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-diff-key.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-diff-key.ll
@@ -15,12 +15,12 @@ define void @a() "sign-return-address"="all" {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; V8A:            hint #29
 ; V83A:           autiasp
   ret void
@@ -39,12 +39,12 @@ define void @b() "sign-return-address"="all" "sign-return-address-key"="b_key" {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; V8A-NOT:          hint #29
 ; V83A-NOT:         autiasp
   ret void
@@ -62,12 +62,12 @@ define void @c() "sign-return-address"="all" {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; V8A:            hint #29
 ; V83A:           autiasp
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-a.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-a.ll
index e0b42bf54be33..dbbd924a7147e 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-a.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-a.ll
@@ -14,12 +14,12 @@ define void @a() "sign-return-address"="all" "sign-return-address-key"="a_key" n
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; V8A:              hint #29
 ; V83A:             autiasp
   ret void
@@ -35,12 +35,12 @@ define void @b() "sign-return-address"="all" nounwind {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; V8A:              hint #29
 ; V83A:             autiasp
   ret void
@@ -56,12 +56,12 @@ define void @c() "sign-return-address"="all" nounwind {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; V8A:              hint #29
 ; V83A:             autiasp
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-b.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-b.ll
index 128ac6dc4b456..81724006a1720 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-b.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-same-scope-same-key-b.ll
@@ -15,12 +15,12 @@ define void @a() "sign-return-address"="all" "sign-return-address-key"="b_key" n
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; V8A:              hint #31
 ; V83A:             autibsp
   ret void
@@ -37,12 +37,12 @@ define void @b() "sign-return-address"="all" "sign-return-address-key"="b_key" n
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; V8A:              hint #31
 ; V83A:             autibsp
   ret void
@@ -59,12 +59,12 @@ define void @c() "sign-return-address"="all" "sign-return-address-key"="b_key" n
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; V8A:              hint #31
 ; V83A:             autibsp
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-sp-mod.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-sp-mod.ll
index 540fa1c46dd67..b126a44486c1e 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-sp-mod.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-sp-mod.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -mtriple aarch64-arm-none-eabi -enable-machine-outliner \
 ; RUN:  -verify-machineinstrs %s -o - | FileCheck %s
 
-@v = common dso_local global i32* null, align 8
+@v = common dso_local global ptr null, align 8
 
 ; CHECK-LABEL:  foo:                                    // @foo
 ; CHECK-NEXT:   // %bb.0:                               // %entry
@@ -12,12 +12,12 @@ define dso_local void @foo(i32 %x) #0 {
 entry:
   %0 = zext i32 %x to i64
   %vla = alloca i32, i64 %0, align 4
-  store volatile i32* %vla, i32** @v, align 8
-  store volatile i32* %vla, i32** @v, align 8
-  store volatile i32* %vla, i32** @v, align 8
-  store volatile i32* %vla, i32** @v, align 8
-  store volatile i32* %vla, i32** @v, align 8
-  store volatile i32* %vla, i32** @v, align 8
+  store volatile ptr %vla, ptr @v, align 8
+  store volatile ptr %vla, ptr @v, align 8
+  store volatile ptr %vla, ptr @v, align 8
+  store volatile ptr %vla, ptr @v, align 8
+  store volatile ptr %vla, ptr @v, align 8
+  store volatile ptr %vla, ptr @v, align 8
   ret void
 }
 
@@ -30,12 +30,12 @@ define dso_local void @bar(i32 %x) #0 {
 entry:
   %0 = zext i32 %x to i64
   %vla = alloca i32, i64 %0, align 4
-  store volatile i32* null, i32** @v, align 8
-  store volatile i32* %vla, i32** @v, align 8
-  store volatile i32* %vla, i32** @v, align 8
-  store volatile i32* %vla, i32** @v, align 8
-  store volatile i32* %vla, i32** @v, align 8
-  store volatile i32* %vla, i32** @v, align 8
+  store volatile ptr null, ptr @v, align 8
+  store volatile ptr %vla, ptr @v, align 8
+  store volatile ptr %vla, ptr @v, align 8
+  store volatile ptr %vla, ptr @v, align 8
+  store volatile ptr %vla, ptr @v, align 8
+  store volatile ptr %vla, ptr @v, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-subtarget.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-subtarget.ll
index 8e85389579241..b193e242369ea 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-subtarget.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-subtarget.ll
@@ -18,12 +18,12 @@ define void @a() #0 {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; CHECK:                  retab
 ; CHECK-NOT:              auti[a,b]sp
   ret void
@@ -42,12 +42,12 @@ define void @b() #0 {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; CHECK:                  retab
 ; CHECK-NOT:              auti[a,b]sp
   ret void
@@ -66,12 +66,12 @@ define void @c() #1 {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; CHECK:                  hint #31
 ; CHECK-NOT:              ret{{[a,b]}}
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-thunk.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-thunk.ll
index 525e1c96538bd..bc4b074a057d0 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-thunk.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-thunk.ll
@@ -38,7 +38,7 @@ entry:
   ret i32 %cx
 }
 
-define hidden i32 @c(i32 (i32, i32, i32, i32)* %fptr) #0 {
+define hidden i32 @c(ptr %fptr) #0 {
 ; CHECK-LABEL:  c:                                      // @c
 ; CHECK:        // %bb.0:                               // %entry
 ; V8A-NEXT:         hint #25
@@ -54,7 +54,7 @@ entry:
   ret i32 %add
 }
 
-define hidden i32 @d(i32 (i32, i32, i32, i32)* %fptr) #0 {
+define hidden i32 @d(ptr %fptr) #0 {
 ; CHECK-LABEL:  d:                                      // @d
 ; CHECK:        // %bb.0:                               // %entry
 ; V8A-NEXT:         hint #25

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-v8-3.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-v8-3.ll
index 3837aa58f8a38..07c7cdbee9c61 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-v8-3.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-v8-3.ll
@@ -15,12 +15,12 @@ define void @a() #0 {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; CHECK:                  retab
 ; CHECK-NOT:              auti
   ret void
@@ -37,12 +37,12 @@ define void @b() #0 {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; CHECK:                  retab
 ; CHECK-NOT:              auti
   ret void
@@ -59,12 +59,12 @@ define void @c() #0 {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
 ; CHECK:                  retab
 ; CHECK-NOT:              auti
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-tail.ll b/llvm/test/CodeGen/AArch64/machine-outliner-tail.ll
index 7d4ed6bd36192..94b5d7b0b29d5 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-tail.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-tail.ll
@@ -16,7 +16,7 @@ entry:
 
 declare void @z(i32, i32, i32, i32)
 
-define dso_local void @b(i32* nocapture readnone %p) {
+define dso_local void @b(ptr nocapture readnone %p) {
 entry:
   tail call void @z(i32 1, i32 2, i32 3, i32 4)
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-throw.ll b/llvm/test/CodeGen/AArch64/machine-outliner-throw.ll
index b8520896fb682..7e4c26e6491cc 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-throw.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-throw.ll
@@ -21,10 +21,9 @@ define dso_local i32 @_Z5func1i(i32 %x) #0 {
 entry:
   %mul = mul nsw i32 %x, %x
   %add = add nuw nsw i32 %mul, 1
-  %exception = tail call i8* @__cxa_allocate_exception(i64 4) #1
-  %0 = bitcast i8* %exception to i32*
-  store i32 %add, i32* %0, align 16
-  tail call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) #2
+  %exception = tail call ptr @__cxa_allocate_exception(i64 4) #1
+  store i32 %add, ptr %exception, align 16
+  tail call void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null) #2
   unreachable
 }
 
@@ -45,10 +44,9 @@ entry:
   %conv = zext i8 %x to i32
   %mul = mul nuw nsw i32 %conv, %conv
   %add = add nuw nsw i32 %mul, 1
-  %exception = tail call i8* @__cxa_allocate_exception(i64 4) #1
-  %0 = bitcast i8* %exception to i32*
-  store i32 %add, i32* %0, align 16
-  tail call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) #2
+  %exception = tail call ptr @__cxa_allocate_exception(i64 4) #1
+  store i32 %add, ptr %exception, align 16
+  tail call void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null) #2
   unreachable
 }
 
@@ -62,9 +60,9 @@ entry:
 ; CHECK:      .cfi_endproc
 
 
-@_ZTIi = external dso_local constant i8*
-declare dso_local i8* @__cxa_allocate_exception(i64) local_unnamed_addr
-declare dso_local void @__cxa_throw(i8*, i8*, i8*) local_unnamed_addr
+@_ZTIi = external dso_local constant ptr
+declare dso_local ptr @__cxa_allocate_exception(i64) local_unnamed_addr
+declare dso_local void @__cxa_throw(ptr, ptr, ptr) local_unnamed_addr
 
 attributes #0 = { minsize noreturn optsize }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner-thunk.ll b/llvm/test/CodeGen/AArch64/machine-outliner-thunk.ll
index 425d616d88660..8740aac0549ee 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-thunk.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-thunk.ll
@@ -38,7 +38,7 @@ entry:
   ret i32 %cx
 }
 
-define hidden i32 @c(i32 (i32, i32, i32, i32)* %fptr) {
+define hidden i32 @c(ptr %fptr) {
 ; CHECK-LABEL: c:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -54,7 +54,7 @@ entry:
   ret i32 %add
 }
 
-define hidden i32 @d(i32 (i32, i32, i32, i32)* %fptr) {
+define hidden i32 @d(ptr %fptr) {
 ; CHECK-LABEL: d:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill

diff  --git a/llvm/test/CodeGen/AArch64/machine-outliner.ll b/llvm/test/CodeGen/AArch64/machine-outliner.ll
index 1ae905b497923..e6c5c94b00b55 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner.ll
@@ -24,12 +24,12 @@ define linkonce_odr void @fish() #0 {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
   ret void
 }
 
@@ -43,12 +43,12 @@ define void @turtle() section "TURTLE,turtle" {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
   ret void
 }
 
@@ -62,12 +62,12 @@ define void @cat() #0 {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
   ret void
 }
 
@@ -81,12 +81,12 @@ define void @dog() #0 {
   %4 = alloca i32, align 4
   %5 = alloca i32, align 4
   %6 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
-  store i32 2, i32* %2, align 4
-  store i32 3, i32* %3, align 4
-  store i32 4, i32* %4, align 4
-  store i32 5, i32* %5, align 4
-  store i32 6, i32* %6, align 4
+  store i32 1, ptr %1, align 4
+  store i32 2, ptr %2, align 4
+  store i32 3, ptr %3, align 4
+  store i32 4, ptr %4, align 4
+  store i32 5, ptr %5, align 4
+  store i32 6, ptr %6, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll b/llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll
index 9315d1741574a..e7e109170d6a1 100644
--- a/llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll
+++ b/llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll
@@ -9,7 +9,7 @@ target triple = "arm64-apple-ios8.0.0"
 ; The kill flags on the test had to be cleared because the AND was going to read
 ; registers in a BB after the test instruction.
 
-define i32 @test(i32* %ptr) {
+define i32 @test(ptr %ptr) {
 ; CHECK-LABEL: test:
 ; CHECK:       ; %bb.0: ; %bb
 ; CHECK-NEXT:    mov x8, x0
@@ -37,6 +37,6 @@ bb:
   br i1 %tmp342, label %bb343, label %.thread
 
 bb343:                                            ; preds = %.thread
-  store i32 %tmp341, i32* %ptr, align 4
+  store i32 %tmp341, ptr %ptr, align 4
   ret i32 -1
 }

diff  --git a/llvm/test/CodeGen/AArch64/machine_cse.ll b/llvm/test/CodeGen/AArch64/machine_cse.ll
index 51252a2a8428f..de28a2df27f66 100644
--- a/llvm/test/CodeGen/AArch64/machine_cse.ll
+++ b/llvm/test/CodeGen/AArch64/machine_cse.ll
@@ -11,18 +11,18 @@
 @d = external global i32
 @e = external global i32
 
-define void @combine-sign-comparisons-by-cse(i32 *%arg) {
+define void @combine-sign-comparisons-by-cse(ptr %arg) {
 ; CHECK: cmp
 ; CHECK: b.ge
 ; CHECK-NOT: cmp
 ; CHECK: b.le
 
 entry:
-  %a = load i32, i32* @a, align 4
-  %b = load i32, i32* @b, align 4
-  %c = load i32, i32* @c, align 4
-  %d = load i32, i32* @d, align 4
-  %e = load i32, i32* @e, align 4
+  %a = load i32, ptr @a, align 4
+  %b = load i32, ptr @b, align 4
+  %c = load i32, ptr @c, align 4
+  %d = load i32, ptr @d, align 4
+  %e = load i32, ptr @e, align 4
 
   %cmp = icmp slt i32 %a, %e
   br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
@@ -44,30 +44,30 @@ if.end:
 
 return:
   %retval.0 = phi i32 [ 0, %if.end ], [ 1, %land.lhs.true3 ], [ 1, %land.lhs.true ]
-  store i32 %a, i32 *%arg
+  store i32 %a, ptr %arg
   ret void
 }
 
-define void @combine_vector_zeros(<8 x i8>* %p, <16 x i8>* %q) {
+define void @combine_vector_zeros(ptr %p, ptr %q) {
 ; CHECK-LABEL: combine_vector_zeros:
 ; CHECK: movi v[[REG:[0-9]+]].2d, #0
 ; CHECK-NOT: movi
 ; CHECK: str d[[REG]], [x0]
 ; CHECK: str q[[REG]], [x1]
 entry:
-  store <8 x i8> zeroinitializer, <8 x i8>* %p
-  store <16 x i8> zeroinitializer, <16 x i8>* %q
+  store <8 x i8> zeroinitializer, ptr %p
+  store <16 x i8> zeroinitializer, ptr %q
   ret void
 }
 
-define void @combine_vector_ones(<2 x i32>* %p, <4 x i32>* %q) {
+define void @combine_vector_ones(ptr %p, ptr %q) {
 ; CHECK-LABEL: combine_vector_ones:
 ; CHECK: movi v[[REG:[0-9]+]].2d, #0xffffffffffffffff
 ; CHECK-NOT: movi
 ; CHECK: str d[[REG]], [x0]
 ; CHECK: str q[[REG]], [x1]
 entry:
-  store <2 x i32> <i32 -1, i32 -1>, <2 x i32>* %p
-  store <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32>* %q
+  store <2 x i32> <i32 -1, i32 -1>, ptr %p
+  store <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, ptr %q
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/machine_cse_illegal_hoist.ll b/llvm/test/CodeGen/AArch64/machine_cse_illegal_hoist.ll
index a200e664e25e8..a2a80e04b3f2e 100644
--- a/llvm/test/CodeGen/AArch64/machine_cse_illegal_hoist.ll
+++ b/llvm/test/CodeGen/AArch64/machine_cse_illegal_hoist.ll
@@ -11,42 +11,42 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-arm-none-eabi"
 
 @var = hidden local_unnamed_addr global i32 0, align 4
-@_ZTIi = external dso_local constant ptr
+@_ZTIi = external dso_local constant ptr
 declare dso_local void @_Z2fnv() local_unnamed_addr #1
 declare dso_local i32 @__gxx_personality_v0(...)
-declare i32 @llvm.eh.typeid.for(i8*) #2
-declare dso_local i8* @__cxa_begin_catch(i8*) local_unnamed_addr
+declare i32 @llvm.eh.typeid.for(ptr) #2
+declare dso_local ptr @__cxa_begin_catch(ptr) local_unnamed_addr
 declare dso_local void @__cxa_end_catch() local_unnamed_addr
 
-define hidden i32 @_Z7examplev() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define hidden i32 @_Z7examplev() personality ptr @__gxx_personality_v0 {
 entry:
   invoke void @_Z2fnv() to label %try.cont unwind label %lpad
 
 lpad:                                             ; preds = %entry
-  %0 = landingpad { i8*, i32 }
-          catch i8* bitcast (i8** @_ZTIi to i8*)
-          catch i8* null
-  %1 = extractvalue { i8*, i32 } %0, 0
-  %2 = extractvalue { i8*, i32 } %0, 1
-  %3 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
+  %0 = landingpad { ptr, i32 }
+          catch ptr @_ZTIi
+          catch ptr null
+  %1 = extractvalue { ptr, i32 } %0, 0
+  %2 = extractvalue { ptr, i32 } %0, 1
+  %3 = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
   %matches = icmp eq i32 %2, %3
-  %4 = tail call i8* @__cxa_begin_catch(i8* %1)
-  %5 = load i32, i32* @var, align 4
+  %4 = tail call ptr @__cxa_begin_catch(ptr %1)
+  %5 = load i32, ptr @var, align 4
   br i1 %matches, label %catch1, label %catch
 
 catch1:                                           ; preds = %lpad
   %or3 = or i32 %5, 4
-  store i32 %or3, i32* @var, align 4
+  store i32 %or3, ptr @var, align 4
   tail call void @__cxa_end_catch()
   br label %try.cont
 
 try.cont:                                         ; preds = %entry, %catch1, %catch
-  %6 = load i32, i32* @var, align 4
+  %6 = load i32, ptr @var, align 4
   ret i32 %6
 
 catch:                                            ; preds = %lpad
   %or = or i32 %5, 8
-  store i32 %or, i32* @var, align 4
+  store i32 %or, ptr @var, align 4
   tail call void @__cxa_end_catch()
   br label %try.cont
 }

diff  --git a/llvm/test/CodeGen/AArch64/macho-global-symbols.ll b/llvm/test/CodeGen/AArch64/macho-global-symbols.ll
index d68abad57ccd6..aa5acfee95b2f 100644
--- a/llvm/test/CodeGen/AArch64/macho-global-symbols.ll
+++ b/llvm/test/CodeGen/AArch64/macho-global-symbols.ll
@@ -3,12 +3,12 @@
 ; All global symbols must be at-most linker-private for AArch64 because we don't
 ; use section-relative relocations in MachO.
 
-define i8* @private_sym() {
+define ptr @private_sym() {
 ; CHECK-LABEL: private_sym:
 ; CHECK:     adrp [[HIBITS:x[0-9]+]], l_var@PAGE
 ; CHECK:     add x0, [[HIBITS]], l_var@PAGEOFF
 
-  ret i8* getelementptr([2 x i8], [2 x i8]* @var, i32 0, i32 0)
+  ret ptr @var
 }
 
 ; CHECK:     .section __TEXT,__cstring

diff  --git a/llvm/test/CodeGen/AArch64/memcpy-f128.ll b/llvm/test/CodeGen/AArch64/memcpy-f128.ll
index bc6ffb140aa43..5b354dd23e01d 100644
--- a/llvm/test/CodeGen/AArch64/memcpy-f128.ll
+++ b/llvm/test/CodeGen/AArch64/memcpy-f128.ll
@@ -9,8 +9,8 @@ define void @test1() {
 ; CHECK-LABEL: @test1
 ; CHECK: ret
 entry:
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 undef, i8* align 8 bitcast (%structA* @stubA to i8*), i64 48, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr align 8 undef, ptr align 8 @stubA, i64 48, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)

diff  --git a/llvm/test/CodeGen/AArch64/memset-inline.ll b/llvm/test/CodeGen/AArch64/memset-inline.ll
index 66731ac0f04c3..02d852b5ce45a 100644
--- a/llvm/test/CodeGen/AArch64/memset-inline.ll
+++ b/llvm/test/CodeGen/AArch64/memset-inline.ll
@@ -2,31 +2,31 @@
 ; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -mattr=-neon | FileCheck %s --check-prefixes=ALL,GPR
 ; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -mattr=neon  | FileCheck %s --check-prefixes=ALL,NEON
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
-declare void @llvm.memset.inline.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.inline.p0.i64(ptr nocapture, i8, i64, i1) nounwind
 
 ; /////////////////////////////////////////////////////////////////////////////
 
-define void @memset_1(i8* %a, i8 %value) nounwind {
+define void @memset_1(ptr %a, i8 %value) nounwind {
 ; ALL-LABEL: memset_1:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    strb w1, [x0]
 ; ALL-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 %value, i64 1, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 1, i1 0)
   ret void
 }
 
-define void @memset_2(i8* %a, i8 %value) nounwind {
+define void @memset_2(ptr %a, i8 %value) nounwind {
 ; ALL-LABEL: memset_2:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    bfi w1, w1, #8, #24
 ; ALL-NEXT:    strh w1, [x0]
 ; ALL-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 %value, i64 2, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 2, i1 0)
   ret void
 }
 
-define void @memset_4(i8* %a, i8 %value) nounwind {
+define void @memset_4(ptr %a, i8 %value) nounwind {
 ; ALL-LABEL: memset_4:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    mov w8, #16843009
@@ -34,11 +34,11 @@ define void @memset_4(i8* %a, i8 %value) nounwind {
 ; ALL-NEXT:    mul w8, w9, w8
 ; ALL-NEXT:    str w8, [x0]
 ; ALL-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 %value, i64 4, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 4, i1 0)
   ret void
 }
 
-define void @memset_8(i8* %a, i8 %value) nounwind {
+define void @memset_8(ptr %a, i8 %value) nounwind {
 ; ALL-LABEL: memset_8:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    // kill: def $w1 killed $w1 def $x1
@@ -47,11 +47,11 @@ define void @memset_8(i8* %a, i8 %value) nounwind {
 ; ALL-NEXT:    mul x8, x9, x8
 ; ALL-NEXT:    str x8, [x0]
 ; ALL-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 %value, i64 8, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 8, i1 0)
   ret void
 }
 
-define void @memset_16(i8* %a, i8 %value) nounwind {
+define void @memset_16(ptr %a, i8 %value) nounwind {
 ; ALL-LABEL: memset_16:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    // kill: def $w1 killed $w1 def $x1
@@ -60,11 +60,11 @@ define void @memset_16(i8* %a, i8 %value) nounwind {
 ; ALL-NEXT:    mul x8, x9, x8
 ; ALL-NEXT:    stp x8, x8, [x0]
 ; ALL-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 %value, i64 16, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 16, i1 0)
   ret void
 }
 
-define void @memset_32(i8* %a, i8 %value) nounwind {
+define void @memset_32(ptr %a, i8 %value) nounwind {
 ; GPR-LABEL: memset_32:
 ; GPR:       // %bb.0:
 ; GPR-NEXT:    // kill: def $w1 killed $w1 def $x1
@@ -80,11 +80,11 @@ define void @memset_32(i8* %a, i8 %value) nounwind {
 ; NEON-NEXT:    dup v0.16b, w1
 ; NEON-NEXT:    stp q0, q0, [x0]
 ; NEON-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 %value, i64 32, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 32, i1 0)
   ret void
 }
 
-define void @memset_64(i8* %a, i8 %value) nounwind {
+define void @memset_64(ptr %a, i8 %value) nounwind {
 ; GPR-LABEL: memset_64:
 ; GPR:       // %bb.0:
 ; GPR-NEXT:    // kill: def $w1 killed $w1 def $x1
@@ -103,13 +103,13 @@ define void @memset_64(i8* %a, i8 %value) nounwind {
 ; NEON-NEXT:    stp q0, q0, [x0]
 ; NEON-NEXT:    stp q0, q0, [x0, #32]
 ; NEON-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 %value, i64 64, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 64, i1 0)
   ret void
 }
 
 ; /////////////////////////////////////////////////////////////////////////////
 
-define void @aligned_memset_16(i8* align 16 %a, i8 %value) nounwind {
+define void @aligned_memset_16(ptr align 16 %a, i8 %value) nounwind {
 ; ALL-LABEL: aligned_memset_16:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    // kill: def $w1 killed $w1 def $x1
@@ -118,11 +118,11 @@ define void @aligned_memset_16(i8* align 16 %a, i8 %value) nounwind {
 ; ALL-NEXT:    mul x8, x9, x8
 ; ALL-NEXT:    stp x8, x8, [x0]
 ; ALL-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* align 16 %a, i8 %value, i64 16, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr align 16 %a, i8 %value, i64 16, i1 0)
   ret void
 }
 
-define void @aligned_memset_32(i8* align 32 %a, i8 %value) nounwind {
+define void @aligned_memset_32(ptr align 32 %a, i8 %value) nounwind {
 ; GPR-LABEL: aligned_memset_32:
 ; GPR:       // %bb.0:
 ; GPR-NEXT:    // kill: def $w1 killed $w1 def $x1
@@ -138,11 +138,11 @@ define void @aligned_memset_32(i8* align 32 %a, i8 %value) nounwind {
 ; NEON-NEXT:    dup v0.16b, w1
 ; NEON-NEXT:    stp q0, q0, [x0]
 ; NEON-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* align 32 %a, i8 %value, i64 32, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr align 32 %a, i8 %value, i64 32, i1 0)
   ret void
 }
 
-define void @aligned_memset_64(i8* align 64 %a, i8 %value) nounwind {
+define void @aligned_memset_64(ptr align 64 %a, i8 %value) nounwind {
 ; GPR-LABEL: aligned_memset_64:
 ; GPR:       // %bb.0:
 ; GPR-NEXT:    // kill: def $w1 killed $w1 def $x1
@@ -161,58 +161,58 @@ define void @aligned_memset_64(i8* align 64 %a, i8 %value) nounwind {
 ; NEON-NEXT:    stp q0, q0, [x0]
 ; NEON-NEXT:    stp q0, q0, [x0, #32]
 ; NEON-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* align 64 %a, i8 %value, i64 64, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr align 64 %a, i8 %value, i64 64, i1 0)
   ret void
 }
 
 ; /////////////////////////////////////////////////////////////////////////////
 
-define void @bzero_1(i8* %a) nounwind {
+define void @bzero_1(ptr %a) nounwind {
 ; ALL-LABEL: bzero_1:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    strb wzr, [x0]
 ; ALL-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 0, i64 1, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 0, i64 1, i1 0)
   ret void
 }
 
-define void @bzero_2(i8* %a) nounwind {
+define void @bzero_2(ptr %a) nounwind {
 ; ALL-LABEL: bzero_2:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    strh wzr, [x0]
 ; ALL-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 0, i64 2, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 0, i64 2, i1 0)
   ret void
 }
 
-define void @bzero_4(i8* %a) nounwind {
+define void @bzero_4(ptr %a) nounwind {
 ; ALL-LABEL: bzero_4:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    str wzr, [x0]
 ; ALL-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 0, i64 4, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 0, i64 4, i1 0)
   ret void
 }
 
-define void @bzero_8(i8* %a) nounwind {
+define void @bzero_8(ptr %a) nounwind {
 ; ALL-LABEL: bzero_8:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    str xzr, [x0]
 ; ALL-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 0, i64 8, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 0, i64 8, i1 0)
   ret void
 }
 
-define void @bzero_16(i8* %a) nounwind {
+define void @bzero_16(ptr %a) nounwind {
 ; ALL-LABEL: bzero_16:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    stp xzr, xzr, [x0]
 ; ALL-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 0, i64 16, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 0, i64 16, i1 0)
   ret void
 }
 
-define void @bzero_32(i8* %a) nounwind {
+define void @bzero_32(ptr %a) nounwind {
 ; GPR-LABEL: bzero_32:
 ; GPR:       // %bb.0:
 ; GPR-NEXT:    adrp x8, .LCPI15_0
@@ -225,11 +225,11 @@ define void @bzero_32(i8* %a) nounwind {
 ; NEON-NEXT:    movi v0.2d, #0000000000000000
 ; NEON-NEXT:    stp q0, q0, [x0]
 ; NEON-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 0, i64 32, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 0, i64 32, i1 0)
   ret void
 }
 
-define void @bzero_64(i8* %a) nounwind {
+define void @bzero_64(ptr %a) nounwind {
 ; GPR-LABEL: bzero_64:
 ; GPR:       // %bb.0:
 ; GPR-NEXT:    adrp x8, .LCPI16_0
@@ -244,22 +244,22 @@ define void @bzero_64(i8* %a) nounwind {
 ; NEON-NEXT:    stp q0, q0, [x0]
 ; NEON-NEXT:    stp q0, q0, [x0, #32]
 ; NEON-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 0, i64 64, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 0, i64 64, i1 0)
   ret void
 }
 
 ; /////////////////////////////////////////////////////////////////////////////
 
-define void @aligned_bzero_16(i8* %a) nounwind {
+define void @aligned_bzero_16(ptr %a) nounwind {
 ; ALL-LABEL: aligned_bzero_16:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    stp xzr, xzr, [x0]
 ; ALL-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* align 16 %a, i8 0, i64 16, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr align 16 %a, i8 0, i64 16, i1 0)
   ret void
 }
 
-define void @aligned_bzero_32(i8* %a) nounwind {
+define void @aligned_bzero_32(ptr %a) nounwind {
 ; GPR-LABEL: aligned_bzero_32:
 ; GPR:       // %bb.0:
 ; GPR-NEXT:    adrp x8, .LCPI18_0
@@ -272,11 +272,11 @@ define void @aligned_bzero_32(i8* %a) nounwind {
 ; NEON-NEXT:    movi v0.2d, #0000000000000000
 ; NEON-NEXT:    stp q0, q0, [x0]
 ; NEON-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* align 32 %a, i8 0, i64 32, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr align 32 %a, i8 0, i64 32, i1 0)
   ret void
 }
 
-define void @aligned_bzero_64(i8* %a) nounwind {
+define void @aligned_bzero_64(ptr %a) nounwind {
 ; GPR-LABEL: aligned_bzero_64:
 ; GPR:       // %bb.0:
 ; GPR-NEXT:    adrp x8, .LCPI19_0
@@ -291,6 +291,6 @@ define void @aligned_bzero_64(i8* %a) nounwind {
 ; NEON-NEXT:    stp q0, q0, [x0]
 ; NEON-NEXT:    stp q0, q0, [x0, #32]
 ; NEON-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* align 64 %a, i8 0, i64 64, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr align 64 %a, i8 0, i64 64, i1 0)
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/memset-vs-memset-inline.ll b/llvm/test/CodeGen/AArch64/memset-vs-memset-inline.ll
index 47d7dd1ad4899..97cfb13bcd5eb 100644
--- a/llvm/test/CodeGen/AArch64/memset-vs-memset-inline.ll
+++ b/llvm/test/CodeGen/AArch64/memset-vs-memset-inline.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu | FileCheck %s
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
-declare void @llvm.memset.inline.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
+declare void @llvm.memset.inline.p0.i64(ptr nocapture, i8, i64, i1) nounwind
 
-define void @test1(i8* %a, i8 %value) nounwind {
+define void @test1(ptr %a, i8 %value) nounwind {
 ; CHECK-LABEL: test1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
@@ -13,25 +13,25 @@ define void @test1(i8* %a, i8 %value) nounwind {
 ; CHECK-NEXT:    mul x8, x9, x8
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    ret
-  tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 %value, i64 8, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 8, i1 0)
   ret void
 }
 
-define void @regular_memset_calls_external_function(i8* %a, i8 %value) nounwind {
+define void @regular_memset_calls_external_function(ptr %a, i8 %value) nounwind {
 ; CHECK-LABEL: regular_memset_calls_external_function:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w2, #1024
 ; CHECK-NEXT:    b memset
-  tail call void @llvm.memset.p0i8.i64(i8* %a, i8 %value, i64 1024, i1 0)
+  tail call void @llvm.memset.p0.i64(ptr %a, i8 %value, i64 1024, i1 0)
   ret void
 }
 
-define void @inlined_set_doesnt_call_external_function(i8* %a, i8 %value) nounwind {
+define void @inlined_set_doesnt_call_external_function(ptr %a, i8 %value) nounwind {
 ; CHECK-LABEL: inlined_set_doesnt_call_external_function:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    dup v0.16b, w1
 ; CHECK-NEXT:    stp q0, q0, [x0]
 ; CHECK-NEXT:    stp q0, q0, [x0, #32]
-  tail call void @llvm.memset.inline.p0i8.i64(i8* %a, i8 %value, i64 1024, i1 0)
+  tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 1024, i1 0)
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/memset.ll b/llvm/test/CodeGen/AArch64/memset.ll
index 4d1d2241c05ab..e25e7f93a44e6 100644
--- a/llvm/test/CodeGen/AArch64/memset.ll
+++ b/llvm/test/CodeGen/AArch64/memset.ll
@@ -8,11 +8,11 @@ target triple = "aarch64-unknown-linux-gnu"
 ; CHECK-NEXT: stp
 ; CHECK-NEXT: stp
 ; CHECK-NEXT: ret
-define void @memset_call(i8* %0, i32 %1) {
+define void @memset_call(ptr %0, i32 %1) {
   %3 = trunc i32 %1 to i8
-  call void @llvm.memset.p0i8.i64(i8* %0, i8 %3, i64 64, i1 false)
+  call void @llvm.memset.p0.i64(ptr %0, i8 %3, i64 64, i1 false)
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8*, i8, i64, i1 immarg)
+declare void @llvm.memset.p0.i64(ptr, i8, i64, i1 immarg)
 

diff --git a/llvm/test/CodeGen/AArch64/memsize-remarks.ll b/llvm/test/CodeGen/AArch64/memsize-remarks.ll
index dffbf0438ac01..93e3d6fb02607 100644
--- a/llvm/test/CodeGen/AArch64/memsize-remarks.ll
+++ b/llvm/test/CodeGen/AArch64/memsize-remarks.ll
@@ -5,141 +5,141 @@ source_filename = "memsize.c"
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 target triple = "arm64-apple-ios7.0.0"
 
-declare i8* @__memmove_chk(i8*, i8*, i64, i64) #1
-declare i8* @__memcpy_chk(i8*, i8*, i64, i64) #1
-declare i8* @__memset_chk(i8*, i32, i64, i64) #1
-declare i64 @llvm.objectsize.i64.p0i8(i8*, i1 immarg, i1 immarg, i1 immarg) #2
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) argmemonly nounwind willreturn writeonly
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1 immarg) argmemonly nounwind willreturn
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg) argmemonly nounwind willreturn
-declare void @bzero(i8* nocapture, i64) nofree nounwind
-declare void @bcopy(i8* nocapture, i8* nocapture, i64) nofree nounwind
-declare i8* @memset(i8*, i32, i64)
+declare ptr @__memmove_chk(ptr, ptr, i64, i64) #1
+declare ptr @__memcpy_chk(ptr, ptr, i64, i64) #1
+declare ptr @__memset_chk(ptr, i32, i64, i64) #1
+declare i64 @llvm.objectsize.i64.p0(ptr, i1 immarg, i1 immarg, i1 immarg) #2
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) argmemonly nounwind willreturn writeonly
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1 immarg) argmemonly nounwind willreturn
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) argmemonly nounwind willreturn
+declare void @bzero(ptr nocapture, i64) nofree nounwind
+declare void @bcopy(ptr nocapture, ptr nocapture, i64) nofree nounwind
+declare ptr @memset(ptr, i32, i64)
 
-define void @memcpy_dynamic(i8* %d, i8* %s, i64 %l) #0 !dbg !14 {
+define void @memcpy_dynamic(ptr %d, ptr %s, i64 %l) #0 !dbg !14 {
 entry:
-  %0 = call i64 @llvm.objectsize.i64.p0i8(i8* %d, i1 false, i1 true, i1 false), !dbg !16
+  %0 = call i64 @llvm.objectsize.i64.p0(ptr %d, i1 false, i1 true, i1 false), !dbg !16
 ; GISEL: remark: memsize.c:4:3: Call to memcpy.{{$}}
-  %call = call i8* @__memcpy_chk(i8* %d, i8* %s, i64 %l, i64 %0) #4, !dbg !17
+  %call = call ptr @__memcpy_chk(ptr %d, ptr %s, i64 %l, i64 %0) #4, !dbg !17
   ret void, !dbg !18
 }
 
-define void @memcpy_single(i8* %d, i8* %s, i64 %l) #0 !dbg !23 {
+define void @memcpy_single(ptr %d, ptr %s, i64 %l) #0 !dbg !23 {
 entry:
-  %0 = call i64 @llvm.objectsize.i64.p0i8(i8* %d, i1 false, i1 true, i1 false), !dbg !24
+  %0 = call i64 @llvm.objectsize.i64.p0(ptr %d, i1 false, i1 true, i1 false), !dbg !24
 ; GISEL: remark: memsize.c:10:3: Call to memcpy. Memory operation size: 1 bytes.
-  %call = call i8* @__memcpy_chk(i8* %d, i8* %s, i64 1, i64 %0) #4, !dbg !25
+  %call = call ptr @__memcpy_chk(ptr %d, ptr %s, i64 1, i64 %0) #4, !dbg !25
   ret void, !dbg !26
 }
 
-define void @memcpy_intrinsic(i8* %d, i8* %s, i64 %l) #0 {
+define void @memcpy_intrinsic(ptr %d, ptr %s, i64 %l) #0 {
 entry:
-  %0 = call i64 @llvm.objectsize.i64.p0i8(i8* %d, i1 false, i1 true, i1 false)
+  %0 = call i64 @llvm.objectsize.i64.p0(ptr %d, i1 false, i1 true, i1 false)
 ; GISEL: remark: <unknown>:0:0: Call to memcpy. Memory operation size: 1 bytes.
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 1, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %d, ptr %s, i64 1, i1 false)
   ret void
 }
 
-define void @memcpy_static(i8* %d, i8* %s, i64 %l) #0 !dbg !27 {
+define void @memcpy_static(ptr %d, ptr %s, i64 %l) #0 !dbg !27 {
 entry:
-  %0 = call i64 @llvm.objectsize.i64.p0i8(i8* %d, i1 false, i1 true, i1 false), !dbg !28
+  %0 = call i64 @llvm.objectsize.i64.p0(ptr %d, i1 false, i1 true, i1 false), !dbg !28
 ; GISEL: remark: memsize.c:13:3: Call to memcpy. Memory operation size: 100 bytes.
-  %call = call i8* @__memcpy_chk(i8* %d, i8* %s, i64 100, i64 %0) #4, !dbg !29
+  %call = call ptr @__memcpy_chk(ptr %d, ptr %s, i64 100, i64 %0) #4, !dbg !29
   ret void, !dbg !30
 }
 
-define void @memcpy_huge(i8* %d, i8* %s, i64 %l) #0 !dbg !31 {
+define void @memcpy_huge(ptr %d, ptr %s, i64 %l) #0 !dbg !31 {
 entry:
-  %0 = call i64 @llvm.objectsize.i64.p0i8(i8* %d, i1 false, i1 true, i1 false), !dbg !32
+  %0 = call i64 @llvm.objectsize.i64.p0(ptr %d, i1 false, i1 true, i1 false), !dbg !32
 ; GISEL: remark: memsize.c:16:3: Call to memcpy. Memory operation size: 100000 bytes.
-  %call = call i8* @__memcpy_chk(i8* %d, i8* %s, i64 100000, i64 %0) #4, !dbg !33
+  %call = call ptr @__memcpy_chk(ptr %d, ptr %s, i64 100000, i64 %0) #4, !dbg !33
   ret void, !dbg !34
 }
 
-define void @memmove_dynamic(i8* %d, i8* %s, i64 %l) #0 {
+define void @memmove_dynamic(ptr %d, ptr %s, i64 %l) #0 {
 entry:
-  %0 = call i64 @llvm.objectsize.i64.p0i8(i8* %d, i1 false, i1 true, i1 false)
+  %0 = call i64 @llvm.objectsize.i64.p0(ptr %d, i1 false, i1 true, i1 false)
 ; GISEL: remark: <unknown>:0:0: Call to memmove.{{$}}
-  %call = call i8* @__memmove_chk(i8* %d, i8* %s, i64 %l, i64 %0) #4
+  %call = call ptr @__memmove_chk(ptr %d, ptr %s, i64 %l, i64 %0) #4
   ret void
 }
 
-define void @memmove_single(i8* %d, i8* %s, i64 %l) #0 {
+define void @memmove_single(ptr %d, ptr %s, i64 %l) #0 {
 entry:
-  %0 = call i64 @llvm.objectsize.i64.p0i8(i8* %d, i1 false, i1 true, i1 false)
+  %0 = call i64 @llvm.objectsize.i64.p0(ptr %d, i1 false, i1 true, i1 false)
 ; GISEL: remark: <unknown>:0:0: Call to memmove. Memory operation size: 1 bytes.
-  %call = call i8* @__memmove_chk(i8* %d, i8* %s, i64 1, i64 %0) #4
+  %call = call ptr @__memmove_chk(ptr %d, ptr %s, i64 1, i64 %0) #4
   ret void
 }
 
-define void @memmove_static(i8* %d, i8* %s, i64 %l) #0 {
+define void @memmove_static(ptr %d, ptr %s, i64 %l) #0 {
 entry:
-  %0 = call i64 @llvm.objectsize.i64.p0i8(i8* %d, i1 false, i1 true, i1 false)
+  %0 = call i64 @llvm.objectsize.i64.p0(ptr %d, i1 false, i1 true, i1 false)
 ; GISEL: remark: <unknown>:0:0: Call to memmove. Memory operation size: 100 bytes.
-  %call = call i8* @__memmove_chk(i8* %d, i8* %s, i64 100, i64 %0) #4
+  %call = call ptr @__memmove_chk(ptr %d, ptr %s, i64 100, i64 %0) #4
   ret void
 }
 
-define void @memmove_huge(i8* %d, i8* %s, i64 %l) #0 {
+define void @memmove_huge(ptr %d, ptr %s, i64 %l) #0 {
 entry:
-  %0 = call i64 @llvm.objectsize.i64.p0i8(i8* %d, i1 false, i1 true, i1 false)
+  %0 = call i64 @llvm.objectsize.i64.p0(ptr %d, i1 false, i1 true, i1 false)
 ; GISEL: remark: <unknown>:0:0: Call to memmove. Memory operation size: 100000 bytes.
-  %call = call i8* @__memmove_chk(i8* %d, i8* %s, i64 100000, i64 %0) #4
+  %call = call ptr @__memmove_chk(ptr %d, ptr %s, i64 100000, i64 %0) #4
   ret void
 }
 
-define void @memset_dynamic(i8* %d, i64 %l) #0 !dbg !38 {
+define void @memset_dynamic(ptr %d, i64 %l) #0 !dbg !38 {
 entry:
-  %0 = call i64 @llvm.objectsize.i64.p0i8(i8* %d, i1 false, i1 true, i1 false), !dbg !39
+  %0 = call i64 @llvm.objectsize.i64.p0(ptr %d, i1 false, i1 true, i1 false), !dbg !39
 ; GISEL: remark: memsize.c:22:3: Call to memset.{{$}}
-  %call = call i8* @__memset_chk(i8* %d, i32 0, i64 %l, i64 %0) #4, !dbg !40
+  %call = call ptr @__memset_chk(ptr %d, i32 0, i64 %l, i64 %0) #4, !dbg !40
   ret void, !dbg !41
 }
 
-define void @memset_single(i8* %d, i64 %l) #0 !dbg !46 {
+define void @memset_single(ptr %d, i64 %l) #0 !dbg !46 {
 entry:
-  %0 = call i64 @llvm.objectsize.i64.p0i8(i8* %d, i1 false, i1 true, i1 false), !dbg !47
+  %0 = call i64 @llvm.objectsize.i64.p0(ptr %d, i1 false, i1 true, i1 false), !dbg !47
 ; GISEL: remark: memsize.c:28:3: Call to memset. Memory operation size: 1 bytes.
-  %call = call i8* @__memset_chk(i8* %d, i32 0, i64 1, i64 %0) #4, !dbg !48
+  %call = call ptr @__memset_chk(ptr %d, i32 0, i64 1, i64 %0) #4, !dbg !48
   ret void, !dbg !49
 }
 
-define void @memset_static(i8* %d, i64 %l) #0 !dbg !50 {
+define void @memset_static(ptr %d, i64 %l) #0 !dbg !50 {
 entry:
-  %0 = call i64 @llvm.objectsize.i64.p0i8(i8* %d, i1 false, i1 true, i1 false), !dbg !51
+  %0 = call i64 @llvm.objectsize.i64.p0(ptr %d, i1 false, i1 true, i1 false), !dbg !51
 ; GISEL: remark: memsize.c:31:3: Call to memset. Memory operation size: 100 bytes.
-  %call = call i8* @__memset_chk(i8* %d, i32 0, i64 100, i64 %0) #4, !dbg !52
+  %call = call ptr @__memset_chk(ptr %d, i32 0, i64 100, i64 %0) #4, !dbg !52
   ret void, !dbg !53
 }
 
-define void @memset_huge(i8* %d, i64 %l) #0 !dbg !54 {
+define void @memset_huge(ptr %d, i64 %l) #0 !dbg !54 {
 entry:
-  %0 = call i64 @llvm.objectsize.i64.p0i8(i8* %d, i1 false, i1 true, i1 false), !dbg !55
+  %0 = call i64 @llvm.objectsize.i64.p0(ptr %d, i1 false, i1 true, i1 false), !dbg !55
 ; GISEL: remark: memsize.c:34:3: Call to memset. Memory operation size: 100000 bytes.
-  %call = call i8* @__memset_chk(i8* %d, i32 0, i64 100000, i64 %0) #4, !dbg !56
+  %call = call ptr @__memset_chk(ptr %d, i32 0, i64 100000, i64 %0) #4, !dbg !56
   ret void, !dbg !57
 }
 
-define void @memset_empty(i8* %d, i64 %l) #0 !dbg !42 {
+define void @memset_empty(ptr %d, i64 %l) #0 !dbg !42 {
 entry:
-  %0 = call i64 @llvm.objectsize.i64.p0i8(i8* %d, i1 false, i1 true, i1 false), !dbg !43
+  %0 = call i64 @llvm.objectsize.i64.p0(ptr %d, i1 false, i1 true, i1 false), !dbg !43
 ; GISEL: remark: memsize.c:25:3: Call to memset. Memory operation size: 0 bytes.
-  %call = call i8* @__memset_chk(i8* %d, i32 0, i64 0, i64 %0) #4, !dbg !44
+  %call = call ptr @__memset_chk(ptr %d, i32 0, i64 0, i64 %0) #4, !dbg !44
   ret void, !dbg !45
 }
 
 ; YAML-LABEL: Function:        memcpy_empty
-define void @memcpy_empty(i8* %d, i8* %s, i64 %l) #0 !dbg !19 {
+define void @memcpy_empty(ptr %d, ptr %s, i64 %l) #0 !dbg !19 {
 entry:
-  %0 = call i64 @llvm.objectsize.i64.p0i8(i8* %d, i1 false, i1 true, i1 false), !dbg !20
+  %0 = call i64 @llvm.objectsize.i64.p0(ptr %d, i1 false, i1 true, i1 false), !dbg !20
 ; GISEL: remark: memsize.c:7:3: Call to memcpy. Memory operation size: 0 bytes.
-  %call = call i8* @__memcpy_chk(i8* %d, i8* %s, i64 0, i64 %0) #4, !dbg !21
+  %call = call ptr @__memcpy_chk(ptr %d, ptr %s, i64 0, i64 %0) #4, !dbg !21
   ret void, !dbg !22
 }
 
 ; Emit remarks for memcpy, memmove, memset, bzero, bcopy with known constant
 ; sizes to an object of known size.
-define void @known_call_with_dereferenceable_bytes(i8* dereferenceable(42) %dst, i8* dereferenceable(314) %src) {
+define void @known_call_with_dereferenceable_bytes(ptr dereferenceable(42) %dst, ptr dereferenceable(314) %src) {
 ; GISEL: Call to memset. Memory operation size: 1 bytes.
 ; GISEL-NOT:  Read Variables:
 ; GISEL-NEXT:  Written Variables: <unknown> (42 bytes).
@@ -170,7 +170,7 @@ define void @known_call_with_dereferenceable_bytes(i8* dereferenceable(42) %dst,
 ; YAML-NEXT:    - StoreAtomic:     'false'
 ; YAML-NEXT:    - String:          .
 ; YAML-NEXT:  ...
-  call void @llvm.memset.p0i8.i64(i8* %dst, i8 0, i64 1, i1 false)
+  call void @llvm.memset.p0.i64(ptr %dst, i8 0, i64 1, i1 false)
 
 ; GISEL: Call to memcpy. Memory operation size: 1 bytes.
 ; GISEL-NEXT:  Read Variables: <unknown> (314 bytes).
@@ -208,7 +208,7 @@ define void @known_call_with_dereferenceable_bytes(i8* dereferenceable(42) %dst,
 ; YAML-NEXT:    - StoreAtomic:     'false'
 ; YAML-NEXT:    - String:          .
 ; YAML-NEXT:  ...
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 1, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 1, i1 false)
 
 ; GISEL: Call to memmove. Memory operation size: 1 bytes.
 ; GISEL-NEXT:  Read Variables: <unknown> (314 bytes).
@@ -246,7 +246,7 @@ define void @known_call_with_dereferenceable_bytes(i8* dereferenceable(42) %dst,
 ; YAML-NEXT:    - StoreAtomic:     'false'
 ; YAML-NEXT:    - String:          .
 ; YAML-NEXT:  ...
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 1, i1 false)
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 1, i1 false)
 
 ; GISEL: Call to bzero. Memory operation size: 1 bytes.
 ; GISEL-NOT:  Read Variables:
@@ -269,7 +269,7 @@ define void @known_call_with_dereferenceable_bytes(i8* dereferenceable(42) %dst,
 ; YAML-NEXT:    - String:          ' bytes)'
 ; YAML-NEXT:    - String:          .
 ; YAML-NEXT:  ...
-  call void @bzero(i8* %dst, i64 1)
+  call void @bzero(ptr %dst, i64 1)
 
 ; GISEL: Call to bcopy. Memory operation size: 1 bytes.
 ; GISEL-NEXT:  Read Variables: <unknown> (314 bytes).
@@ -298,7 +298,7 @@ define void @known_call_with_dereferenceable_bytes(i8* dereferenceable(42) %dst,
 ; YAML-NEXT:    - String:          ' bytes)'
 ; YAML-NEXT:    - String:          .
 ; YAML-NEXT:  ...
-  call void @bcopy(i8* %dst, i8* %src, i64 1)
+  call void @bcopy(ptr %dst, ptr %src, i64 1)
   ret void
 }
 
@@ -310,8 +310,8 @@ bb:
 ; GISEL: remark: <unknown>:0:0: Call to memcpy. Memory operation size: 24 bytes.{{$}}
 ; GISEL-NEXT: Read Variables: koala (56 bytes).
 ; GISEL-NEXT: Written Variables: dropbear (24 bytes).
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 getelementptr inbounds ([3 x i8], [3 x i8]* @dropbear, i64 0, i64 0),
-                                            i8* getelementptr inbounds ([7 x i8], [7 x i8]* @koala, i64 0, i64 0), i64 24, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr align 1 @dropbear,
+                                            ptr @koala, i64 24, i1 false)
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/merge-store-dependency.ll b/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
index 1c962315954c4..fc5813b99ea55 100644
--- a/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
+++ b/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
@@ -2,11 +2,11 @@
 ; RUN: llc < %s -mcpu cortex-a53 -mtriple=aarch64-eabi | FileCheck %s --check-prefix=A53
 
 ; PR26827 - Merge stores causes wrong dependency.
-%struct1 = type { %struct1*, %struct1*, i32, i32, i16, i16, void (i32, i32, i8*)*, i8* }
+%struct1 = type { ptr, ptr, i32, i32, i16, i16, ptr, ptr }
 @gv0 = internal unnamed_addr global i32 0, align 4
-@gv1 = internal unnamed_addr global %struct1** null, align 8
+@gv1 = internal unnamed_addr global ptr null, align 8
 
-define void @test(%struct1* %fde, i32 %fd, void (i32, i32, i8*)* %func, i8* %arg) uwtable {
+define void @test(ptr %fde, i32 %fd, ptr %func, ptr %arg) uwtable {
 ;CHECK-LABEL: test
 ; A53-LABEL: test:
 ; A53:       // %bb.0: // %entry
@@ -57,45 +57,44 @@ define void @test(%struct1* %fde, i32 %fd, void (i32, i32, i8*)* %func, i8* %arg
 ; A53-NEXT:    .cfi_restore_state
 ; A53-NEXT:    b .LBB0_4
 entry:
-  %0 = bitcast %struct1* %fde to i8*
-  tail call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 40, i1 false)
-  %state = getelementptr inbounds %struct1, %struct1* %fde, i64 0, i32 4
-  store i16 256, i16* %state, align 8
-  %fd1 = getelementptr inbounds %struct1, %struct1* %fde, i64 0, i32 2
-  store i32 %fd, i32* %fd1, align 8
-  %force_eof = getelementptr inbounds %struct1, %struct1* %fde, i64 0, i32 3
-  store i32 0, i32* %force_eof, align 4
-  %func2 = getelementptr inbounds %struct1, %struct1* %fde, i64 0, i32 6
-  store void (i32, i32, i8*)* %func, void (i32, i32, i8*)** %func2, align 8
-  %arg3 = getelementptr inbounds %struct1, %struct1* %fde, i64 0, i32 7
-  store i8* %arg, i8** %arg3, align 8
-  %call = tail call i32 (i32, i32, ...) @fcntl(i32 %fd, i32 4, i8* %0) #6
-  %1 = load i32, i32* %fd1, align 8
-  %cmp.i = icmp slt i32 %1, 0
+  tail call void @llvm.memset.p0.i64(ptr align 8 %fde, i8 0, i64 40, i1 false)
+  %state = getelementptr inbounds %struct1, ptr %fde, i64 0, i32 4
+  store i16 256, ptr %state, align 8
+  %fd1 = getelementptr inbounds %struct1, ptr %fde, i64 0, i32 2
+  store i32 %fd, ptr %fd1, align 8
+  %force_eof = getelementptr inbounds %struct1, ptr %fde, i64 0, i32 3
+  store i32 0, ptr %force_eof, align 4
+  %func2 = getelementptr inbounds %struct1, ptr %fde, i64 0, i32 6
+  store ptr %func, ptr %func2, align 8
+  %arg3 = getelementptr inbounds %struct1, ptr %fde, i64 0, i32 7
+  store ptr %arg, ptr %arg3, align 8
+  %call = tail call i32 (i32, i32, ...) @fcntl(i32 %fd, i32 4, ptr %fde) #6
+  %0 = load i32, ptr %fd1, align 8
+  %cmp.i = icmp slt i32 %0, 0
   br i1 %cmp.i, label %if.then.i, label %while.body.i.preheader
 if.then.i:
   unreachable
 
 while.body.i.preheader:
-  %2 = load i32, i32* @gv0, align 4
-  %3 = icmp eq i32* %fd1, @gv0
-  br i1 %3, label %while.body.i.split, label %while.body.i.split.ver.us.preheader
+  %1 = load i32, ptr @gv0, align 4
+  %2 = icmp eq ptr %fd1, @gv0
+  br i1 %2, label %while.body.i.split, label %while.body.i.split.ver.us.preheader
 
 while.body.i.split.ver.us.preheader:
   br label %while.body.i.split.ver.us
 
 while.body.i.split.ver.us:
-  %.reg2mem21.0 = phi i32 [ %mul.i.ver.us, %while.body.i.split.ver.us ], [ %2, %while.body.i.split.ver.us.preheader ]
+  %.reg2mem21.0 = phi i32 [ %mul.i.ver.us, %while.body.i.split.ver.us ], [ %1, %while.body.i.split.ver.us.preheader ]
   %mul.i.ver.us = shl nsw i32 %.reg2mem21.0, 1
-  %4 = icmp sgt i32 %mul.i.ver.us, %1
-  br i1 %4, label %while.end.i, label %while.body.i.split.ver.us
+  %3 = icmp sgt i32 %mul.i.ver.us, %0
+  br i1 %3, label %while.end.i, label %while.body.i.split.ver.us
 
 while.body.i.split:
   br label %while.body.i.split
 
 while.end.i:
-  %call.i = tail call i8* @foo()
-  store i8* %call.i, i8** bitcast (%struct1*** @gv1 to i8**), align 8
+  %call.i = tail call ptr @foo()
+  store ptr %call.i, ptr @gv1, align 8
   br label %exit
 
 exit:
@@ -104,7 +103,7 @@ exit:
 
 ; TODO: rev16?
 
-define void @rotate16_in_place(i8* %p) {
+define void @rotate16_in_place(ptr %p) {
 ; A53-LABEL: rotate16_in_place:
 ; A53:       // %bb.0:
 ; A53-NEXT:    ldrb w8, [x0, #1]
@@ -112,18 +111,17 @@ define void @rotate16_in_place(i8* %p) {
 ; A53-NEXT:    strb w8, [x0]
 ; A53-NEXT:    strb w9, [x0, #1]
 ; A53-NEXT:    ret
-  %p0 = getelementptr i8, i8* %p, i64 0
-  %p1 = getelementptr i8, i8* %p, i64 1
-  %i0 = load i8, i8* %p0, align 1
-  %i1 = load i8, i8* %p1, align 1
-  store i8 %i1, i8* %p0, align 1
-  store i8 %i0, i8* %p1, align 1
+  %p1 = getelementptr i8, ptr %p, i64 1
+  %i0 = load i8, ptr %p, align 1
+  %i1 = load i8, ptr %p1, align 1
+  store i8 %i1, ptr %p, align 1
+  store i8 %i0, ptr %p1, align 1
   ret void
 }
 
 ; TODO: rev16?
 
-define void @rotate16(i8* %p, i8* %q) {
+define void @rotate16(ptr %p, ptr %q) {
 ; A53-LABEL: rotate16:
 ; A53:       // %bb.0:
 ; A53-NEXT:    ldrb w8, [x0, #1]
@@ -131,87 +129,81 @@ define void @rotate16(i8* %p, i8* %q) {
 ; A53-NEXT:    strb w8, [x1]
 ; A53-NEXT:    strb w9, [x1, #1]
 ; A53-NEXT:    ret
-  %p0 = getelementptr i8, i8* %p, i64 0
-  %p1 = getelementptr i8, i8* %p, i64 1
-  %q0 = getelementptr i8, i8* %q, i64 0
-  %q1 = getelementptr i8, i8* %q, i64 1
-  %i0 = load i8, i8* %p0, align 1
-  %i1 = load i8, i8* %p1, align 1
-  store i8 %i1, i8* %q0, align 1
-  store i8 %i0, i8* %q1, align 1
+  %p1 = getelementptr i8, ptr %p, i64 1
+  %q1 = getelementptr i8, ptr %q, i64 1
+  %i0 = load i8, ptr %p, align 1
+  %i1 = load i8, ptr %p1, align 1
+  store i8 %i1, ptr %q, align 1
+  store i8 %i0, ptr %q1, align 1
   ret void
 }
 
-define void @rotate32_in_place(i16* %p) {
+define void @rotate32_in_place(ptr %p) {
 ; A53-LABEL: rotate32_in_place:
 ; A53:       // %bb.0:
 ; A53-NEXT:    ldr w8, [x0]
 ; A53-NEXT:    ror w8, w8, #16
 ; A53-NEXT:    str w8, [x0]
 ; A53-NEXT:    ret
-  %p0 = getelementptr i16, i16* %p, i64 0
-  %p1 = getelementptr i16, i16* %p, i64 1
-  %i0 = load i16, i16* %p0, align 2
-  %i1 = load i16, i16* %p1, align 2
-  store i16 %i1, i16* %p0, align 2
-  store i16 %i0, i16* %p1, align 2
+  %p1 = getelementptr i16, ptr %p, i64 1
+  %i0 = load i16, ptr %p, align 2
+  %i1 = load i16, ptr %p1, align 2
+  store i16 %i1, ptr %p, align 2
+  store i16 %i0, ptr %p1, align 2
   ret void
 }
 
-define void @rotate32(i16* %p) {
+define void @rotate32(ptr %p) {
 ; A53-LABEL: rotate32:
 ; A53:       // %bb.0:
 ; A53-NEXT:    ldr w8, [x0]
 ; A53-NEXT:    ror w8, w8, #16
 ; A53-NEXT:    str w8, [x0, #84]
 ; A53-NEXT:    ret
-  %p0 = getelementptr i16, i16* %p, i64 0
-  %p1 = getelementptr i16, i16* %p, i64 1
-  %p42 = getelementptr i16, i16* %p, i64 42
-  %p43 = getelementptr i16, i16* %p, i64 43
-  %i0 = load i16, i16* %p0, align 2
-  %i1 = load i16, i16* %p1, align 2
-  store i16 %i1, i16* %p42, align 2
-  store i16 %i0, i16* %p43, align 2
+  %p1 = getelementptr i16, ptr %p, i64 1
+  %p42 = getelementptr i16, ptr %p, i64 42
+  %p43 = getelementptr i16, ptr %p, i64 43
+  %i0 = load i16, ptr %p, align 2
+  %i1 = load i16, ptr %p1, align 2
+  store i16 %i1, ptr %p42, align 2
+  store i16 %i0, ptr %p43, align 2
   ret void
 }
 
 ; Prefer paired memops over rotate.
 
-define void @rotate64_in_place(i32* %p) {
+define void @rotate64_in_place(ptr %p) {
 ; A53-LABEL: rotate64_in_place:
 ; A53:       // %bb.0:
 ; A53-NEXT:    ldp w9, w8, [x0]
 ; A53-NEXT:    stp w8, w9, [x0]
 ; A53-NEXT:    ret
-  %p0 = getelementptr i32, i32* %p, i64 0
-  %p1 = getelementptr i32, i32* %p, i64 1
-  %i0 = load i32, i32* %p0, align 4
-  %i1 = load i32, i32* %p1, align 4
-  store i32 %i1, i32* %p0, align 4
-  store i32 %i0, i32* %p1, align 4
+  %p1 = getelementptr i32, ptr %p, i64 1
+  %i0 = load i32, ptr %p, align 4
+  %i1 = load i32, ptr %p1, align 4
+  store i32 %i1, ptr %p, align 4
+  store i32 %i0, ptr %p1, align 4
   ret void
 }
 
 ; Prefer paired memops over rotate.
 
-define void @rotate64(i32* %p) {
+define void @rotate64(ptr %p) {
 ; A53-LABEL: rotate64:
 ; A53:       // %bb.0:
 ; A53-NEXT:    ldp w9, w8, [x0]
 ; A53-NEXT:    stp w8, w9, [x0, #8]
 ; A53-NEXT:    ret
-  %p0 = getelementptr i32, i32* %p, i64 0
-  %p1 = getelementptr i32, i32* %p, i64 1
-  %p2 = getelementptr i32, i32* %p, i64 2
-  %p3 = getelementptr i32, i32* %p, i64 3
-  %i0 = load i32, i32* %p0, align 4
-  %i1 = load i32, i32* %p1, align 4
-  store i32 %i1, i32* %p2, align 4
-  store i32 %i0, i32* %p3, align 4
+  %p1 = getelementptr i32, ptr %p, i64 1
+  %p2 = getelementptr i32, ptr %p, i64 2
+  %p3 = getelementptr i32, ptr %p, i64 3
+  %i0 = load i32, ptr %p, align 4
+  %i1 = load i32, ptr %p1, align 4
+  store i32 %i1, ptr %p2, align 4
+  store i32 %i0, ptr %p3, align 4
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
 declare i32 @fcntl(i32, i32, ...)
-declare noalias i8* @foo()
+declare noalias ptr @foo()

diff --git a/llvm/test/CodeGen/AArch64/merge-trunc-store.ll b/llvm/test/CodeGen/AArch64/merge-trunc-store.ll
index e1d8d330967b3..0f2cb775e98e0 100644
--- a/llvm/test/CodeGen/AArch64/merge-trunc-store.ll
+++ b/llvm/test/CodeGen/AArch64/merge-trunc-store.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=aarch64--    | FileCheck %s --check-prefixes=CHECK,LE
 ; RUN: llc < %s -mtriple=aarch64_be-- | FileCheck %s --check-prefixes=CHECK,BE
 
-define void @le_i16_to_i8(i16 %x, i8* %p0) {
+define void @le_i16_to_i8(i16 %x, ptr %p0) {
 ; LE-LABEL: le_i16_to_i8:
 ; LE:       // %bb.0:
 ; LE-NEXT:    strh w0, [x1]
@@ -17,13 +17,13 @@ define void @le_i16_to_i8(i16 %x, i8* %p0) {
   %sh1 = lshr i16 %x, 8
   %t0 = trunc i16 %x to i8
   %t1 = trunc i16 %sh1 to i8
-  %p1 = getelementptr inbounds i8, i8* %p0, i64 1
-  store i8 %t0, i8* %p0, align 1
-  store i8 %t1, i8* %p1, align 1
+  %p1 = getelementptr inbounds i8, ptr %p0, i64 1
+  store i8 %t0, ptr %p0, align 1
+  store i8 %t1, ptr %p1, align 1
   ret void
 }
 
-define void @le_i16_to_i8_order(i16 %x, i8* %p0) {
+define void @le_i16_to_i8_order(i16 %x, ptr %p0) {
 ; LE-LABEL: le_i16_to_i8_order:
 ; LE:       // %bb.0:
 ; LE-NEXT:    strh w0, [x1]
@@ -38,13 +38,13 @@ define void @le_i16_to_i8_order(i16 %x, i8* %p0) {
   %sh1 = lshr i16 %x, 8
   %t0 = trunc i16 %x to i8
   %t1 = trunc i16 %sh1 to i8
-  %p1 = getelementptr inbounds i8, i8* %p0, i64 1
-  store i8 %t1, i8* %p1, align 1
-  store i8 %t0, i8* %p0, align 1
+  %p1 = getelementptr inbounds i8, ptr %p0, i64 1
+  store i8 %t1, ptr %p1, align 1
+  store i8 %t0, ptr %p0, align 1
   ret void
 }
 
-define void @be_i16_to_i8_offset(i16 %x, i8* %p0) {
+define void @be_i16_to_i8_offset(i16 %x, ptr %p0) {
 ; LE-LABEL: be_i16_to_i8_offset:
 ; LE:       // %bb.0:
 ; LE-NEXT:    rev w8, w0
@@ -59,14 +59,14 @@ define void @be_i16_to_i8_offset(i16 %x, i8* %p0) {
   %sh1 = lshr i16 %x, 8
   %t0 = trunc i16 %x to i8
   %t1 = trunc i16 %sh1 to i8
-  %p11 = getelementptr inbounds i8, i8* %p0, i64 11
-  %p12 = getelementptr inbounds i8, i8* %p0, i64 12
-  store i8 %t0, i8* %p12, align 1
-  store i8 %t1, i8* %p11, align 1
+  %p11 = getelementptr inbounds i8, ptr %p0, i64 11
+  %p12 = getelementptr inbounds i8, ptr %p0, i64 12
+  store i8 %t0, ptr %p12, align 1
+  store i8 %t1, ptr %p11, align 1
   ret void
 }
 
-define void @be_i16_to_i8_order(i16 %x, i8* %p0) {
+define void @be_i16_to_i8_order(i16 %x, ptr %p0) {
 ; LE-LABEL: be_i16_to_i8_order:
 ; LE:       // %bb.0:
 ; LE-NEXT:    rev w8, w0
@@ -81,13 +81,13 @@ define void @be_i16_to_i8_order(i16 %x, i8* %p0) {
   %sh1 = lshr i16 %x, 8
   %t0 = trunc i16 %x to i8
   %t1 = trunc i16 %sh1 to i8
-  %p1 = getelementptr inbounds i8, i8* %p0, i64 1
-  store i8 %t1, i8* %p0, align 1
-  store i8 %t0, i8* %p1, align 1
+  %p1 = getelementptr inbounds i8, ptr %p0, i64 1
+  store i8 %t1, ptr %p0, align 1
+  store i8 %t0, ptr %p1, align 1
   ret void
 }
 
-define void @le_i32_to_i8(i32 %x, i8* %p0) {
+define void @le_i32_to_i8(i32 %x, ptr %p0) {
 ; LE-LABEL: le_i32_to_i8:
 ; LE:       // %bb.0:
 ; LE-NEXT:    str w0, [x1]
@@ -105,17 +105,17 @@ define void @le_i32_to_i8(i32 %x, i8* %p0) {
   %t1 = trunc i32 %sh1 to i8
   %t2 = trunc i32 %sh2 to i8
   %t3 = trunc i32 %sh3 to i8
-  %p1 = getelementptr inbounds i8, i8* %p0, i64 1
-  %p2 = getelementptr inbounds i8, i8* %p0, i64 2
-  %p3 = getelementptr inbounds i8, i8* %p0, i64 3
-  store i8 %t0, i8* %p0, align 1
-  store i8 %t1, i8* %p1, align 1
-  store i8 %t2, i8* %p2, align 1
-  store i8 %t3, i8* %p3, align 1
+  %p1 = getelementptr inbounds i8, ptr %p0, i64 1
+  %p2 = getelementptr inbounds i8, ptr %p0, i64 2
+  %p3 = getelementptr inbounds i8, ptr %p0, i64 3
+  store i8 %t0, ptr %p0, align 1
+  store i8 %t1, ptr %p1, align 1
+  store i8 %t2, ptr %p2, align 1
+  store i8 %t3, ptr %p3, align 1
   ret void
 }
 
-define void @le_i32_to_i8_order(i32 %x, i8* %p0) {
+define void @le_i32_to_i8_order(i32 %x, ptr %p0) {
 ; LE-LABEL: le_i32_to_i8_order:
 ; LE:       // %bb.0:
 ; LE-NEXT:    str w0, [x1]
@@ -133,17 +133,17 @@ define void @le_i32_to_i8_order(i32 %x, i8* %p0) {
   %t1 = trunc i32 %sh1 to i8
   %t2 = trunc i32 %sh2 to i8
   %t3 = trunc i32 %sh3 to i8
-  %p1 = getelementptr inbounds i8, i8* %p0, i64 1
-  %p2 = getelementptr inbounds i8, i8* %p0, i64 2
-  %p3 = getelementptr inbounds i8, i8* %p0, i64 3
-  store i8 %t3, i8* %p3, align 1
-  store i8 %t1, i8* %p1, align 1
-  store i8 %t0, i8* %p0, align 1
-  store i8 %t2, i8* %p2, align 1
+  %p1 = getelementptr inbounds i8, ptr %p0, i64 1
+  %p2 = getelementptr inbounds i8, ptr %p0, i64 2
+  %p3 = getelementptr inbounds i8, ptr %p0, i64 3
+  store i8 %t3, ptr %p3, align 1
+  store i8 %t1, ptr %p1, align 1
+  store i8 %t0, ptr %p0, align 1
+  store i8 %t2, ptr %p2, align 1
   ret void
 }
 
-define void @be_i32_to_i8(i32 %x, i8* %p0) {
+define void @be_i32_to_i8(i32 %x, ptr %p0) {
 ; LE-LABEL: be_i32_to_i8:
 ; LE:       // %bb.0:
 ; LE-NEXT:    rev w8, w0
@@ -161,17 +161,17 @@ define void @be_i32_to_i8(i32 %x, i8* %p0) {
   %t1 = trunc i32 %sh1 to i8
   %t2 = trunc i32 %sh2 to i8
   %t3 = trunc i32 %sh3 to i8
-  %p1 = getelementptr inbounds i8, i8* %p0, i64 1
-  %p2 = getelementptr inbounds i8, i8* %p0, i64 2
-  %p3 = getelementptr inbounds i8, i8* %p0, i64 3
-  store i8 %t0, i8* %p3, align 1
-  store i8 %t1, i8* %p2, align 1
-  store i8 %t2, i8* %p1, align 1
-  store i8 %t3, i8* %p0, align 1
+  %p1 = getelementptr inbounds i8, ptr %p0, i64 1
+  %p2 = getelementptr inbounds i8, ptr %p0, i64 2
+  %p3 = getelementptr inbounds i8, ptr %p0, i64 3
+  store i8 %t0, ptr %p3, align 1
+  store i8 %t1, ptr %p2, align 1
+  store i8 %t2, ptr %p1, align 1
+  store i8 %t3, ptr %p0, align 1
   ret void
 }
 
-define void @be_i32_to_i8_order(i32 %x, i8* %p0) {
+define void @be_i32_to_i8_order(i32 %x, ptr %p0) {
 ; LE-LABEL: be_i32_to_i8_order:
 ; LE:       // %bb.0:
 ; LE-NEXT:    rev w8, w0
@@ -189,17 +189,17 @@ define void @be_i32_to_i8_order(i32 %x, i8* %p0) {
   %t1 = trunc i32 %sh1 to i8
   %t2 = trunc i32 %sh2 to i8
   %t3 = trunc i32 %sh3 to i8
-  %p1 = getelementptr inbounds i8, i8* %p0, i64 1
-  %p2 = getelementptr inbounds i8, i8* %p0, i64 2
-  %p3 = getelementptr inbounds i8, i8* %p0, i64 3
-  store i8 %t3, i8* %p0, align 1
-  store i8 %t2, i8* %p1, align 1
-  store i8 %t0, i8* %p3, align 1
-  store i8 %t1, i8* %p2, align 1
+  %p1 = getelementptr inbounds i8, ptr %p0, i64 1
+  %p2 = getelementptr inbounds i8, ptr %p0, i64 2
+  %p3 = getelementptr inbounds i8, ptr %p0, i64 3
+  store i8 %t3, ptr %p0, align 1
+  store i8 %t2, ptr %p1, align 1
+  store i8 %t0, ptr %p3, align 1
+  store i8 %t1, ptr %p2, align 1
   ret void
 }
 
-define void @le_i32_to_i16(i32 %x, i16* %p0) {
+define void @le_i32_to_i16(i32 %x, ptr %p0) {
 ; LE-LABEL: le_i32_to_i16:
 ; LE:       // %bb.0:
 ; LE-NEXT:    str w0, [x1]
@@ -213,13 +213,13 @@ define void @le_i32_to_i16(i32 %x, i16* %p0) {
   %sh1 = lshr i32 %x, 16
   %t0 = trunc i32 %x to i16
   %t1 = trunc i32 %sh1 to i16
-  %p1 = getelementptr inbounds i16, i16* %p0, i64 1
-  store i16 %t0, i16* %p0, align 2
-  store i16 %t1, i16* %p1, align 2
+  %p1 = getelementptr inbounds i16, ptr %p0, i64 1
+  store i16 %t0, ptr %p0, align 2
+  store i16 %t1, ptr %p1, align 2
   ret void
 }
 
-define void @le_i32_to_i16_order(i32 %x, i16* %p0) {
+define void @le_i32_to_i16_order(i32 %x, ptr %p0) {
 ; LE-LABEL: le_i32_to_i16_order:
 ; LE:       // %bb.0:
 ; LE-NEXT:    str w0, [x1]
@@ -233,13 +233,13 @@ define void @le_i32_to_i16_order(i32 %x, i16* %p0) {
   %sh1 = lshr i32 %x, 16
   %t0 = trunc i32 %x to i16
   %t1 = trunc i32 %sh1 to i16
-  %p1 = getelementptr inbounds i16, i16* %p0, i64 1
-  store i16 %t1, i16* %p1, align 2
-  store i16 %t0, i16* %p0, align 2
+  %p1 = getelementptr inbounds i16, ptr %p0, i64 1
+  store i16 %t1, ptr %p1, align 2
+  store i16 %t0, ptr %p0, align 2
   ret void
 }
 
-define void @be_i32_to_i16(i32 %x, i16* %p0) {
+define void @be_i32_to_i16(i32 %x, ptr %p0) {
 ; LE-LABEL: be_i32_to_i16:
 ; LE:       // %bb.0:
 ; LE-NEXT:    ror w8, w0, #16
@@ -253,13 +253,13 @@ define void @be_i32_to_i16(i32 %x, i16* %p0) {
   %sh1 = lshr i32 %x, 16
   %t0 = trunc i32 %x to i16
   %t1 = trunc i32 %sh1 to i16
-  %p1 = getelementptr inbounds i16, i16* %p0, i64 1
-  store i16 %t0, i16* %p1, align 2
-  store i16 %t1, i16* %p0, align 2
+  %p1 = getelementptr inbounds i16, ptr %p0, i64 1
+  store i16 %t0, ptr %p1, align 2
+  store i16 %t1, ptr %p0, align 2
   ret void
 }
 
-define void @be_i32_to_i16_order(i32 %x, i16* %p0) {
+define void @be_i32_to_i16_order(i32 %x, ptr %p0) {
 ; LE-LABEL: be_i32_to_i16_order:
 ; LE:       // %bb.0:
 ; LE-NEXT:    ror w8, w0, #16
@@ -273,13 +273,13 @@ define void @be_i32_to_i16_order(i32 %x, i16* %p0) {
   %sh1 = lshr i32 %x, 16
   %t0 = trunc i32 %x to i16
   %t1 = trunc i32 %sh1 to i16
-  %p1 = getelementptr inbounds i16, i16* %p0, i64 1
-  store i16 %t1, i16* %p0, align 2
-  store i16 %t0, i16* %p1, align 2
+  %p1 = getelementptr inbounds i16, ptr %p0, i64 1
+  store i16 %t1, ptr %p0, align 2
+  store i16 %t0, ptr %p1, align 2
   ret void
 }
 
-define void @le_i64_to_i8(i64 %x, i8* %p0) {
+define void @le_i64_to_i8(i64 %x, ptr %p0) {
 ; LE-LABEL: le_i64_to_i8:
 ; LE:       // %bb.0:
 ; LE-NEXT:    str x0, [x1]
@@ -305,25 +305,25 @@ define void @le_i64_to_i8(i64 %x, i8* %p0) {
   %t5 = trunc i64 %sh5 to i8
   %t6 = trunc i64 %sh6 to i8
   %t7 = trunc i64 %sh7 to i8
-  %p1 = getelementptr inbounds i8, i8* %p0, i64 1
-  %p2 = getelementptr inbounds i8, i8* %p0, i64 2
-  %p3 = getelementptr inbounds i8, i8* %p0, i64 3
-  %p4 = getelementptr inbounds i8, i8* %p0, i64 4
-  %p5 = getelementptr inbounds i8, i8* %p0, i64 5
-  %p6 = getelementptr inbounds i8, i8* %p0, i64 6
-  %p7 = getelementptr inbounds i8, i8* %p0, i64 7
-  store i8 %t0, i8* %p0, align 1
-  store i8 %t1, i8* %p1, align 1
-  store i8 %t2, i8* %p2, align 1
-  store i8 %t3, i8* %p3, align 1
-  store i8 %t4, i8* %p4, align 1
-  store i8 %t5, i8* %p5, align 1
-  store i8 %t6, i8* %p6, align 1
-  store i8 %t7, i8* %p7, align 1
+  %p1 = getelementptr inbounds i8, ptr %p0, i64 1
+  %p2 = getelementptr inbounds i8, ptr %p0, i64 2
+  %p3 = getelementptr inbounds i8, ptr %p0, i64 3
+  %p4 = getelementptr inbounds i8, ptr %p0, i64 4
+  %p5 = getelementptr inbounds i8, ptr %p0, i64 5
+  %p6 = getelementptr inbounds i8, ptr %p0, i64 6
+  %p7 = getelementptr inbounds i8, ptr %p0, i64 7
+  store i8 %t0, ptr %p0, align 1
+  store i8 %t1, ptr %p1, align 1
+  store i8 %t2, ptr %p2, align 1
+  store i8 %t3, ptr %p3, align 1
+  store i8 %t4, ptr %p4, align 1
+  store i8 %t5, ptr %p5, align 1
+  store i8 %t6, ptr %p6, align 1
+  store i8 %t7, ptr %p7, align 1
   ret void
 }
 
-define void @le_i64_to_i8_order(i64 %x, i8* %p0) {
+define void @le_i64_to_i8_order(i64 %x, ptr %p0) {
 ; LE-LABEL: le_i64_to_i8_order:
 ; LE:       // %bb.0:
 ; LE-NEXT:    str x0, [x1]
@@ -349,25 +349,25 @@ define void @le_i64_to_i8_order(i64 %x, i8* %p0) {
   %t5 = trunc i64 %sh5 to i8
   %t6 = trunc i64 %sh6 to i8
   %t7 = trunc i64 %sh7 to i8
-  %p1 = getelementptr inbounds i8, i8* %p0, i64 1
-  %p2 = getelementptr inbounds i8, i8* %p0, i64 2
-  %p3 = getelementptr inbounds i8, i8* %p0, i64 3
-  %p4 = getelementptr inbounds i8, i8* %p0, i64 4
-  %p5 = getelementptr inbounds i8, i8* %p0, i64 5
-  %p6 = getelementptr inbounds i8, i8* %p0, i64 6
-  %p7 = getelementptr inbounds i8, i8* %p0, i64 7
-  store i8 %t5, i8* %p5, align 1
-  store i8 %t0, i8* %p0, align 1
-  store i8 %t3, i8* %p3, align 1
-  store i8 %t7, i8* %p7, align 1
-  store i8 %t1, i8* %p1, align 1
-  store i8 %t6, i8* %p6, align 1
-  store i8 %t2, i8* %p2, align 1
-  store i8 %t4, i8* %p4, align 1
+  %p1 = getelementptr inbounds i8, ptr %p0, i64 1
+  %p2 = getelementptr inbounds i8, ptr %p0, i64 2
+  %p3 = getelementptr inbounds i8, ptr %p0, i64 3
+  %p4 = getelementptr inbounds i8, ptr %p0, i64 4
+  %p5 = getelementptr inbounds i8, ptr %p0, i64 5
+  %p6 = getelementptr inbounds i8, ptr %p0, i64 6
+  %p7 = getelementptr inbounds i8, ptr %p0, i64 7
+  store i8 %t5, ptr %p5, align 1
+  store i8 %t0, ptr %p0, align 1
+  store i8 %t3, ptr %p3, align 1
+  store i8 %t7, ptr %p7, align 1
+  store i8 %t1, ptr %p1, align 1
+  store i8 %t6, ptr %p6, align 1
+  store i8 %t2, ptr %p2, align 1
+  store i8 %t4, ptr %p4, align 1
   ret void
 }
 
-define void @be_i64_to_i8(i64 %x, i8* %p0) {
+define void @be_i64_to_i8(i64 %x, ptr %p0) {
 ; LE-LABEL: be_i64_to_i8:
 ; LE:       // %bb.0:
 ; LE-NEXT:    rev x8, x0
@@ -393,25 +393,25 @@ define void @be_i64_to_i8(i64 %x, i8* %p0) {
   %t5 = trunc i64 %sh5 to i8
   %t6 = trunc i64 %sh6 to i8
   %t7 = trunc i64 %sh7 to i8
-  %p1 = getelementptr inbounds i8, i8* %p0, i64 1
-  %p2 = getelementptr inbounds i8, i8* %p0, i64 2
-  %p3 = getelementptr inbounds i8, i8* %p0, i64 3
-  %p4 = getelementptr inbounds i8, i8* %p0, i64 4
-  %p5 = getelementptr inbounds i8, i8* %p0, i64 5
-  %p6 = getelementptr inbounds i8, i8* %p0, i64 6
-  %p7 = getelementptr inbounds i8, i8* %p0, i64 7
-  store i8 %t0, i8* %p7, align 1
-  store i8 %t1, i8* %p6, align 1
-  store i8 %t2, i8* %p5, align 1
-  store i8 %t3, i8* %p4, align 1
-  store i8 %t4, i8* %p3, align 1
-  store i8 %t5, i8* %p2, align 1
-  store i8 %t6, i8* %p1, align 1
-  store i8 %t7, i8* %p0, align 1
+  %p1 = getelementptr inbounds i8, ptr %p0, i64 1
+  %p2 = getelementptr inbounds i8, ptr %p0, i64 2
+  %p3 = getelementptr inbounds i8, ptr %p0, i64 3
+  %p4 = getelementptr inbounds i8, ptr %p0, i64 4
+  %p5 = getelementptr inbounds i8, ptr %p0, i64 5
+  %p6 = getelementptr inbounds i8, ptr %p0, i64 6
+  %p7 = getelementptr inbounds i8, ptr %p0, i64 7
+  store i8 %t0, ptr %p7, align 1
+  store i8 %t1, ptr %p6, align 1
+  store i8 %t2, ptr %p5, align 1
+  store i8 %t3, ptr %p4, align 1
+  store i8 %t4, ptr %p3, align 1
+  store i8 %t5, ptr %p2, align 1
+  store i8 %t6, ptr %p1, align 1
+  store i8 %t7, ptr %p0, align 1
   ret void
 }
 
-define void @be_i64_to_i8_order(i64 %x, i8* %p0) {
+define void @be_i64_to_i8_order(i64 %x, ptr %p0) {
 ; LE-LABEL: be_i64_to_i8_order:
 ; LE:       // %bb.0:
 ; LE-NEXT:    rev x8, x0
@@ -437,25 +437,25 @@ define void @be_i64_to_i8_order(i64 %x, i8* %p0) {
   %t5 = trunc i64 %sh5 to i8
   %t6 = trunc i64 %sh6 to i8
   %t7 = trunc i64 %sh7 to i8
-  %p1 = getelementptr inbounds i8, i8* %p0, i64 1
-  %p2 = getelementptr inbounds i8, i8* %p0, i64 2
-  %p3 = getelementptr inbounds i8, i8* %p0, i64 3
-  %p4 = getelementptr inbounds i8, i8* %p0, i64 4
-  %p5 = getelementptr inbounds i8, i8* %p0, i64 5
-  %p6 = getelementptr inbounds i8, i8* %p0, i64 6
-  %p7 = getelementptr inbounds i8, i8* %p0, i64 7
-  store i8 %t7, i8* %p0, align 1
-  store i8 %t6, i8* %p1, align 1
-  store i8 %t5, i8* %p2, align 1
-  store i8 %t4, i8* %p3, align 1
-  store i8 %t3, i8* %p4, align 1
-  store i8 %t2, i8* %p5, align 1
-  store i8 %t1, i8* %p6, align 1
-  store i8 %t0, i8* %p7, align 1
+  %p1 = getelementptr inbounds i8, ptr %p0, i64 1
+  %p2 = getelementptr inbounds i8, ptr %p0, i64 2
+  %p3 = getelementptr inbounds i8, ptr %p0, i64 3
+  %p4 = getelementptr inbounds i8, ptr %p0, i64 4
+  %p5 = getelementptr inbounds i8, ptr %p0, i64 5
+  %p6 = getelementptr inbounds i8, ptr %p0, i64 6
+  %p7 = getelementptr inbounds i8, ptr %p0, i64 7
+  store i8 %t7, ptr %p0, align 1
+  store i8 %t6, ptr %p1, align 1
+  store i8 %t5, ptr %p2, align 1
+  store i8 %t4, ptr %p3, align 1
+  store i8 %t3, ptr %p4, align 1
+  store i8 %t2, ptr %p5, align 1
+  store i8 %t1, ptr %p6, align 1
+  store i8 %t0, ptr %p7, align 1
   ret void
 }
 
-define void @le_i64_to_i16(i64 %x, i16* %p0) {
+define void @le_i64_to_i16(i64 %x, ptr %p0) {
 ; LE-LABEL: le_i64_to_i16:
 ; LE:       // %bb.0:
 ; LE-NEXT:    str x0, [x1]
@@ -478,17 +478,17 @@ define void @le_i64_to_i16(i64 %x, i16* %p0) {
   %t1 = trunc i64 %sh1 to i16
   %t2 = trunc i64 %sh2 to i16
   %t3 = trunc i64 %sh3 to i16
-  %p1 = getelementptr inbounds i16, i16* %p0, i64 1
-  %p2 = getelementptr inbounds i16, i16* %p0, i64 2
-  %p3 = getelementptr inbounds i16, i16* %p0, i64 3
-  store i16 %t0, i16* %p0, align 2
-  store i16 %t1, i16* %p1, align 2
-  store i16 %t2, i16* %p2, align 2
-  store i16 %t3, i16* %p3, align 2
+  %p1 = getelementptr inbounds i16, ptr %p0, i64 1
+  %p2 = getelementptr inbounds i16, ptr %p0, i64 2
+  %p3 = getelementptr inbounds i16, ptr %p0, i64 3
+  store i16 %t0, ptr %p0, align 2
+  store i16 %t1, ptr %p1, align 2
+  store i16 %t2, ptr %p2, align 2
+  store i16 %t3, ptr %p3, align 2
   ret void
 }
 
-define void @le_i64_to_i16_order(i64 %x, i16* %p0) {
+define void @le_i64_to_i16_order(i64 %x, ptr %p0) {
 ; LE-LABEL: le_i64_to_i16_order:
 ; LE:       // %bb.0:
 ; LE-NEXT:    str x0, [x1]
@@ -511,17 +511,17 @@ define void @le_i64_to_i16_order(i64 %x, i16* %p0) {
   %t1 = trunc i64 %sh1 to i16
   %t2 = trunc i64 %sh2 to i16
   %t3 = trunc i64 %sh3 to i16
-  %p1 = getelementptr inbounds i16, i16* %p0, i64 1
-  %p2 = getelementptr inbounds i16, i16* %p0, i64 2
-  %p3 = getelementptr inbounds i16, i16* %p0, i64 3
-  store i16 %t1, i16* %p1, align 2
-  store i16 %t3, i16* %p3, align 2
-  store i16 %t0, i16* %p0, align 2
-  store i16 %t2, i16* %p2, align 2
+  %p1 = getelementptr inbounds i16, ptr %p0, i64 1
+  %p2 = getelementptr inbounds i16, ptr %p0, i64 2
+  %p3 = getelementptr inbounds i16, ptr %p0, i64 3
+  store i16 %t1, ptr %p1, align 2
+  store i16 %t3, ptr %p3, align 2
+  store i16 %t0, ptr %p0, align 2
+  store i16 %t2, ptr %p2, align 2
   ret void
 }
 
-define void @be_i64_to_i16(i64 %x, i16* %p0) {
+define void @be_i64_to_i16(i64 %x, ptr %p0) {
 ; LE-LABEL: be_i64_to_i16:
 ; LE:       // %bb.0:
 ; LE-NEXT:    lsr x8, x0, #32
@@ -543,17 +543,17 @@ define void @be_i64_to_i16(i64 %x, i16* %p0) {
   %t1 = trunc i64 %sh1 to i16
   %t2 = trunc i64 %sh2 to i16
   %t3 = trunc i64 %sh3 to i16
-  %p1 = getelementptr inbounds i16, i16* %p0, i64 1
-  %p2 = getelementptr inbounds i16, i16* %p0, i64 2
-  %p3 = getelementptr inbounds i16, i16* %p0, i64 3
-  store i16 %t0, i16* %p3, align 2
-  store i16 %t1, i16* %p2, align 2
-  store i16 %t2, i16* %p1, align 2
-  store i16 %t3, i16* %p0, align 2
+  %p1 = getelementptr inbounds i16, ptr %p0, i64 1
+  %p2 = getelementptr inbounds i16, ptr %p0, i64 2
+  %p3 = getelementptr inbounds i16, ptr %p0, i64 3
+  store i16 %t0, ptr %p3, align 2
+  store i16 %t1, ptr %p2, align 2
+  store i16 %t2, ptr %p1, align 2
+  store i16 %t3, ptr %p0, align 2
   ret void
 }
 
-define void @be_i64_to_i16_order(i64 %x, i16* %p0) {
+define void @be_i64_to_i16_order(i64 %x, ptr %p0) {
 ; LE-LABEL: be_i64_to_i16_order:
 ; LE:       // %bb.0:
 ; LE-NEXT:    lsr x8, x0, #48
@@ -576,17 +576,17 @@ define void @be_i64_to_i16_order(i64 %x, i16* %p0) {
   %t1 = trunc i64 %sh1 to i16
   %t2 = trunc i64 %sh2 to i16
   %t3 = trunc i64 %sh3 to i16
-  %p1 = getelementptr inbounds i16, i16* %p0, i64 1
-  %p2 = getelementptr inbounds i16, i16* %p0, i64 2
-  %p3 = getelementptr inbounds i16, i16* %p0, i64 3
-  store i16 %t0, i16* %p3, align 2
-  store i16 %t3, i16* %p0, align 2
-  store i16 %t2, i16* %p1, align 2
-  store i16 %t1, i16* %p2, align 2
+  %p1 = getelementptr inbounds i16, ptr %p0, i64 1
+  %p2 = getelementptr inbounds i16, ptr %p0, i64 2
+  %p3 = getelementptr inbounds i16, ptr %p0, i64 3
+  store i16 %t0, ptr %p3, align 2
+  store i16 %t3, ptr %p0, align 2
+  store i16 %t2, ptr %p1, align 2
+  store i16 %t1, ptr %p2, align 2
   ret void
 }
 
-define void @le_i64_to_i32(i64 %x, i32* %p0) {
+define void @le_i64_to_i32(i64 %x, ptr %p0) {
 ; LE-LABEL: le_i64_to_i32:
 ; LE:       // %bb.0:
 ; LE-NEXT:    str x0, [x1]
@@ -600,13 +600,13 @@ define void @le_i64_to_i32(i64 %x, i32* %p0) {
   %sh1 = lshr i64 %x, 32
   %t0 = trunc i64 %x to i32
   %t1 = trunc i64 %sh1 to i32
-  %p1 = getelementptr inbounds i32, i32* %p0, i64 1
-  store i32 %t0, i32* %p0, align 4
-  store i32 %t1, i32* %p1, align 4
+  %p1 = getelementptr inbounds i32, ptr %p0, i64 1
+  store i32 %t0, ptr %p0, align 4
+  store i32 %t1, ptr %p1, align 4
   ret void
 }
 
-define void @le_i64_to_i32_order(i64 %x, i32* %p0) {
+define void @le_i64_to_i32_order(i64 %x, ptr %p0) {
 ; LE-LABEL: le_i64_to_i32_order:
 ; LE:       // %bb.0:
 ; LE-NEXT:    str x0, [x1]
@@ -620,13 +620,13 @@ define void @le_i64_to_i32_order(i64 %x, i32* %p0) {
   %sh1 = lshr i64 %x, 32
   %t0 = trunc i64 %x to i32
   %t1 = trunc i64 %sh1 to i32
-  %p1 = getelementptr inbounds i32, i32* %p0, i64 1
-  store i32 %t1, i32* %p1, align 4
-  store i32 %t0, i32* %p0, align 4
+  %p1 = getelementptr inbounds i32, ptr %p0, i64 1
+  store i32 %t1, ptr %p1, align 4
+  store i32 %t0, ptr %p0, align 4
   ret void
 }
 
-define void @be_i64_to_i32(i64 %x, i32* %p0) {
+define void @be_i64_to_i32(i64 %x, ptr %p0) {
 ; LE-LABEL: be_i64_to_i32:
 ; LE:       // %bb.0:
 ; LE-NEXT:    ror x8, x0, #32
@@ -640,13 +640,13 @@ define void @be_i64_to_i32(i64 %x, i32* %p0) {
   %sh1 = lshr i64 %x, 32
   %t0 = trunc i64 %x to i32
   %t1 = trunc i64 %sh1 to i32
-  %p1 = getelementptr inbounds i32, i32* %p0, i64 1
-  store i32 %t0, i32* %p1, align 4
-  store i32 %t1, i32* %p0, align 4
+  %p1 = getelementptr inbounds i32, ptr %p0, i64 1
+  store i32 %t0, ptr %p1, align 4
+  store i32 %t1, ptr %p0, align 4
   ret void
 }
 
-define void @be_i64_to_i32_order(i64 %x, i32* %p0) {
+define void @be_i64_to_i32_order(i64 %x, ptr %p0) {
 ; LE-LABEL: be_i64_to_i32_order:
 ; LE:       // %bb.0:
 ; LE-NEXT:    ror x8, x0, #32
@@ -660,15 +660,15 @@ define void @be_i64_to_i32_order(i64 %x, i32* %p0) {
   %sh1 = lshr i64 %x, 32
   %t0 = trunc i64 %x to i32
   %t1 = trunc i64 %sh1 to i32
-  %p1 = getelementptr inbounds i32, i32* %p0, i64 1
-  store i32 %t1, i32* %p0, align 4
-  store i32 %t0, i32* %p1, align 4
+  %p1 = getelementptr inbounds i32, ptr %p0, i64 1
+  store i32 %t1, ptr %p0, align 4
+  store i32 %t0, ptr %p1, align 4
   ret void
 }
 
 ; Negative test - not consecutive addresses
 
-define void @i64_to_i32_wrong_addr(i64 %x, i32* %p0) {
+define void @i64_to_i32_wrong_addr(i64 %x, ptr %p0) {
 ; CHECK-LABEL: i64_to_i32_wrong_addr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr x8, x0, #32
@@ -678,15 +678,15 @@ define void @i64_to_i32_wrong_addr(i64 %x, i32* %p0) {
   %sh1 = lshr i64 %x, 32
   %t0 = trunc i64 %x to i32
   %t1 = trunc i64 %sh1 to i32
-  %p3 = getelementptr inbounds i32, i32* %p0, i64 3
-  store i32 %t1, i32* %p3, align 4
-  store i32 %t0, i32* %p0, align 4
+  %p3 = getelementptr inbounds i32, ptr %p0, i64 3
+  store i32 %t1, ptr %p3, align 4
+  store i32 %t0, ptr %p0, align 4
   ret void
 }
 
 ; Negative test - addresses don't line up with shift amounts
 
-define void @i64_to_i16_wrong_order(i64 %x, i16* %p0) {
+define void @i64_to_i16_wrong_order(i64 %x, ptr %p0) {
 ; CHECK-LABEL: i64_to_i16_wrong_order:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr x8, x0, #48
@@ -704,19 +704,19 @@ define void @i64_to_i16_wrong_order(i64 %x, i16* %p0) {
   %t1 = trunc i64 %sh1 to i16
   %t2 = trunc i64 %sh2 to i16
   %t3 = trunc i64 %sh3 to i16
-  %p1 = getelementptr inbounds i16, i16* %p0, i64 1
-  %p2 = getelementptr inbounds i16, i16* %p0, i64 2
-  %p3 = getelementptr inbounds i16, i16* %p0, i64 3
-  store i16 %t3, i16* %p3, align 2
-  store i16 %t1, i16* %p2, align 2
-  store i16 %t2, i16* %p1, align 2
-  store i16 %t0, i16* %p0, align 2
+  %p1 = getelementptr inbounds i16, ptr %p0, i64 1
+  %p2 = getelementptr inbounds i16, ptr %p0, i64 2
+  %p3 = getelementptr inbounds i16, ptr %p0, i64 3
+  store i16 %t3, ptr %p3, align 2
+  store i16 %t1, ptr %p2, align 2
+  store i16 %t2, ptr %p1, align 2
+  store i16 %t0, ptr %p0, align 2
   ret void
 }
 
 ; Negative test - no store of 't1'
 
-define void @i32_to_i8_incomplete(i32 %x, i8* %p0) {
+define void @i32_to_i8_incomplete(i32 %x, ptr %p0) {
 ; CHECK-LABEL: i32_to_i8_incomplete:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #16
@@ -732,18 +732,18 @@ define void @i32_to_i8_incomplete(i32 %x, i8* %p0) {
   %t1 = trunc i32 %sh1 to i8
   %t2 = trunc i32 %sh2 to i8
   %t3 = trunc i32 %sh3 to i8
-  %p1 = getelementptr inbounds i8, i8* %p0, i64 1
-  %p2 = getelementptr inbounds i8, i8* %p0, i64 2
-  %p3 = getelementptr inbounds i8, i8* %p0, i64 3
-  store i8 %t0, i8* %p0, align 1
-  store i8 %t2, i8* %p2, align 1
-  store i8 %t3, i8* %p3, align 1
+  %p1 = getelementptr inbounds i8, ptr %p0, i64 1
+  %p2 = getelementptr inbounds i8, ptr %p0, i64 2
+  %p3 = getelementptr inbounds i8, ptr %p0, i64 3
+  store i8 %t0, ptr %p0, align 1
+  store i8 %t2, ptr %p2, align 1
+  store i8 %t3, ptr %p3, align 1
   ret void
 }
 
 ; Negative test - no store of 't3'
 
-define void @i64_to_i8_incomplete(i64 %x, i8* %p0) {
+define void @i64_to_i8_incomplete(i64 %x, ptr %p0) {
 ; CHECK-LABEL: i64_to_i8_incomplete:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr x8, x0, #56
@@ -775,26 +775,26 @@ define void @i64_to_i8_incomplete(i64 %x, i8* %p0) {
   %t5 = trunc i64 %sh5 to i8
   %t6 = trunc i64 %sh6 to i8
   %t7 = trunc i64 %sh7 to i8
-  %p1 = getelementptr inbounds i8, i8* %p0, i64 1
-  %p2 = getelementptr inbounds i8, i8* %p0, i64 2
-  %p3 = getelementptr inbounds i8, i8* %p0, i64 3
-  %p4 = getelementptr inbounds i8, i8* %p0, i64 4
-  %p5 = getelementptr inbounds i8, i8* %p0, i64 5
-  %p6 = getelementptr inbounds i8, i8* %p0, i64 6
-  %p7 = getelementptr inbounds i8, i8* %p0, i64 7
-  store i8 %t7, i8* %p0, align 1
-  store i8 %t6, i8* %p1, align 1
-  store i8 %t5, i8* %p2, align 1
-  store i8 %t4, i8* %p3, align 1
-  store i8 %t2, i8* %p5, align 1
-  store i8 %t1, i8* %p6, align 1
-  store i8 %t0, i8* %p7, align 1
+  %p1 = getelementptr inbounds i8, ptr %p0, i64 1
+  %p2 = getelementptr inbounds i8, ptr %p0, i64 2
+  %p3 = getelementptr inbounds i8, ptr %p0, i64 3
+  %p4 = getelementptr inbounds i8, ptr %p0, i64 4
+  %p5 = getelementptr inbounds i8, ptr %p0, i64 5
+  %p6 = getelementptr inbounds i8, ptr %p0, i64 6
+  %p7 = getelementptr inbounds i8, ptr %p0, i64 7
+  store i8 %t7, ptr %p0, align 1
+  store i8 %t6, ptr %p1, align 1
+  store i8 %t5, ptr %p2, align 1
+  store i8 %t4, ptr %p3, align 1
+  store i8 %t2, ptr %p5, align 1
+  store i8 %t1, ptr %p6, align 1
+  store i8 %t0, ptr %p7, align 1
   ret void
 }
 
 ; Negative test - not consecutive addresses
 
-define void @i32_to_i16_wrong_addr(i32 %x, i16* %p0) {
+define void @i32_to_i16_wrong_addr(i32 %x, ptr %p0) {
 ; CHECK-LABEL: i32_to_i16_wrong_addr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #16
@@ -804,15 +804,15 @@ define void @i32_to_i16_wrong_addr(i32 %x, i16* %p0) {
   %sh1 = lshr i32 %x, 16
   %t0 = trunc i32 %x to i16
   %t1 = trunc i32 %sh1 to i16
-  %p2 = getelementptr inbounds i16, i16* %p0, i64 2
-  store i16 %t1, i16* %p2, align 2
-  store i16 %t0, i16* %p0, align 2
+  %p2 = getelementptr inbounds i16, ptr %p0, i64 2
+  store i16 %t1, ptr %p2, align 2
+  store i16 %t0, ptr %p0, align 2
   ret void
 }
 
 ; Negative test - addresses don't line up with shift amounts
 
-define void @i32_to_i8_wrong_order(i32 %x, i8* %p0) {
+define void @i32_to_i8_wrong_order(i32 %x, ptr %p0) {
 ; CHECK-LABEL: i32_to_i8_wrong_order:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #24
@@ -830,12 +830,12 @@ define void @i32_to_i8_wrong_order(i32 %x, i8* %p0) {
   %t1 = trunc i32 %sh1 to i8
   %t2 = trunc i32 %sh2 to i8
   %t3 = trunc i32 %sh3 to i8
-  %p1 = getelementptr inbounds i8, i8* %p0, i64 1
-  %p2 = getelementptr inbounds i8, i8* %p0, i64 2
-  %p3 = getelementptr inbounds i8, i8* %p0, i64 3
-  store i8 %t3, i8* %p1, align 1
-  store i8 %t2, i8* %p0, align 1
-  store i8 %t0, i8* %p3, align 1
-  store i8 %t1, i8* %p2, align 1
+  %p1 = getelementptr inbounds i8, ptr %p0, i64 1
+  %p2 = getelementptr inbounds i8, ptr %p0, i64 2
+  %p3 = getelementptr inbounds i8, ptr %p0, i64 3
+  store i8 %t3, ptr %p1, align 1
+  store i8 %t2, ptr %p0, align 1
+  store i8 %t0, ptr %p3, align 1
+  store i8 %t1, ptr %p2, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/mergestores_noimplicitfloat.ll b/llvm/test/CodeGen/AArch64/mergestores_noimplicitfloat.ll
index fbaef9cc07516..c467bc7df8402 100644
--- a/llvm/test/CodeGen/AArch64/mergestores_noimplicitfloat.ll
+++ b/llvm/test/CodeGen/AArch64/mergestores_noimplicitfloat.ll
@@ -13,9 +13,9 @@ target triple = "arm64-apple-ios10.0.0"
 ; CHECK-DAG: stp [[R0:x[0-9]+]], [[R0:x[0-9]+]], [x0, #16]
 ; CHECK-DAG: stp [[R0:x[0-9]+]], [[R0:x[0-9]+]], [x0]
 
-define void @pr33475(i8* %p0, i8* %p1) noimplicitfloat {
-    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %p0, i8* align 4 %p1, i64 32, i1 false)
+define void @pr33475(ptr %p0, ptr %p1) noimplicitfloat {
+    call void @llvm.memcpy.p0.p0.i64(ptr align 4 %p0, ptr align 4 %p1, i64 32, i1 false)
     ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)

diff  --git a/llvm/test/CodeGen/AArch64/midpoint-int.ll b/llvm/test/CodeGen/AArch64/midpoint-int.ll
index 3e8259656f32b..2ef7c54f6d914 100644
--- a/llvm/test/CodeGen/AArch64/midpoint-int.ll
+++ b/llvm/test/CodeGen/AArch64/midpoint-int.ll
@@ -58,7 +58,7 @@ define i32 @scalar_i32_unsigned_reg_reg(i32 %a1, i32 %a2) nounwind {
 
 ; Values are loaded. Only check signed case.
 
-define i32 @scalar_i32_signed_mem_reg(i32* %a1_addr, i32 %a2) nounwind {
+define i32 @scalar_i32_signed_mem_reg(ptr %a1_addr, i32 %a2) nounwind {
 ; CHECK-LABEL: scalar_i32_signed_mem_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w9, [x0]
@@ -71,7 +71,7 @@ define i32 @scalar_i32_signed_mem_reg(i32* %a1_addr, i32 %a2) nounwind {
 ; CHECK-NEXT:    lsr w10, w10, #1
 ; CHECK-NEXT:    madd w0, w10, w8, w9
 ; CHECK-NEXT:    ret
-  %a1 = load i32, i32* %a1_addr
+  %a1 = load i32, ptr %a1_addr
   %t3 = icmp sgt i32 %a1, %a2 ; signed
   %t4 = select i1 %t3, i32 -1, i32 1
   %t5 = select i1 %t3, i32 %a2, i32 %a1
@@ -83,7 +83,7 @@ define i32 @scalar_i32_signed_mem_reg(i32* %a1_addr, i32 %a2) nounwind {
   ret i32 %a10
 }
 
-define i32 @scalar_i32_signed_reg_mem(i32 %a1, i32* %a2_addr) nounwind {
+define i32 @scalar_i32_signed_reg_mem(i32 %a1, ptr %a2_addr) nounwind {
 ; CHECK-LABEL: scalar_i32_signed_reg_mem:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w9, [x1]
@@ -96,7 +96,7 @@ define i32 @scalar_i32_signed_reg_mem(i32 %a1, i32* %a2_addr) nounwind {
 ; CHECK-NEXT:    lsr w9, w9, #1
 ; CHECK-NEXT:    madd w0, w9, w8, w0
 ; CHECK-NEXT:    ret
-  %a2 = load i32, i32* %a2_addr
+  %a2 = load i32, ptr %a2_addr
   %t3 = icmp sgt i32 %a1, %a2 ; signed
   %t4 = select i1 %t3, i32 -1, i32 1
   %t5 = select i1 %t3, i32 %a2, i32 %a1
@@ -108,7 +108,7 @@ define i32 @scalar_i32_signed_reg_mem(i32 %a1, i32* %a2_addr) nounwind {
   ret i32 %a10
 }
 
-define i32 @scalar_i32_signed_mem_mem(i32* %a1_addr, i32* %a2_addr) nounwind {
+define i32 @scalar_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
 ; CHECK-LABEL: scalar_i32_signed_mem_mem:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w9, [x0]
@@ -122,8 +122,8 @@ define i32 @scalar_i32_signed_mem_mem(i32* %a1_addr, i32* %a2_addr) nounwind {
 ; CHECK-NEXT:    lsr w10, w10, #1
 ; CHECK-NEXT:    madd w0, w10, w8, w9
 ; CHECK-NEXT:    ret
-  %a1 = load i32, i32* %a1_addr
-  %a2 = load i32, i32* %a2_addr
+  %a1 = load i32, ptr %a1_addr
+  %a2 = load i32, ptr %a2_addr
   %t3 = icmp sgt i32 %a1, %a2 ; signed
   %t4 = select i1 %t3, i32 -1, i32 1
   %t5 = select i1 %t3, i32 %a2, i32 %a1
@@ -189,7 +189,7 @@ define i64 @scalar_i64_unsigned_reg_reg(i64 %a1, i64 %a2) nounwind {
 
 ; Values are loaded. Only check signed case.
 
-define i64 @scalar_i64_signed_mem_reg(i64* %a1_addr, i64 %a2) nounwind {
+define i64 @scalar_i64_signed_mem_reg(ptr %a1_addr, i64 %a2) nounwind {
 ; CHECK-LABEL: scalar_i64_signed_mem_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x9, [x0]
@@ -202,7 +202,7 @@ define i64 @scalar_i64_signed_mem_reg(i64* %a1_addr, i64 %a2) nounwind {
 ; CHECK-NEXT:    lsr x10, x10, #1
 ; CHECK-NEXT:    madd x0, x10, x8, x9
 ; CHECK-NEXT:    ret
-  %a1 = load i64, i64* %a1_addr
+  %a1 = load i64, ptr %a1_addr
   %t3 = icmp sgt i64 %a1, %a2 ; signed
   %t4 = select i1 %t3, i64 -1, i64 1
   %t5 = select i1 %t3, i64 %a2, i64 %a1
@@ -214,7 +214,7 @@ define i64 @scalar_i64_signed_mem_reg(i64* %a1_addr, i64 %a2) nounwind {
   ret i64 %a10
 }
 
-define i64 @scalar_i64_signed_reg_mem(i64 %a1, i64* %a2_addr) nounwind {
+define i64 @scalar_i64_signed_reg_mem(i64 %a1, ptr %a2_addr) nounwind {
 ; CHECK-LABEL: scalar_i64_signed_reg_mem:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x9, [x1]
@@ -227,7 +227,7 @@ define i64 @scalar_i64_signed_reg_mem(i64 %a1, i64* %a2_addr) nounwind {
 ; CHECK-NEXT:    lsr x9, x9, #1
 ; CHECK-NEXT:    madd x0, x9, x8, x0
 ; CHECK-NEXT:    ret
-  %a2 = load i64, i64* %a2_addr
+  %a2 = load i64, ptr %a2_addr
   %t3 = icmp sgt i64 %a1, %a2 ; signed
   %t4 = select i1 %t3, i64 -1, i64 1
   %t5 = select i1 %t3, i64 %a2, i64 %a1
@@ -239,7 +239,7 @@ define i64 @scalar_i64_signed_reg_mem(i64 %a1, i64* %a2_addr) nounwind {
   ret i64 %a10
 }
 
-define i64 @scalar_i64_signed_mem_mem(i64* %a1_addr, i64* %a2_addr) nounwind {
+define i64 @scalar_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
 ; CHECK-LABEL: scalar_i64_signed_mem_mem:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x9, [x0]
@@ -253,8 +253,8 @@ define i64 @scalar_i64_signed_mem_mem(i64* %a1_addr, i64* %a2_addr) nounwind {
 ; CHECK-NEXT:    lsr x10, x10, #1
 ; CHECK-NEXT:    madd x0, x10, x8, x9
 ; CHECK-NEXT:    ret
-  %a1 = load i64, i64* %a1_addr
-  %a2 = load i64, i64* %a2_addr
+  %a1 = load i64, ptr %a1_addr
+  %a2 = load i64, ptr %a2_addr
   %t3 = icmp sgt i64 %a1, %a2 ; signed
   %t4 = select i1 %t3, i64 -1, i64 1
   %t5 = select i1 %t3, i64 %a2, i64 %a1
@@ -322,7 +322,7 @@ define i16 @scalar_i16_unsigned_reg_reg(i16 %a1, i16 %a2) nounwind {
 
 ; Values are loaded. Only check signed case.
 
-define i16 @scalar_i16_signed_mem_reg(i16* %a1_addr, i16 %a2) nounwind {
+define i16 @scalar_i16_signed_mem_reg(ptr %a1_addr, i16 %a2) nounwind {
 ; CHECK-LABEL: scalar_i16_signed_mem_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrsh w9, [x0]
@@ -335,7 +335,7 @@ define i16 @scalar_i16_signed_mem_reg(i16* %a1_addr, i16 %a2) nounwind {
 ; CHECK-NEXT:    ubfx w10, w10, #1, #15
 ; CHECK-NEXT:    madd w0, w10, w8, w9
 ; CHECK-NEXT:    ret
-  %a1 = load i16, i16* %a1_addr
+  %a1 = load i16, ptr %a1_addr
   %t3 = icmp sgt i16 %a1, %a2 ; signed
   %t4 = select i1 %t3, i16 -1, i16 1
   %t5 = select i1 %t3, i16 %a2, i16 %a1
@@ -347,7 +347,7 @@ define i16 @scalar_i16_signed_mem_reg(i16* %a1_addr, i16 %a2) nounwind {
   ret i16 %a10
 }
 
-define i16 @scalar_i16_signed_reg_mem(i16 %a1, i16* %a2_addr) nounwind {
+define i16 @scalar_i16_signed_reg_mem(i16 %a1, ptr %a2_addr) nounwind {
 ; CHECK-LABEL: scalar_i16_signed_reg_mem:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrsh w9, [x1]
@@ -361,7 +361,7 @@ define i16 @scalar_i16_signed_reg_mem(i16 %a1, i16* %a2_addr) nounwind {
 ; CHECK-NEXT:    ubfx w8, w8, #1, #15
 ; CHECK-NEXT:    madd w0, w8, w9, w0
 ; CHECK-NEXT:    ret
-  %a2 = load i16, i16* %a2_addr
+  %a2 = load i16, ptr %a2_addr
   %t3 = icmp sgt i16 %a1, %a2 ; signed
   %t4 = select i1 %t3, i16 -1, i16 1
   %t5 = select i1 %t3, i16 %a2, i16 %a1
@@ -373,7 +373,7 @@ define i16 @scalar_i16_signed_reg_mem(i16 %a1, i16* %a2_addr) nounwind {
   ret i16 %a10
 }
 
-define i16 @scalar_i16_signed_mem_mem(i16* %a1_addr, i16* %a2_addr) nounwind {
+define i16 @scalar_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
 ; CHECK-LABEL: scalar_i16_signed_mem_mem:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrsh w9, [x0]
@@ -387,8 +387,8 @@ define i16 @scalar_i16_signed_mem_mem(i16* %a1_addr, i16* %a2_addr) nounwind {
 ; CHECK-NEXT:    ubfx w10, w10, #1, #15
 ; CHECK-NEXT:    madd w0, w10, w8, w9
 ; CHECK-NEXT:    ret
-  %a1 = load i16, i16* %a1_addr
-  %a2 = load i16, i16* %a2_addr
+  %a1 = load i16, ptr %a1_addr
+  %a2 = load i16, ptr %a2_addr
   %t3 = icmp sgt i16 %a1, %a2 ; signed
   %t4 = select i1 %t3, i16 -1, i16 1
   %t5 = select i1 %t3, i16 %a2, i16 %a1
@@ -456,7 +456,7 @@ define i8 @scalar_i8_unsigned_reg_reg(i8 %a1, i8 %a2) nounwind {
 
 ; Values are loaded. Only check signed case.
 
-define i8 @scalar_i8_signed_mem_reg(i8* %a1_addr, i8 %a2) nounwind {
+define i8 @scalar_i8_signed_mem_reg(ptr %a1_addr, i8 %a2) nounwind {
 ; CHECK-LABEL: scalar_i8_signed_mem_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrsb w9, [x0]
@@ -469,7 +469,7 @@ define i8 @scalar_i8_signed_mem_reg(i8* %a1_addr, i8 %a2) nounwind {
 ; CHECK-NEXT:    ubfx w10, w10, #1, #7
 ; CHECK-NEXT:    madd w0, w10, w8, w9
 ; CHECK-NEXT:    ret
-  %a1 = load i8, i8* %a1_addr
+  %a1 = load i8, ptr %a1_addr
   %t3 = icmp sgt i8 %a1, %a2 ; signed
   %t4 = select i1 %t3, i8 -1, i8 1
   %t5 = select i1 %t3, i8 %a2, i8 %a1
@@ -481,7 +481,7 @@ define i8 @scalar_i8_signed_mem_reg(i8* %a1_addr, i8 %a2) nounwind {
   ret i8 %a10
 }
 
-define i8 @scalar_i8_signed_reg_mem(i8 %a1, i8* %a2_addr) nounwind {
+define i8 @scalar_i8_signed_reg_mem(i8 %a1, ptr %a2_addr) nounwind {
 ; CHECK-LABEL: scalar_i8_signed_reg_mem:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrsb w9, [x1]
@@ -495,7 +495,7 @@ define i8 @scalar_i8_signed_reg_mem(i8 %a1, i8* %a2_addr) nounwind {
 ; CHECK-NEXT:    ubfx w8, w8, #1, #7
 ; CHECK-NEXT:    madd w0, w8, w9, w0
 ; CHECK-NEXT:    ret
-  %a2 = load i8, i8* %a2_addr
+  %a2 = load i8, ptr %a2_addr
   %t3 = icmp sgt i8 %a1, %a2 ; signed
   %t4 = select i1 %t3, i8 -1, i8 1
   %t5 = select i1 %t3, i8 %a2, i8 %a1
@@ -507,7 +507,7 @@ define i8 @scalar_i8_signed_reg_mem(i8 %a1, i8* %a2_addr) nounwind {
   ret i8 %a10
 }
 
-define i8 @scalar_i8_signed_mem_mem(i8* %a1_addr, i8* %a2_addr) nounwind {
+define i8 @scalar_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
 ; CHECK-LABEL: scalar_i8_signed_mem_mem:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrsb w9, [x0]
@@ -521,8 +521,8 @@ define i8 @scalar_i8_signed_mem_mem(i8* %a1_addr, i8* %a2_addr) nounwind {
 ; CHECK-NEXT:    ubfx w10, w10, #1, #7
 ; CHECK-NEXT:    madd w0, w10, w8, w9
 ; CHECK-NEXT:    ret
-  %a1 = load i8, i8* %a1_addr
-  %a2 = load i8, i8* %a2_addr
+  %a1 = load i8, ptr %a1_addr
+  %a2 = load i8, ptr %a2_addr
   %t3 = icmp sgt i8 %a1, %a2 ; signed
   %t4 = select i1 %t3, i8 -1, i8 1
   %t5 = select i1 %t3, i8 %a2, i8 %a1

diff  --git a/llvm/test/CodeGen/AArch64/min-max.ll b/llvm/test/CodeGen/AArch64/min-max.ll
index cdfd6d941097a..fb80c13cb9ca2 100644
--- a/llvm/test/CodeGen/AArch64/min-max.ll
+++ b/llvm/test/CodeGen/AArch64/min-max.ll
@@ -132,7 +132,7 @@ define <16 x i8> @smax16i8(<16 x i8> %a, <16 x i8> %b) {
 
 declare <32 x i8> @llvm.smax.v32i8(<32 x i8> %a, <32 x i8> %b) readnone
 
-define void @smax32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
+define void @smax32i8(<32 x i8> %a, <32 x i8> %b, ptr %p) {
 ; CHECK-ISEL-LABEL: smax32i8:
 ; CHECK-ISEL:       // %bb.0:
 ; CHECK-ISEL-NEXT:    smax v1.16b, v1.16b, v3.16b
@@ -154,7 +154,7 @@ define void @smax32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
 ; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call <32 x i8> @llvm.smax.v32i8(<32 x i8> %a, <32 x i8> %b)
-  store <32 x i8> %c, <32 x i8>* %p
+  store <32 x i8> %c, ptr %p
   ret void
 }
 
@@ -182,7 +182,7 @@ define <8 x i16> @smax8i16(<8 x i16> %a, <8 x i16> %b) {
 
 declare <16 x i16> @llvm.smax.v16i16(<16 x i16> %a, <16 x i16> %b) readnone
 
-define void @smax16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
+define void @smax16i16(<16 x i16> %a, <16 x i16> %b, ptr %p) {
 ; CHECK-ISEL-LABEL: smax16i16:
 ; CHECK-ISEL:       // %bb.0:
 ; CHECK-ISEL-NEXT:    smax v1.8h, v1.8h, v3.8h
@@ -204,7 +204,7 @@ define void @smax16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
 ; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call <16 x i16> @llvm.smax.v16i16(<16 x i16> %a, <16 x i16> %b)
-  store <16 x i16> %c, <16 x i16>* %p
+  store <16 x i16> %c, ptr %p
   ret void
 }
 
@@ -232,7 +232,7 @@ define <4 x i32> @smax4i32(<4 x i32> %a, <4 x i32> %b) {
 
 declare <8 x i32> @llvm.smax.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
 
-define void @smax8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+define void @smax8i32(<8 x i32> %a, <8 x i32> %b, ptr %p) {
 ; CHECK-ISEL-LABEL: smax8i32:
 ; CHECK-ISEL:       // %bb.0:
 ; CHECK-ISEL-NEXT:    smax v1.4s, v1.4s, v3.4s
@@ -254,7 +254,7 @@ define void @smax8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
 ; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call <8 x i32>@llvm.smax.v8i32(<8 x i32> %a, <8 x i32> %b)
-  store <8 x i32> %c, <8 x i32>* %p
+  store <8 x i32> %c, ptr %p
   ret void
 }
 
@@ -310,7 +310,7 @@ define <2 x i64> @smax2i64(<2 x i64> %a, <2 x i64> %b) {
 
 declare <4 x i64> @llvm.smax.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
 
-define void @smax4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+define void @smax4i64(<4 x i64> %a, <4 x i64> %b, ptr %p) {
 ; CHECK-ISEL-LABEL: smax4i64:
 ; CHECK-ISEL:       // %bb.0:
 ; CHECK-ISEL-NEXT:    cmgt v4.2d, v1.2d, v3.2d
@@ -338,7 +338,7 @@ define void @smax4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
 ; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %a, <4 x i64> %b)
-  store <4 x i64> %c, <4 x i64>* %p
+  store <4 x i64> %c, ptr %p
   ret void
 }
 
@@ -468,7 +468,7 @@ define <16 x i8> @umax16i8(<16 x i8> %a, <16 x i8> %b) {
 
 declare <32 x i8> @llvm.umax.v32i8(<32 x i8> %a, <32 x i8> %b) readnone
 
-define void @umax32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
+define void @umax32i8(<32 x i8> %a, <32 x i8> %b, ptr %p) {
 ; CHECK-ISEL-LABEL: umax32i8:
 ; CHECK-ISEL:       // %bb.0:
 ; CHECK-ISEL-NEXT:    umax v1.16b, v1.16b, v3.16b
@@ -490,7 +490,7 @@ define void @umax32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
 ; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call <32 x i8> @llvm.umax.v32i8(<32 x i8> %a, <32 x i8> %b)
-  store <32 x i8> %c, <32 x i8>* %p
+  store <32 x i8> %c, ptr %p
   ret void
 }
 
@@ -518,7 +518,7 @@ define <8 x i16> @umax8i16(<8 x i16> %a, <8 x i16> %b) {
 
 declare <16 x i16> @llvm.umax.v16i16(<16 x i16> %a, <16 x i16> %b) readnone
 
-define void @umax16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
+define void @umax16i16(<16 x i16> %a, <16 x i16> %b, ptr %p) {
 ; CHECK-ISEL-LABEL: umax16i16:
 ; CHECK-ISEL:       // %bb.0:
 ; CHECK-ISEL-NEXT:    umax v1.8h, v1.8h, v3.8h
@@ -540,7 +540,7 @@ define void @umax16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
 ; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call <16 x i16> @llvm.umax.v16i16(<16 x i16> %a, <16 x i16> %b)
-  store <16 x i16> %c, <16 x i16>* %p
+  store <16 x i16> %c, ptr %p
   ret void
 }
 
@@ -568,7 +568,7 @@ define <4 x i32> @umax4i32(<4 x i32> %a, <4 x i32> %b) {
 
 declare <8 x i32> @llvm.umax.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
 
-define void @umax8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+define void @umax8i32(<8 x i32> %a, <8 x i32> %b, ptr %p) {
 ; CHECK-ISEL-LABEL: umax8i32:
 ; CHECK-ISEL:       // %bb.0:
 ; CHECK-ISEL-NEXT:    umax v1.4s, v1.4s, v3.4s
@@ -590,7 +590,7 @@ define void @umax8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
 ; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call <8 x i32>@llvm.umax.v8i32(<8 x i32> %a, <8 x i32> %b)
-  store <8 x i32> %c, <8 x i32>* %p
+  store <8 x i32> %c, ptr %p
   ret void
 }
 
@@ -646,7 +646,7 @@ define <2 x i64> @umax2i64(<2 x i64> %a, <2 x i64> %b) {
 
 declare <4 x i64> @llvm.umax.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
 
-define void @umax4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+define void @umax4i64(<4 x i64> %a, <4 x i64> %b, ptr %p) {
 ; CHECK-ISEL-LABEL: umax4i64:
 ; CHECK-ISEL:       // %bb.0:
 ; CHECK-ISEL-NEXT:    cmhi v4.2d, v1.2d, v3.2d
@@ -674,7 +674,7 @@ define void @umax4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
 ; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %a, <4 x i64> %b)
-  store <4 x i64> %c, <4 x i64>* %p
+  store <4 x i64> %c, ptr %p
   ret void
 }
 
@@ -804,7 +804,7 @@ define <16 x i8> @smin16i8(<16 x i8> %a, <16 x i8> %b) {
 
 declare <32 x i8> @llvm.smin.v32i8(<32 x i8> %a, <32 x i8> %b) readnone
 
-define void @smin32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
+define void @smin32i8(<32 x i8> %a, <32 x i8> %b, ptr %p) {
 ; CHECK-ISEL-LABEL: smin32i8:
 ; CHECK-ISEL:       // %bb.0:
 ; CHECK-ISEL-NEXT:    smin v1.16b, v1.16b, v3.16b
@@ -826,7 +826,7 @@ define void @smin32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
 ; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call <32 x i8> @llvm.smin.v32i8(<32 x i8> %a, <32 x i8> %b)
-  store <32 x i8> %c, <32 x i8>* %p
+  store <32 x i8> %c, ptr %p
   ret void
 }
 
@@ -854,7 +854,7 @@ define <8 x i16> @smin8i16(<8 x i16> %a, <8 x i16> %b) {
 
 declare <16 x i16> @llvm.smin.v16i16(<16 x i16> %a, <16 x i16> %b) readnone
 
-define void @smin16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
+define void @smin16i16(<16 x i16> %a, <16 x i16> %b, ptr %p) {
 ; CHECK-ISEL-LABEL: smin16i16:
 ; CHECK-ISEL:       // %bb.0:
 ; CHECK-ISEL-NEXT:    smin v1.8h, v1.8h, v3.8h
@@ -876,7 +876,7 @@ define void @smin16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
 ; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call <16 x i16> @llvm.smin.v16i16(<16 x i16> %a, <16 x i16> %b)
-  store <16 x i16> %c, <16 x i16>* %p
+  store <16 x i16> %c, ptr %p
   ret void
 }
 
@@ -904,7 +904,7 @@ define <4 x i32> @smin4i32(<4 x i32> %a, <4 x i32> %b) {
 
 declare <8 x i32> @llvm.smin.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
 
-define void @smin8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+define void @smin8i32(<8 x i32> %a, <8 x i32> %b, ptr %p) {
 ; CHECK-ISEL-LABEL: smin8i32:
 ; CHECK-ISEL:       // %bb.0:
 ; CHECK-ISEL-NEXT:    smin v1.4s, v1.4s, v3.4s
@@ -926,7 +926,7 @@ define void @smin8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
 ; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call <8 x i32>@llvm.smin.v8i32(<8 x i32> %a, <8 x i32> %b)
-  store <8 x i32> %c, <8 x i32>* %p
+  store <8 x i32> %c, ptr %p
   ret void
 }
 
@@ -982,7 +982,7 @@ define <2 x i64> @smin2i64(<2 x i64> %a, <2 x i64> %b) {
 
 declare <4 x i64> @llvm.smin.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
 
-define void @smin4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+define void @smin4i64(<4 x i64> %a, <4 x i64> %b, ptr %p) {
 ; CHECK-ISEL-LABEL: smin4i64:
 ; CHECK-ISEL:       // %bb.0:
 ; CHECK-ISEL-NEXT:    cmgt v4.2d, v3.2d, v1.2d
@@ -1010,7 +1010,7 @@ define void @smin4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
 ; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %a, <4 x i64> %b)
-  store <4 x i64> %c, <4 x i64>* %p
+  store <4 x i64> %c, ptr %p
   ret void
 }
 
@@ -1140,7 +1140,7 @@ define <16 x i8> @umin16i8(<16 x i8> %a, <16 x i8> %b) {
 
 declare <32 x i8> @llvm.umin.v32i8(<32 x i8> %a, <32 x i8> %b) readnone
 
-define void @umin32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
+define void @umin32i8(<32 x i8> %a, <32 x i8> %b, ptr %p) {
 ; CHECK-ISEL-LABEL: umin32i8:
 ; CHECK-ISEL:       // %bb.0:
 ; CHECK-ISEL-NEXT:    umin v1.16b, v1.16b, v3.16b
@@ -1162,7 +1162,7 @@ define void @umin32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
 ; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call <32 x i8> @llvm.umin.v32i8(<32 x i8> %a, <32 x i8> %b)
-  store <32 x i8> %c, <32 x i8>* %p
+  store <32 x i8> %c, ptr %p
   ret void
 }
 
@@ -1190,7 +1190,7 @@ define <8 x i16> @umin8i16(<8 x i16> %a, <8 x i16> %b) {
 
 declare <16 x i16> @llvm.umin.v16i16(<16 x i16> %a, <16 x i16> %b) readnone
 
-define void @umin16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
+define void @umin16i16(<16 x i16> %a, <16 x i16> %b, ptr %p) {
 ; CHECK-ISEL-LABEL: umin16i16:
 ; CHECK-ISEL:       // %bb.0:
 ; CHECK-ISEL-NEXT:    umin v1.8h, v1.8h, v3.8h
@@ -1212,7 +1212,7 @@ define void @umin16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
 ; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call <16 x i16> @llvm.umin.v16i16(<16 x i16> %a, <16 x i16> %b)
-  store <16 x i16> %c, <16 x i16>* %p
+  store <16 x i16> %c, ptr %p
   ret void
 }
 
@@ -1240,7 +1240,7 @@ define <4 x i32> @umin4i32(<4 x i32> %a, <4 x i32> %b) {
 
 declare <8 x i32> @llvm.umin.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
 
-define void @umin8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+define void @umin8i32(<8 x i32> %a, <8 x i32> %b, ptr %p) {
 ; CHECK-ISEL-LABEL: umin8i32:
 ; CHECK-ISEL:       // %bb.0:
 ; CHECK-ISEL-NEXT:    umin v1.4s, v1.4s, v3.4s
@@ -1262,7 +1262,7 @@ define void @umin8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
 ; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call <8 x i32>@llvm.umin.v8i32(<8 x i32> %a, <8 x i32> %b)
-  store <8 x i32> %c, <8 x i32>* %p
+  store <8 x i32> %c, ptr %p
   ret void
 }
 
@@ -1318,7 +1318,7 @@ define <2 x i64> @umin2i64(<2 x i64> %a, <2 x i64> %b) {
 
 declare <4 x i64> @llvm.umin.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
 
-define void @umin4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+define void @umin4i64(<4 x i64> %a, <4 x i64> %b, ptr %p) {
 ; CHECK-ISEL-LABEL: umin4i64:
 ; CHECK-ISEL:       // %bb.0:
 ; CHECK-ISEL-NEXT:    cmhi v4.2d, v3.2d, v1.2d
@@ -1346,6 +1346,6 @@ define void @umin4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
 ; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %a, <4 x i64> %b)
-  store <4 x i64> %c, <4 x i64>* %p
+  store <4 x i64> %c, ptr %p
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/mingw-refptr.ll b/llvm/test/CodeGen/AArch64/mingw-refptr.ll
index e68658eadeec1..306bee9f85c42 100644
--- a/llvm/test/CodeGen/AArch64/mingw-refptr.ll
+++ b/llvm/test/CodeGen/AArch64/mingw-refptr.ll
@@ -15,7 +15,7 @@ define dso_local i32 @getVar() {
 ; CHECK:    ldr  w0, [x8]
 ; CHECK:    ret
 entry:
-  %0 = load i32, i32* @var, align 4
+  %0 = load i32, ptr @var, align 4
   ret i32 %0
 }
 
@@ -25,7 +25,7 @@ define dso_local i32 @getDsoLocalVar() {
 ; CHECK:    ldr  w0, [x8, :lo12:dsolocalvar]
 ; CHECK:    ret
 entry:
-  %0 = load i32, i32* @dsolocalvar, align 4
+  %0 = load i32, ptr @dsolocalvar, align 4
   ret i32 %0
 }
 
@@ -35,7 +35,7 @@ define dso_local i32 @getLocalVar() {
 ; CHECK:    ldr  w0, [x8, :lo12:localvar]
 ; CHECK:    ret
 entry:
-  %0 = load i32, i32* @localvar, align 4
+  %0 = load i32, ptr @localvar, align 4
   ret i32 %0
 }
 
@@ -45,7 +45,7 @@ define dso_local i32 @getLocalCommon() {
 ; CHECK:    ldr  w0, [x8, :lo12:localcommon]
 ; CHECK:    ret
 entry:
-  %0 = load i32, i32* @localcommon, align 4
+  %0 = load i32, ptr @localcommon, align 4
   ret i32 %0
 }
 
@@ -56,7 +56,7 @@ define dso_local i32 @getExtVar() {
 ; CHECK:    ldr  w0, [x8]
 ; CHECK:    ret
 entry:
-  %0 = load i32, i32* @extvar, align 4
+  %0 = load i32, ptr @extvar, align 4
   ret i32 %0
 }
 
@@ -82,15 +82,15 @@ define dso_local void @sspFunc() #0 {
 ; GISEL:    ldr  x8, [x8]
 entry:
   %c = alloca i8, align 1
-  call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %c)
-  call void @ptrUser(i8* nonnull %c)
-  call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %c)
+  call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %c)
+  call void @ptrUser(ptr nonnull %c)
+  call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %c)
   ret void
 }
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare dso_local void @ptrUser(i8*) local_unnamed_addr #2
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare dso_local void @ptrUser(ptr) local_unnamed_addr #2
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
 
 attributes #0 = { sspstrong }
 

diff  --git a/llvm/test/CodeGen/AArch64/misched-fusion-addadrp.ll b/llvm/test/CodeGen/AArch64/misched-fusion-addadrp.ll
index ed53c77e4c88c..1dc594ba878ea 100644
--- a/llvm/test/CodeGen/AArch64/misched-fusion-addadrp.ll
+++ b/llvm/test/CodeGen/AArch64/misched-fusion-addadrp.ll
@@ -13,18 +13,18 @@
 ; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=neoverse-v1     | FileCheck %s
 ; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=neoverse-n2     | FileCheck %s
 
-@g = common local_unnamed_addr global i8* null, align 8
+@g = common local_unnamed_addr global ptr null, align 8
 
-define dso_local i8* @addldr(i32 %a, i32 %b) {
+define dso_local ptr @addldr(i32 %a, i32 %b) {
 ; CHECK-LABEL: addldr:
 ; CHECK: adrp [[R:x[0-9]+]], addldr
 ; CHECK-NEXT: add {{x[0-9]+}}, [[R]], :lo12:addldr
 entry:
   %add = add nsw i32 %b, %a
   %idx.ext = sext i32 %add to i64
-  %add.ptr = getelementptr i8, i8* bitcast (i8* (i32, i32)* @addldr to i8*), i64 %idx.ext
-  store i8* %add.ptr, i8** @g, align 8
-  ret i8* %add.ptr
+  %add.ptr = getelementptr i8, ptr @addldr, i64 %idx.ext
+  store ptr %add.ptr, ptr @g, align 8
+  ret ptr %add.ptr
 }
 
 

diff  --git a/llvm/test/CodeGen/AArch64/misched-fusion-addr-tune.ll b/llvm/test/CodeGen/AArch64/misched-fusion-addr-tune.ll
index db28719ddfbfe..05297f3a52c0c 100644
--- a/llvm/test/CodeGen/AArch64/misched-fusion-addr-tune.ll
+++ b/llvm/test/CodeGen/AArch64/misched-fusion-addr-tune.ll
@@ -5,12 +5,12 @@
 @var_double2 = dso_local global <2 x double> <double 0.0, double 0.0>
 
 define dso_local void @ldst_double() {
-  %valf = load volatile float, float* @var_float
+  %valf = load volatile float, ptr @var_float
   %vale = fpext float %valf to double
-  %vald = load volatile double, double* @var_double
+  %vald = load volatile double, ptr @var_double
   %vald1 = insertelement <2 x double> undef, double %vald, i32 0
   %vald2 = insertelement <2 x double> %vald1, double %vale, i32 1
-  store volatile <2 x double> %vald2, <2 x double>* @var_double2
+  store volatile <2 x double> %vald2, ptr @var_double2
   ret void
 
 ; CHECK-LABEL: ldst_double:
@@ -21,12 +21,12 @@ define dso_local void @ldst_double() {
 }
 
 define dso_local void @ldst_double_tune_a53() #0 {
-  %valf = load volatile float, float* @var_float
+  %valf = load volatile float, ptr @var_float
   %vale = fpext float %valf to double
-  %vald = load volatile double, double* @var_double
+  %vald = load volatile double, ptr @var_double
   %vald1 = insertelement <2 x double> undef, double %vald, i32 0
   %vald2 = insertelement <2 x double> %vald1, double %vale, i32 1
-  store volatile <2 x double> %vald2, <2 x double>* @var_double2
+  store volatile <2 x double> %vald2, ptr @var_double2
   ret void
 
 ; CHECK-LABEL: ldst_double_tune_a53:

diff  --git a/llvm/test/CodeGen/AArch64/misched-fusion-addr.ll b/llvm/test/CodeGen/AArch64/misched-fusion-addr.ll
index 158a64acf5df2..29349952ce876 100644
--- a/llvm/test/CodeGen/AArch64/misched-fusion-addr.ll
+++ b/llvm/test/CodeGen/AArch64/misched-fusion-addr.ll
@@ -17,11 +17,11 @@ target triple = "aarch64-unknown"
 @var_double2 = dso_local global <2 x double> <double 0.0, double 0.0>
 
 define dso_local void @ldst_8bit() {
-  %val8 = load volatile i8, i8* @var_8bit
+  %val8 = load volatile i8, ptr @var_8bit
   %ext = zext i8 %val8 to i64
   %add = add i64 %ext, 1
   %val16 = trunc i64 %add to i16
-  store volatile i16 %val16, i16* @var_16bit
+  store volatile i16 %val16, ptr @var_16bit
   ret void
 
 ; CHECK-LABEL: ldst_8bit:
@@ -32,11 +32,11 @@ define dso_local void @ldst_8bit() {
 }
 
 define dso_local void @ldst_16bit() {
-  %val16 = load volatile i16, i16* @var_16bit
+  %val16 = load volatile i16, ptr @var_16bit
   %ext = zext i16 %val16 to i64
   %add = add i64 %ext, 1
   %val32 = trunc i64 %add to i32
-  store volatile i32 %val32, i32* @var_32bit
+  store volatile i32 %val32, ptr @var_32bit
   ret void
 
 ; CHECK-LABEL: ldst_16bit:
@@ -47,10 +47,10 @@ define dso_local void @ldst_16bit() {
 }
 
 define dso_local void @ldst_32bit() {
-  %val32 = load volatile i32, i32* @var_32bit
+  %val32 = load volatile i32, ptr @var_32bit
   %ext = zext i32 %val32 to i64
   %val64 = add i64 %ext, 1
-  store volatile i64 %val64, i64* @var_64bit
+  store volatile i64 %val64, ptr @var_64bit
   ret void
 
 ; CHECK-LABEL: ldst_32bit:
@@ -61,10 +61,10 @@ define dso_local void @ldst_32bit() {
 }
 
 define dso_local void @ldst_64bit() {
-  %val64 = load volatile i64, i64* @var_64bit
+  %val64 = load volatile i64, ptr @var_64bit
   %ext = zext i64 %val64 to i128
   %val128 = add i128 %ext, 1
-  store volatile i128 %val128, i128* @var_128bit
+  store volatile i128 %val128, ptr @var_128bit
   ret void
 
 ; CHECK-LABEL: ldst_64bit:
@@ -75,9 +75,9 @@ define dso_local void @ldst_64bit() {
 }
 
 define dso_local void @ldst_half() {
-  %valh = load volatile half, half* @var_half
+  %valh = load volatile half, ptr @var_half
   %valf = fpext half %valh to float
-  store volatile float %valf, float* @var_float
+  store volatile float %valf, ptr @var_float
   ret void
 
 ; CHECK-LABEL: ldst_half:
@@ -88,9 +88,9 @@ define dso_local void @ldst_half() {
 }
 
 define dso_local void @ldst_float() {
-  %valf = load volatile float, float* @var_float
+  %valf = load volatile float, ptr @var_float
   %vald = fpext float %valf to double
-  store volatile double %vald, double* @var_double
+  store volatile double %vald, ptr @var_double
   ret void
 
 ; CHECK-LABEL: ldst_float:
@@ -101,12 +101,12 @@ define dso_local void @ldst_float() {
 }
 
 define dso_local void @ldst_double() {
-  %valf = load volatile float, float* @var_float
+  %valf = load volatile float, ptr @var_float
   %vale = fpext float %valf to double
-  %vald = load volatile double, double* @var_double
+  %vald = load volatile double, ptr @var_double
   %vald1 = insertelement <2 x double> undef, double %vald, i32 0
   %vald2 = insertelement <2 x double> %vald1, double %vale, i32 1
-  store volatile <2 x double> %vald2, <2 x double>* @var_double2
+  store volatile <2 x double> %vald2, ptr @var_double2
   ret void
 
 ; CHECK-LABEL: ldst_double:

diff  --git a/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll b/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll
index 111f17363bc3f..6ee3cb4892852 100644
--- a/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll
+++ b/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll
@@ -24,15 +24,15 @@ declare <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %d)
 declare <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d, <16 x i8> %k)
 declare <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %d)
 
-define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) {
-  %d0 = load <16 x i8>, <16 x i8>* %a0
-  %a1 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 1
-  %d1 = load <16 x i8>, <16 x i8>* %a1
-  %a2 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 2
-  %d2 = load <16 x i8>, <16 x i8>* %a2
-  %a3 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 3
-  %d3 = load <16 x i8>, <16 x i8>* %a3
-  %k0 = load <16 x i8>, <16 x i8>* %b0
+define void @aesea(ptr %a0, ptr %b0, ptr %c0, <16 x i8> %d, <16 x i8> %e) {
+  %d0 = load <16 x i8>, ptr %a0
+  %a1 = getelementptr inbounds <16 x i8>, ptr %a0, i64 1
+  %d1 = load <16 x i8>, ptr %a1
+  %a2 = getelementptr inbounds <16 x i8>, ptr %a0, i64 2
+  %d2 = load <16 x i8>, ptr %a2
+  %a3 = getelementptr inbounds <16 x i8>, ptr %a0, i64 3
+  %d3 = load <16 x i8>, ptr %a3
+  %k0 = load <16 x i8>, ptr %b0
   %e00 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d0, <16 x i8> %k0)
   %f00 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e00)
   %e01 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d1, <16 x i8> %k0)
@@ -41,8 +41,8 @@ define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
   %f02 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e02)
   %e03 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d3, <16 x i8> %k0)
   %f03 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e03)
-  %b1 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 1
-  %k1 = load <16 x i8>, <16 x i8>* %b1
+  %b1 = getelementptr inbounds <16 x i8>, ptr %b0, i64 1
+  %k1 = load <16 x i8>, ptr %b1
   %e10 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f00, <16 x i8> %k1)
   %f10 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e00)
   %e11 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f01, <16 x i8> %k1)
@@ -51,8 +51,8 @@ define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
   %f12 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e02)
   %e13 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f03, <16 x i8> %k1)
   %f13 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e03)
-  %b2 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 2
-  %k2 = load <16 x i8>, <16 x i8>* %b2
+  %b2 = getelementptr inbounds <16 x i8>, ptr %b0, i64 2
+  %k2 = load <16 x i8>, ptr %b2
   %e20 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f10, <16 x i8> %k2)
   %f20 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e10)
   %e21 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f11, <16 x i8> %k2)
@@ -61,8 +61,8 @@ define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
   %f22 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e12)
   %e23 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f13, <16 x i8> %k2)
   %f23 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e13)
-  %b3 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 3
-  %k3 = load <16 x i8>, <16 x i8>* %b3
+  %b3 = getelementptr inbounds <16 x i8>, ptr %b0, i64 3
+  %k3 = load <16 x i8>, ptr %b3
   %e30 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f20, <16 x i8> %k3)
   %f30 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e20)
   %e31 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f21, <16 x i8> %k3)
@@ -79,13 +79,13 @@ define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
   %h2 = xor <16 x i8> %g2, %e
   %g3 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f33, <16 x i8> %d)
   %h3 = xor <16 x i8> %g3, %e
-  store <16 x i8> %h0, <16 x i8>* %c0
-  %c1 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 1
-  store <16 x i8> %h1, <16 x i8>* %c1
-  %c2 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 2
-  store <16 x i8> %h2, <16 x i8>* %c2
-  %c3 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 3
-  store <16 x i8> %h3, <16 x i8>* %c3
+  store <16 x i8> %h0, ptr %c0
+  %c1 = getelementptr inbounds <16 x i8>, ptr %c0, i64 1
+  store <16 x i8> %h1, ptr %c1
+  %c2 = getelementptr inbounds <16 x i8>, ptr %c0, i64 2
+  store <16 x i8> %h2, ptr %c2
+  %c3 = getelementptr inbounds <16 x i8>, ptr %c0, i64 3
+  store <16 x i8> %h3, ptr %c3
   ret void
 
 ; CHECK-LABEL: aesea:
@@ -108,15 +108,15 @@ define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
 ; CHECK-NOT: aesmc
 }
 
-define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) {
-  %d0 = load <16 x i8>, <16 x i8>* %a0
-  %a1 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 1
-  %d1 = load <16 x i8>, <16 x i8>* %a1
-  %a2 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 2
-  %d2 = load <16 x i8>, <16 x i8>* %a2
-  %a3 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 3
-  %d3 = load <16 x i8>, <16 x i8>* %a3
-  %k0 = load <16 x i8>, <16 x i8>* %b0
+define void @aesda(ptr %a0, ptr %b0, ptr %c0, <16 x i8> %d, <16 x i8> %e) {
+  %d0 = load <16 x i8>, ptr %a0
+  %a1 = getelementptr inbounds <16 x i8>, ptr %a0, i64 1
+  %d1 = load <16 x i8>, ptr %a1
+  %a2 = getelementptr inbounds <16 x i8>, ptr %a0, i64 2
+  %d2 = load <16 x i8>, ptr %a2
+  %a3 = getelementptr inbounds <16 x i8>, ptr %a0, i64 3
+  %d3 = load <16 x i8>, ptr %a3
+  %k0 = load <16 x i8>, ptr %b0
   %e00 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d0, <16 x i8> %k0)
   %f00 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e00)
   %e01 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d1, <16 x i8> %k0)
@@ -125,8 +125,8 @@ define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
   %f02 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e02)
   %e03 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d3, <16 x i8> %k0)
   %f03 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e03)
-  %b1 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 1
-  %k1 = load <16 x i8>, <16 x i8>* %b1
+  %b1 = getelementptr inbounds <16 x i8>, ptr %b0, i64 1
+  %k1 = load <16 x i8>, ptr %b1
   %e10 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f00, <16 x i8> %k1)
   %f10 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e00)
   %e11 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f01, <16 x i8> %k1)
@@ -135,8 +135,8 @@ define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
   %f12 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e02)
   %e13 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f03, <16 x i8> %k1)
   %f13 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e03)
-  %b2 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 2
-  %k2 = load <16 x i8>, <16 x i8>* %b2
+  %b2 = getelementptr inbounds <16 x i8>, ptr %b0, i64 2
+  %k2 = load <16 x i8>, ptr %b2
   %e20 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f10, <16 x i8> %k2)
   %f20 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e10)
   %e21 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f11, <16 x i8> %k2)
@@ -145,8 +145,8 @@ define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
   %f22 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e12)
   %e23 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f13, <16 x i8> %k2)
   %f23 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e13)
-  %b3 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 3
-  %k3 = load <16 x i8>, <16 x i8>* %b3
+  %b3 = getelementptr inbounds <16 x i8>, ptr %b0, i64 3
+  %k3 = load <16 x i8>, ptr %b3
   %e30 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f20, <16 x i8> %k3)
   %f30 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e20)
   %e31 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f21, <16 x i8> %k3)
@@ -163,13 +163,13 @@ define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
   %h2 = xor <16 x i8> %g2, %e
   %g3 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f33, <16 x i8> %d)
   %h3 = xor <16 x i8> %g3, %e
-  store <16 x i8> %h0, <16 x i8>* %c0
-  %c1 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 1
-  store <16 x i8> %h1, <16 x i8>* %c1
-  %c2 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 2
-  store <16 x i8> %h2, <16 x i8>* %c2
-  %c3 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 3
-  store <16 x i8> %h3, <16 x i8>* %c3
+  store <16 x i8> %h0, ptr %c0
+  %c1 = getelementptr inbounds <16 x i8>, ptr %c0, i64 1
+  store <16 x i8> %h1, ptr %c1
+  %c2 = getelementptr inbounds <16 x i8>, ptr %c0, i64 2
+  store <16 x i8> %h2, ptr %c2
+  %c3 = getelementptr inbounds <16 x i8>, ptr %c0, i64 3
+  store <16 x i8> %h3, ptr %c3
   ret void
 
 ; CHECK-LABEL: aesda:
@@ -192,24 +192,24 @@ define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
 ; CHECK-NOT: aesimc
 }
 
-define void @aes_load_store(<16 x i8> *%p1, <16 x i8> *%p2 , <16 x i8> *%p3) {
+define void @aes_load_store(ptr %p1, ptr %p2 , ptr %p3) {
 entry:
   %x1 = alloca <16 x i8>, align 16
   %x2 = alloca <16 x i8>, align 16
   %x3 = alloca <16 x i8>, align 16
   %x4 = alloca <16 x i8>, align 16
   %x5 = alloca <16 x i8>, align 16
-  %in1 = load <16 x i8>, <16 x i8>* %p1, align 16
-  store <16 x i8> %in1, <16 x i8>* %x1, align 16
+  %in1 = load <16 x i8>, ptr %p1, align 16
+  store <16 x i8> %in1, ptr %x1, align 16
   %aese1 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in1) #2
-  %in2 = load <16 x i8>, <16 x i8>* %p2, align 16
+  %in2 = load <16 x i8>, ptr %p2, align 16
   %aesmc1= call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese1) #2
   %aese2 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in2) #2
-  store <16 x i8> %aesmc1, <16 x i8>* %x3, align 16
-  %in3 = load <16 x i8>, <16 x i8>* %p3, align 16
+  store <16 x i8> %aesmc1, ptr %x3, align 16
+  %in3 = load <16 x i8>, ptr %p3, align 16
   %aesmc2= call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese2) #2
   %aese3 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %aesmc2, <16 x i8> %in3) #2
-  store <16 x i8> %aese3, <16 x i8>* %x5, align 16
+  store <16 x i8> %aese3, ptr %x5, align 16
   ret void
 
 ; CHECK-LABEL: aes_load_store:

diff  --git a/llvm/test/CodeGen/AArch64/misched-fusion-lit.ll b/llvm/test/CodeGen/AArch64/misched-fusion-lit.ll
index 9cea33126e92d..ad244d30df11f 100644
--- a/llvm/test/CodeGen/AArch64/misched-fusion-lit.ll
+++ b/llvm/test/CodeGen/AArch64/misched-fusion-lit.ll
@@ -8,41 +8,41 @@
 ; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m5       | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKFUSE
 ; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=neoverse-n1     | FileCheck %s --check-prefix=CHECKFUSE-NEOVERSE
 
-@g = common local_unnamed_addr global i8* null, align 8
+@g = common local_unnamed_addr global ptr null, align 8
 
-define dso_local i8* @litp(i32 %a, i32 %b) {
+define dso_local ptr @litp(i32 %a, i32 %b) {
 entry:
   %add = add nsw i32 %b, %a
   %idx.ext = sext i32 %add to i64
-  %add.ptr = getelementptr i8, i8* bitcast (i8* (i32, i32)* @litp to i8*), i64 %idx.ext
-  store i8* %add.ptr, i8** @g, align 8
-  ret i8* %add.ptr
+  %add.ptr = getelementptr i8, ptr @litp, i64 %idx.ext
+  store ptr %add.ptr, ptr @g, align 8
+  ret ptr %add.ptr
 
 ; CHECK-LABEL: litp:
 ; CHECK: adrp [[R:x[0-9]+]], litp
 ; CHECKFUSE-NEXT: add {{x[0-9]+}}, [[R]], :lo12:litp
 }
 
-define dso_local i8* @litp_tune_generic(i32 %a, i32 %b) "tune-cpu"="generic" {
+define dso_local ptr @litp_tune_generic(i32 %a, i32 %b) "tune-cpu"="generic" {
 entry:
   %add = add nsw i32 %b, %a
   %idx.ext = sext i32 %add to i64
-  %add.ptr = getelementptr i8, i8* bitcast (i8* (i32, i32)* @litp_tune_generic to i8*), i64 %idx.ext
-  store i8* %add.ptr, i8** @g, align 8
-  ret i8* %add.ptr
+  %add.ptr = getelementptr i8, ptr @litp_tune_generic, i64 %idx.ext
+  store ptr %add.ptr, ptr @g, align 8
+  ret ptr %add.ptr
 
 ; CHECK-LABEL: litp_tune_generic:
 ; CHECK:         adrp [[R:x[0-9]+]], litp_tune_generic
 ; CHECK-NEXT:    add {{x[0-9]+}}, [[R]], :lo12:litp_tune_generic
 }
 
-define dso_local i8* @litp_tune_neoverse_n1(i32 %a, i32 %b) "tune-cpu"="neoverse-n1" {
+define dso_local ptr @litp_tune_neoverse_n1(i32 %a, i32 %b) "tune-cpu"="neoverse-n1" {
 entry:
   %add = add nsw i32 %b, %a
   %idx.ext = sext i32 %add to i64
-  %add.ptr = getelementptr i8, i8* bitcast (i8* (i32, i32)* @litp_tune_generic to i8*), i64 %idx.ext
-  store i8* %add.ptr, i8** @g, align 8
-  ret i8* %add.ptr
+  %add.ptr = getelementptr i8, ptr @litp_tune_generic, i64 %idx.ext
+  store ptr %add.ptr, ptr @g, align 8
+  ret ptr %add.ptr
 
 ; CHECKFUSE-NEOVERSE-LABEL: litp_tune_neoverse_n1:
 ; CHECKFUSE-NEOVERSE:         adrp [[R:x[0-9]+]], litp_tune_generic

diff  --git a/llvm/test/CodeGen/AArch64/misched-stp.ll b/llvm/test/CodeGen/AArch64/misched-stp.ll
index a92be688c4f46..0c8be4b1c19e6 100644
--- a/llvm/test/CodeGen/AArch64/misched-stp.ll
+++ b/llvm/test/CodeGen/AArch64/misched-stp.ll
@@ -13,38 +13,36 @@
 ; CHECK:     ldr [[REG:w[0-9]+]], [x2]
 ; CHECK-DAG: stp w0, [[REG]], [x2, #12]
 ; CHECK-DAG: stp [[REG]], w1, [x2, #4]
-define void @test_splat(i32 %x, i32 %y, i32* %p) {
+define void @test_splat(i32 %x, i32 %y, ptr %p) {
 entry:
-  %val = load i32, i32* %p, align 4
-  %0 = getelementptr inbounds i32, i32* %p, i64 1
-  %1 = getelementptr inbounds i32, i32* %p, i64 2
-  %2 = getelementptr inbounds i32, i32* %p, i64 3
+  %val = load i32, ptr %p, align 4
+  %0 = getelementptr inbounds i32, ptr %p, i64 1
+  %1 = getelementptr inbounds i32, ptr %p, i64 2
+  %2 = getelementptr inbounds i32, ptr %p, i64 3
   %vec0 = insertelement <4 x i32> undef, i32 %val, i32 0
   %vec1 = insertelement <4 x i32> %vec0, i32 %val, i32 1
   %vec2 = insertelement <4 x i32> %vec1, i32 %val, i32 2
   %vec3 = insertelement <4 x i32> %vec2, i32 %val, i32 3
-  %3 = bitcast i32* %0 to <4 x i32>*
-  store <4 x i32> %vec3, <4 x i32>* %3, align 4
-  store i32 %x, i32* %2, align 4
-  store i32 %y, i32* %1, align 4
+  store <4 x i32> %vec3, ptr %0, align 4
+  store i32 %x, ptr %2, align 4
+  store i32 %y, ptr %1, align 4
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
-%struct.tree_common = type { i8*, i8*, i32 }
+declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
+%struct.tree_common = type { ptr, ptr, i32 }
 
 ; CHECK-LABEL: test_zero
 ; CHECK-DAG: stp x2, xzr, [x0, #8]
 ; CHECK-DAG: str w1, [x0, #16]
 ; CHECK-DAG: str xzr, [x0]
 
-define void @test_zero(%struct.tree_common* %t, i32 %code, i8* %type) {
+define void @test_zero(ptr %t, i32 %code, ptr %type) {
 entry:
-  %0 = bitcast %struct.tree_common* %t to i8*
-  tail call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 24, i1 false)
-  %code1 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 2
-  store i32 %code, i32* %code1, align 8
-  %type2 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 1
-  store i8* %type, i8** %type2, align 8
+  tail call void @llvm.memset.p0.i64(ptr align 8 %t, i8 0, i64 24, i1 false)
+  %code1 = getelementptr inbounds %struct.tree_common, ptr %t, i64 0, i32 2
+  store i32 %code, ptr %code1, align 8
+  %type2 = getelementptr inbounds %struct.tree_common, ptr %t, i64 0, i32 1
+  store ptr %type, ptr %type2, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/movw-consts.ll b/llvm/test/CodeGen/AArch64/movw-consts.ll
index 66c011aaa69c8..d585f74199cc2 100644
--- a/llvm/test/CodeGen/AArch64/movw-consts.ll
+++ b/llvm/test/CodeGen/AArch64/movw-consts.ll
@@ -102,7 +102,7 @@ define void @test11() {
 ; CHECK-NEXT:    adrp x8, _var32@PAGE
 ; CHECK-NEXT:    str wzr, [x8, _var32@PAGEOFF]
 ; CHECK-NEXT:    ret
-  store i32 0, i32* @var32
+  store i32 0, ptr @var32
   ret void
 }
 
@@ -113,7 +113,7 @@ define void @test12() {
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    str w9, [x8, _var32@PAGEOFF]
 ; CHECK-NEXT:    ret
-  store i32 1, i32* @var32
+  store i32 1, ptr @var32
   ret void
 }
 
@@ -124,7 +124,7 @@ define void @test13() {
 ; CHECK-NEXT:    mov w9, #65535
 ; CHECK-NEXT:    str w9, [x8, _var32@PAGEOFF]
 ; CHECK-NEXT:    ret
-  store i32 65535, i32* @var32
+  store i32 65535, ptr @var32
   ret void
 }
 
@@ -135,7 +135,7 @@ define void @test14() {
 ; CHECK-NEXT:    mov w9, #65536
 ; CHECK-NEXT:    str w9, [x8, _var32@PAGEOFF]
 ; CHECK-NEXT:    ret
-  store i32 65536, i32* @var32
+  store i32 65536, ptr @var32
   ret void
 }
 
@@ -146,7 +146,7 @@ define void @test15() {
 ; CHECK-NEXT:    mov w9, #-65536
 ; CHECK-NEXT:    str w9, [x8, _var32@PAGEOFF]
 ; CHECK-NEXT:    ret
-  store i32 4294901760, i32* @var32
+  store i32 4294901760, ptr @var32
   ret void
 }
 
@@ -157,7 +157,7 @@ define void @test16() {
 ; CHECK-NEXT:    mov w9, #-1
 ; CHECK-NEXT:    str w9, [x8, _var32@PAGEOFF]
 ; CHECK-NEXT:    ret
-  store i32 -1, i32* @var32
+  store i32 -1, ptr @var32
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/movw-shift-encoding.ll b/llvm/test/CodeGen/AArch64/movw-shift-encoding.ll
index d3bbfd9e5caef..573dd3f6a184c 100644
--- a/llvm/test/CodeGen/AArch64/movw-shift-encoding.ll
+++ b/llvm/test/CodeGen/AArch64/movw-shift-encoding.ll
@@ -5,8 +5,8 @@
 ; CodeGen should ensure that the correct shift bits are set, because the linker
 ; isn't going to!
 
-define dso_local i32* @get_var() {
-  ret i32* @var
+define dso_local ptr @get_var() {
+  ret ptr @var
 
 ; CHECK: movz    x0, #:abs_g0_nc:var // encoding: [0bAAA00000,A,0b100AAAAA,0xd2]
 ; CHECK: movk    x0, #:abs_g1_nc:var // encoding: [0bAAA00000,A,0b101AAAAA,0xf2]

diff  --git a/llvm/test/CodeGen/AArch64/multi-vector-store-size.ll b/llvm/test/CodeGen/AArch64/multi-vector-store-size.ll
index ddc79400cee8a..5763ec61667f2 100644
--- a/llvm/test/CodeGen/AArch64/multi-vector-store-size.ll
+++ b/llvm/test/CodeGen/AArch64/multi-vector-store-size.ll
@@ -1,22 +1,22 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -stop-after=instruction-select < %s | FileCheck %s
 
-declare void @llvm.aarch64.neon.st2.v4f32.p0f32(<4 x float>, <4 x float>, float*)
-declare void @llvm.aarch64.neon.st3.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, float*)
-declare void @llvm.aarch64.neon.st4.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, <4 x float>, float*)
+declare void @llvm.aarch64.neon.st2.v4f32.p0(<4 x float>, <4 x float>, ptr)
+declare void @llvm.aarch64.neon.st3.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, ptr)
+declare void @llvm.aarch64.neon.st4.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, <4 x float>, ptr)
 
-declare void @llvm.aarch64.neon.st1x2.v4f32.p0f32(<4 x float>, <4 x float>, float*)
-declare void @llvm.aarch64.neon.st1x3.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, float*)
-declare void @llvm.aarch64.neon.st1x4.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, <4 x float>, float*)
+declare void @llvm.aarch64.neon.st1x2.v4f32.p0(<4 x float>, <4 x float>, ptr)
+declare void @llvm.aarch64.neon.st1x3.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, ptr)
+declare void @llvm.aarch64.neon.st1x4.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, <4 x float>, ptr)
 
-declare void @llvm.aarch64.neon.st2lane.v4f32.p0f32(<4 x float>, <4 x float>, i64, float*)
-declare void @llvm.aarch64.neon.st3lane.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, i64, float*)
-declare void @llvm.aarch64.neon.st4lane.v4f32.p0f32(<4 x float>, <4 x float>, <4 x float>, <4 x float>, i64, float*)
+declare void @llvm.aarch64.neon.st2lane.v4f32.p0(<4 x float>, <4 x float>, i64, ptr)
+declare void @llvm.aarch64.neon.st3lane.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, i64, ptr)
+declare void @llvm.aarch64.neon.st4lane.v4f32.p0(<4 x float>, <4 x float>, <4 x float>, <4 x float>, i64, ptr)
 
-define void @addstx(float* %res, <4 x float>* %a,  <4 x float>* %b, <4 x float>* %c, <4 x float>* %d) {
-  %al = load <4 x float>, <4 x float>* %a
-  %bl = load <4 x float>, <4 x float>* %b
-  %cl = load <4 x float>, <4 x float>* %c
-  %dl = load <4 x float>, <4 x float>* %d
+define void @addstx(ptr %res, ptr %a,  ptr %b, ptr %c, ptr %d) {
+  %al = load <4 x float>, ptr %a
+  %bl = load <4 x float>, ptr %b
+  %cl = load <4 x float>, ptr %c
+  %dl = load <4 x float>, ptr %d
 
   %ar = fadd <4 x float> %al, %bl
   %br = fadd <4 x float> %bl, %cl
@@ -25,21 +25,21 @@ define void @addstx(float* %res, <4 x float>* %a,  <4 x float>* %b, <4 x float>*
 
 ; The sizes below are conservative.  AArch64TargetLowering
 ; conservatively assumes the entire vector is stored.
-  tail call void @llvm.aarch64.neon.st2.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, float* %res)
+  tail call void @llvm.aarch64.neon.st2.v4f32.p0(<4 x float> %ar, <4 x float> %br, ptr %res)
 ; CHECK: ST2Twov4s {{.*}} :: (store (s256) {{.*}})
-  tail call void @llvm.aarch64.neon.st3.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, float* %res)
+  tail call void @llvm.aarch64.neon.st3.v4f32.p0(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, ptr %res)
 ; CHECK: ST3Threev4s {{.*}} :: (store (s384) {{.*}})
-  tail call void @llvm.aarch64.neon.st4.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, <4 x float> %dr, float* %res)
+  tail call void @llvm.aarch64.neon.st4.v4f32.p0(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, <4 x float> %dr, ptr %res)
 ; CHECK: ST4Fourv4s {{.*}} :: (store (s512) {{.*}})
 
   ret void
 }
 
-define void @addst1x(float* %res, <4 x float>* %a,  <4 x float>* %b, <4 x float>* %c, <4 x float>* %d) {
-  %al = load <4 x float>, <4 x float>* %a
-  %bl = load <4 x float>, <4 x float>* %b
-  %cl = load <4 x float>, <4 x float>* %c
-  %dl = load <4 x float>, <4 x float>* %d
+define void @addst1x(ptr %res, ptr %a,  ptr %b, ptr %c, ptr %d) {
+  %al = load <4 x float>, ptr %a
+  %bl = load <4 x float>, ptr %b
+  %cl = load <4 x float>, ptr %c
+  %dl = load <4 x float>, ptr %d
 
   %ar = fadd <4 x float> %al, %bl
   %br = fadd <4 x float> %bl, %cl
@@ -48,21 +48,21 @@ define void @addst1x(float* %res, <4 x float>* %a,  <4 x float>* %b, <4 x float>
 
 ; The sizes below are conservative.  AArch64TargetLowering
 ; conservatively assumes the entire vector is stored.
-  tail call void @llvm.aarch64.neon.st1x2.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, float* %res)
+  tail call void @llvm.aarch64.neon.st1x2.v4f32.p0(<4 x float> %ar, <4 x float> %br, ptr %res)
 ; CHECK: ST1Twov4s {{.*}} :: (store (s256) {{.*}})
-  tail call void @llvm.aarch64.neon.st1x3.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, float* %res)
+  tail call void @llvm.aarch64.neon.st1x3.v4f32.p0(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, ptr %res)
 ; CHECK: ST1Threev4s {{.*}} :: (store (s384) {{.*}})
-  tail call void @llvm.aarch64.neon.st1x4.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, <4 x float> %dr, float* %res)
+  tail call void @llvm.aarch64.neon.st1x4.v4f32.p0(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, <4 x float> %dr, ptr %res)
 ; CHECK: ST1Fourv4s {{.*}} :: (store (s512) {{.*}})
 
   ret void
 }
 
-define void @addstxlane(float* %res, <4 x float>* %a,  <4 x float>* %b, <4 x float>* %c, <4 x float>* %d) {
-  %al = load <4 x float>, <4 x float>* %a
-  %bl = load <4 x float>, <4 x float>* %b
-  %cl = load <4 x float>, <4 x float>* %c
-  %dl = load <4 x float>, <4 x float>* %d
+define void @addstxlane(ptr %res, ptr %a,  ptr %b, ptr %c, ptr %d) {
+  %al = load <4 x float>, ptr %a
+  %bl = load <4 x float>, ptr %b
+  %cl = load <4 x float>, ptr %c
+  %dl = load <4 x float>, ptr %d
 
   %ar = fadd <4 x float> %al, %bl
   %br = fadd <4 x float> %bl, %cl
@@ -71,11 +71,11 @@ define void @addstxlane(float* %res, <4 x float>* %a,  <4 x float>* %b, <4 x flo
 
 ; The sizes below are conservative.  AArch64TargetLowering
 ; conservatively assumes the entire vector is stored.
-  tail call void @llvm.aarch64.neon.st2lane.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, i64 1, float* %res)
+  tail call void @llvm.aarch64.neon.st2lane.v4f32.p0(<4 x float> %ar, <4 x float> %br, i64 1, ptr %res)
 ; CHECK: ST2i32 {{.*}} :: (store (s256) {{.*}})
-  tail call void @llvm.aarch64.neon.st3lane.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, i64 1, float* %res)
+  tail call void @llvm.aarch64.neon.st3lane.v4f32.p0(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, i64 1, ptr %res)
 ; CHECK: ST3i32 {{.*}} :: (store (s384) {{.*}})
-  tail call void @llvm.aarch64.neon.st4lane.v4f32.p0f32(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, <4 x float> %dr, i64 1, float* %res)
+  tail call void @llvm.aarch64.neon.st4lane.v4f32.p0(<4 x float> %ar, <4 x float> %br, <4 x float> %cr, <4 x float> %dr, i64 1, ptr %res)
 ; CHECK: ST4i32 {{.*}} :: (store (s512) {{.*}})
 
   ret void
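
As a quick illustration of the pattern this patch applies throughout, derived from the calls in the hunk above rather than copied from any single test: the pointee type disappears from the pointer arguments, and the pointer part of the overloaded intrinsic name collapses from ".p0f32" to a bare ".p0". The two declarations below are a minimal sketch of that before/after shape.

; Typed pointers: the element type is mangled into the intrinsic name.
declare void @llvm.aarch64.neon.st2lane.v4f32.p0f32(<4 x float>, <4 x float>, i64, float*)

; Opaque pointers: the argument is just "ptr" and the suffix is ".p0".
declare void @llvm.aarch64.neon.st2lane.v4f32.p0(<4 x float>, <4 x float>, i64, ptr)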

diff  --git a/llvm/test/CodeGen/AArch64/neon-addlv.ll b/llvm/test/CodeGen/AArch64/neon-addlv.ll
index 1e4931e322c9a..d6d9884a75067 100644
--- a/llvm/test/CodeGen/AArch64/neon-addlv.ll
+++ b/llvm/test/CodeGen/AArch64/neon-addlv.ll
@@ -19,66 +19,66 @@ declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) nounwind readnone
 declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) nounwind readnone
 declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>) nounwind readnone
 
-define i16 @uaddlv4h_from_v8i8(<8 x i8>* %A) nounwind {
+define i16 @uaddlv4h_from_v8i8(ptr %A) nounwind {
 ; CHECK-LABEL: uaddlv4h_from_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    uaddlv h0, v0.8b
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
+  %tmp1 = load <8 x i8>, ptr %A
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> %tmp1)
   %tmp5 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %tmp3)
   ret i16 %tmp5
 }
 
-define i16 @uaddlv16b_from_v16i8(<16 x i8>* %A) nounwind {
+define i16 @uaddlv16b_from_v16i8(ptr %A) nounwind {
 ; CHECK-LABEL: uaddlv16b_from_v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    uaddlv h0, v0.16b
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <16 x i8>, <16 x i8>* %A
+  %tmp1 = load <16 x i8>, ptr %A
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> %tmp1)
   %tmp5 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %tmp3)
   ret i16 %tmp5
 }
 
-define i32 @uaddlv8h_from_v8i16(<8 x i16>* %A) nounwind {
+define i32 @uaddlv8h_from_v8i16(ptr %A) nounwind {
 ; CHECK-LABEL: uaddlv8h_from_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    uaddlv s0, v0.8h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
+  %tmp1 = load <8 x i16>, ptr %A
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> %tmp1)
   %tmp5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp3)
   ret i32 %tmp5
 }
 
-define i64 @uaddlv4s_from_v4i32(<4 x i32>* %A) nounwind {
+define i64 @uaddlv4s_from_v4i32(ptr %A) nounwind {
 ; CHECK-LABEL: uaddlv4s_from_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    uaddlv d0, v0.4s
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
+  %tmp1 = load <4 x i32>, ptr %A
   %tmp3 = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> %tmp1)
   %tmp5 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %tmp3)
   ret i64 %tmp5
 }
 
-define i32 @uaddlv4h_from_v4i16(<4 x i16>* %A) nounwind {
+define i32 @uaddlv4h_from_v4i16(ptr %A) nounwind {
 ; CHECK-LABEL: uaddlv4h_from_v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    uaddlv s0, v0.4h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp1 = load <4 x i16>, ptr %A
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> %tmp1)
   %tmp5 = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %tmp3)
   ret i32 %tmp5
@@ -86,66 +86,66 @@ define i32 @uaddlv4h_from_v4i16(<4 x i16>* %A) nounwind {
 
 
 
-define i16 @saddlv4h_from_v8i8(<8 x i8>* %A) nounwind {
+define i16 @saddlv4h_from_v8i8(ptr %A) nounwind {
 ; CHECK-LABEL: saddlv4h_from_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    saddlv h0, v0.8b
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i8>, <8 x i8>* %A
+  %tmp1 = load <8 x i8>, ptr %A
   %tmp3 = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> %tmp1)
   %tmp5 = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> %tmp3)
   ret i16 %tmp5
 }
 
-define i16 @saddlv16b_from_v16i8(<16 x i8>* %A) nounwind {
+define i16 @saddlv16b_from_v16i8(ptr %A) nounwind {
 ; CHECK-LABEL: saddlv16b_from_v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    saddlv h0, v0.16b
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <16 x i8>, <16 x i8>* %A
+  %tmp1 = load <16 x i8>, ptr %A
   %tmp3 = call <8 x i16> @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8> %tmp1)
   %tmp5 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %tmp3)
   ret i16 %tmp5
 }
 
-define i32 @saddlv8h_from_v8i16(<8 x i16>* %A) nounwind {
+define i32 @saddlv8h_from_v8i16(ptr %A) nounwind {
 ; CHECK-LABEL: saddlv8h_from_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    saddlv s0, v0.8h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <8 x i16>, <8 x i16>* %A
+  %tmp1 = load <8 x i16>, ptr %A
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16> %tmp1)
   %tmp5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp3)
   ret i32 %tmp5
 }
 
-define i64 @saddlv4s_from_v4i32(<4 x i32>* %A) nounwind {
+define i64 @saddlv4s_from_v4i32(ptr %A) nounwind {
 ; CHECK-LABEL: saddlv4s_from_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    saddlv d0, v0.4s
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i32>, <4 x i32>* %A
+  %tmp1 = load <4 x i32>, ptr %A
   %tmp3 = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> %tmp1)
   %tmp5 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %tmp3)
   ret i64 %tmp5
 }
 
-define i32 @saddlv4h_from_v4i16(<4 x i16>* %A) nounwind {
+define i32 @saddlv4h_from_v4i16(ptr %A) nounwind {
 ; CHECK-LABEL: saddlv4h_from_v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    saddlv s0, v0.4h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %tmp1 = load <4 x i16>, <4 x i16>* %A
+  %tmp1 = load <4 x i16>, ptr %A
   %tmp3 = call <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16> %tmp1)
   %tmp5 = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> %tmp3)
   ret i32 %tmp5

diff  --git a/llvm/test/CodeGen/AArch64/neon-dotpattern.ll b/llvm/test/CodeGen/AArch64/neon-dotpattern.ll
index a7a2056acdd0e..6582dd9e38226 100644
--- a/llvm/test/CodeGen/AArch64/neon-dotpattern.ll
+++ b/llvm/test/CodeGen/AArch64/neon-dotpattern.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple aarch64-none-linux-gnu -mattr=+dotprod    < %s | FileCheck %s
 
-define fastcc void @test_sdot_v4i8(i8* noalias nocapture %0, i8* noalias nocapture readonly %1, i8* noalias nocapture readonly %2) {
+define fastcc void @test_sdot_v4i8(ptr noalias nocapture %0, ptr noalias nocapture readonly %1, ptr noalias nocapture readonly %2) {
 ; CHECK-LABEL: test_sdot_v4i8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x2]
@@ -14,41 +14,40 @@ define fastcc void @test_sdot_v4i8(i8* noalias nocapture %0, i8* noalias nocaptu
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %3 = bitcast i8* %0 to i32*
-  %4 = load i8, i8* %1, align 1
-  %5 = sext i8 %4 to i32
-  %6 = load i8, i8* %2, align 1
-  %7 = sext i8 %6 to i32
-  %8 = mul nsw i32 %7, %5
-  %9 = getelementptr inbounds i8, i8* %1, i64 1
-  %10 = load i8, i8* %9, align 1
-  %11 = sext i8 %10 to i32
-  %12 = getelementptr inbounds i8, i8* %2, i64 1
-  %13 = load i8, i8* %12, align 1
-  %14 = sext i8 %13 to i32
-  %15 = mul nsw i32 %14, %11
-  %16 = add nsw i32 %15, %8
-  %17 = getelementptr inbounds i8, i8* %1, i64 2
-  %18 = load i8, i8* %17, align 1
-  %19 = sext i8 %18 to i32
-  %20 = getelementptr inbounds i8, i8* %2, i64 2
-  %21 = load i8, i8* %20, align 1
-  %22 = sext i8 %21 to i32
-  %23 = mul nsw i32 %22, %19
-  %24 = add nsw i32 %23, %16
-  %25 = getelementptr inbounds i8, i8* %1, i64 3
-  %26 = load i8, i8* %25, align 1
-  %27 = sext i8 %26 to i32
-  %28 = getelementptr inbounds i8, i8* %2, i64 3
-  %29 = load i8, i8* %28, align 1
-  %30 = sext i8 %29 to i32
-  %31 = mul nsw i32 %30, %27
-  %32 = add nsw i32 %31, %24
-  store i32 %32, i32* %3, align 64
+  %3 = load i8, ptr %1, align 1
+  %4 = sext i8 %3 to i32
+  %5 = load i8, ptr %2, align 1
+  %6 = sext i8 %5 to i32
+  %7 = mul nsw i32 %6, %4
+  %8 = getelementptr inbounds i8, ptr %1, i64 1
+  %9 = load i8, ptr %8, align 1
+  %10 = sext i8 %9 to i32
+  %11 = getelementptr inbounds i8, ptr %2, i64 1
+  %12 = load i8, ptr %11, align 1
+  %13 = sext i8 %12 to i32
+  %14 = mul nsw i32 %13, %10
+  %15 = add nsw i32 %14, %7
+  %16 = getelementptr inbounds i8, ptr %1, i64 2
+  %17 = load i8, ptr %16, align 1
+  %18 = sext i8 %17 to i32
+  %19 = getelementptr inbounds i8, ptr %2, i64 2
+  %20 = load i8, ptr %19, align 1
+  %21 = sext i8 %20 to i32
+  %22 = mul nsw i32 %21, %18
+  %23 = add nsw i32 %22, %15
+  %24 = getelementptr inbounds i8, ptr %1, i64 3
+  %25 = load i8, ptr %24, align 1
+  %26 = sext i8 %25 to i32
+  %27 = getelementptr inbounds i8, ptr %2, i64 3
+  %28 = load i8, ptr %27, align 1
+  %29 = sext i8 %28 to i32
+  %30 = mul nsw i32 %29, %26
+  %31 = add nsw i32 %30, %23
+  store i32 %31, ptr %0, align 64
   ret void
 }
 
-define fastcc void @test_udot_v4i8(i8* noalias nocapture %0, i8* noalias nocapture readonly %1, i8* noalias nocapture readonly %2) {
+define fastcc void @test_udot_v4i8(ptr noalias nocapture %0, ptr noalias nocapture readonly %1, ptr noalias nocapture readonly %2) {
 ; CHECK-LABEL: test_udot_v4i8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x2]
@@ -61,36 +60,35 @@ define fastcc void @test_udot_v4i8(i8* noalias nocapture %0, i8* noalias nocaptu
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  %3 = bitcast i8* %0 to i32*
-  %4 = load i8, i8* %1, align 1
-  %5 = zext i8 %4 to i32
-  %6 = load i8, i8* %2, align 1
-  %7 = zext i8 %6 to i32
-  %8 = mul nsw i32 %7, %5
-  %9 = getelementptr inbounds i8, i8* %1, i64 1
-  %10 = load i8, i8* %9, align 1
-  %11 = zext i8 %10 to i32
-  %12 = getelementptr inbounds i8, i8* %2, i64 1
-  %13 = load i8, i8* %12, align 1
-  %14 = zext i8 %13 to i32
-  %15 = mul nsw i32 %14, %11
-  %16 = add nsw i32 %15, %8
-  %17 = getelementptr inbounds i8, i8* %1, i64 2
-  %18 = load i8, i8* %17, align 1
-  %19 = zext i8 %18 to i32
-  %20 = getelementptr inbounds i8, i8* %2, i64 2
-  %21 = load i8, i8* %20, align 1
-  %22 = zext i8 %21 to i32
-  %23 = mul nsw i32 %22, %19
-  %24 = add nsw i32 %23, %16
-  %25 = getelementptr inbounds i8, i8* %1, i64 3
-  %26 = load i8, i8* %25, align 1
-  %27 = zext i8 %26 to i32
-  %28 = getelementptr inbounds i8, i8* %2, i64 3
-  %29 = load i8, i8* %28, align 1
-  %30 = zext i8 %29 to i32
-  %31 = mul nsw i32 %30, %27
-  %32 = add nsw i32 %31, %24
-  store i32 %32, i32* %3, align 64
+  %3 = load i8, ptr %1, align 1
+  %4 = zext i8 %3 to i32
+  %5 = load i8, ptr %2, align 1
+  %6 = zext i8 %5 to i32
+  %7 = mul nsw i32 %6, %4
+  %8 = getelementptr inbounds i8, ptr %1, i64 1
+  %9 = load i8, ptr %8, align 1
+  %10 = zext i8 %9 to i32
+  %11 = getelementptr inbounds i8, ptr %2, i64 1
+  %12 = load i8, ptr %11, align 1
+  %13 = zext i8 %12 to i32
+  %14 = mul nsw i32 %13, %10
+  %15 = add nsw i32 %14, %7
+  %16 = getelementptr inbounds i8, ptr %1, i64 2
+  %17 = load i8, ptr %16, align 1
+  %18 = zext i8 %17 to i32
+  %19 = getelementptr inbounds i8, ptr %2, i64 2
+  %20 = load i8, ptr %19, align 1
+  %21 = zext i8 %20 to i32
+  %22 = mul nsw i32 %21, %18
+  %23 = add nsw i32 %22, %15
+  %24 = getelementptr inbounds i8, ptr %1, i64 3
+  %25 = load i8, ptr %24, align 1
+  %26 = zext i8 %25 to i32
+  %27 = getelementptr inbounds i8, ptr %2, i64 3
+  %28 = load i8, ptr %27, align 1
+  %29 = zext i8 %28 to i32
+  %30 = mul nsw i32 %29, %26
+  %31 = add nsw i32 %30, %23
+  store i32 %31, ptr %0, align 64
   ret void
 }
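
The larger hunks in this file look noisier than they are: with opaque pointers the leading bitcast of the i8* destination to i32* simply goes away, and every unnamed SSA value after it is renumbered one lower. A minimal before/after sketch of that shape, with illustrative function and value names:

; Before: storing an i32 through an i8* needs a pointer-to-pointer bitcast.
define void @store_result_typed(i8* %dst, i32 %v) {
  %p = bitcast i8* %dst to i32*
  store i32 %v, i32* %p, align 4
  ret void
}

; After: ptr has no pointee type, so the cast is dropped and the store uses %dst directly.
define void @store_result_opaque(ptr %dst, i32 %v) {
  store i32 %v, ptr %dst, align 4
  ret void
}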

diff  --git a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
index b98ce73251af3..227ac8d6c5790 100644
--- a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
+++ b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
@@ -4,7 +4,7 @@
 declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
 declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
 
-define i32 @test_udot_v8i8(i8* nocapture readonly %a, i8* nocapture readonly %b) {
+define i32 @test_udot_v8i8(ptr nocapture readonly %a, ptr nocapture readonly %b) {
 ; CHECK-LABEL: test_udot_v8i8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
@@ -15,18 +15,16 @@ define i32 @test_udot_v8i8(i8* nocapture readonly %a, i8* nocapture readonly %b)
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast i8* %a to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0
-  %2 = zext <8 x i8> %1 to <8 x i32>
-  %3 = bitcast i8* %b to <8 x i8>*
-  %4 = load <8 x i8>, <8 x i8>* %3
-  %5 = zext <8 x i8> %4 to <8 x i32>
-  %6 = mul nuw nsw <8 x i32> %5, %2
-  %7 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %6)
-  ret i32 %7
+  %0 = load <8 x i8>, ptr %a
+  %1 = zext <8 x i8> %0 to <8 x i32>
+  %2 = load <8 x i8>, ptr %b
+  %3 = zext <8 x i8> %2 to <8 x i32>
+  %4 = mul nuw nsw <8 x i32> %3, %1
+  %5 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %4)
+  ret i32 %5
 }
 
-define i32 @test_udot_v8i8_nomla(i8* nocapture readonly %a1) {
+define i32 @test_udot_v8i8_nomla(ptr nocapture readonly %a1) {
 ; CHECK-LABEL: test_udot_v8i8_nomla:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v0.8b, #1
@@ -37,14 +35,13 @@ define i32 @test_udot_v8i8_nomla(i8* nocapture readonly %a1) {
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast i8* %a1 to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0
-  %2 = zext <8 x i8> %1 to <8 x i32>
-  %3 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %2)
-  ret i32 %3
+  %0 = load <8 x i8>, ptr %a1
+  %1 = zext <8 x i8> %0 to <8 x i32>
+  %2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %1)
+  ret i32 %2
 }
 
-define i32 @test_sdot_v8i8(i8* nocapture readonly %a, i8* nocapture readonly %b) {
+define i32 @test_sdot_v8i8(ptr nocapture readonly %a, ptr nocapture readonly %b) {
 ; CHECK-LABEL: test_sdot_v8i8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
@@ -55,18 +52,16 @@ define i32 @test_sdot_v8i8(i8* nocapture readonly %a, i8* nocapture readonly %b)
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast i8* %a to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0
-  %2 = sext <8 x i8> %1 to <8 x i32>
-  %3 = bitcast i8* %b to <8 x i8>*
-  %4 = load <8 x i8>, <8 x i8>* %3
-  %5 = sext <8 x i8> %4 to <8 x i32>
-  %6 = mul nsw <8 x i32> %5, %2
-  %7 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %6)
-  ret i32 %7
+  %0 = load <8 x i8>, ptr %a
+  %1 = sext <8 x i8> %0 to <8 x i32>
+  %2 = load <8 x i8>, ptr %b
+  %3 = sext <8 x i8> %2 to <8 x i32>
+  %4 = mul nsw <8 x i32> %3, %1
+  %5 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %4)
+  ret i32 %5
 }
 
-define i32 @test_sdot_v8i8_nomla(i8* nocapture readonly %a1) {
+define i32 @test_sdot_v8i8_nomla(ptr nocapture readonly %a1) {
 ; CHECK-LABEL: test_sdot_v8i8_nomla:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v0.8b, #1
@@ -77,15 +72,14 @@ define i32 @test_sdot_v8i8_nomla(i8* nocapture readonly %a1) {
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast i8* %a1 to <8 x i8>*
-  %1 = load <8 x i8>, <8 x i8>* %0
-  %2 = sext <8 x i8> %1 to <8 x i32>
-  %3 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %2)
-  ret i32 %3
+  %0 = load <8 x i8>, ptr %a1
+  %1 = sext <8 x i8> %0 to <8 x i32>
+  %2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %1)
+  ret i32 %2
 }
 
 
-define i32 @test_udot_v16i8(i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %sum) {
+define i32 @test_udot_v16i8(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 %sum) {
 ; CHECK-LABEL: test_udot_v16i8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
@@ -97,19 +91,17 @@ define i32 @test_udot_v16i8(i8* nocapture readonly %a, i8* nocapture readonly %b
 ; CHECK-NEXT:    add w0, w8, w2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast i8* %a to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0
-  %2 = zext <16 x i8> %1 to <16 x i32>
-  %3 = bitcast i8* %b to <16 x i8>*
-  %4 = load <16 x i8>, <16 x i8>* %3
-  %5 = zext <16 x i8> %4 to <16 x i32>
-  %6 = mul nuw nsw <16 x i32> %5, %2
-  %7 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %6)
-  %op.extra = add i32 %7, %sum
+  %0 = load <16 x i8>, ptr %a
+  %1 = zext <16 x i8> %0 to <16 x i32>
+  %2 = load <16 x i8>, ptr %b
+  %3 = zext <16 x i8> %2 to <16 x i32>
+  %4 = mul nuw nsw <16 x i32> %3, %1
+  %5 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %4)
+  %op.extra = add i32 %5, %sum
   ret i32 %op.extra
 }
 
-define i32 @test_udot_v16i8_nomla(i8* nocapture readonly %a1) {
+define i32 @test_udot_v16i8_nomla(ptr nocapture readonly %a1) {
 ; CHECK-LABEL: test_udot_v16i8_nomla:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v0.16b, #1
@@ -120,14 +112,13 @@ define i32 @test_udot_v16i8_nomla(i8* nocapture readonly %a1) {
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast i8* %a1 to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0
-  %2 = zext <16 x i8> %1 to <16 x i32>
-  %3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
-  ret i32 %3
+  %0 = load <16 x i8>, ptr %a1
+  %1 = zext <16 x i8> %0 to <16 x i32>
+  %2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %1)
+  ret i32 %2
 }
 
-define i32 @test_sdot_v16i8(i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %sum) {
+define i32 @test_sdot_v16i8(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 %sum) {
 ; CHECK-LABEL: test_sdot_v16i8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
@@ -139,19 +130,17 @@ define i32 @test_sdot_v16i8(i8* nocapture readonly %a, i8* nocapture readonly %b
 ; CHECK-NEXT:    add w0, w8, w2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast i8* %a to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0
-  %2 = sext <16 x i8> %1 to <16 x i32>
-  %3 = bitcast i8* %b to <16 x i8>*
-  %4 = load <16 x i8>, <16 x i8>* %3
-  %5 = sext <16 x i8> %4 to <16 x i32>
-  %6 = mul nsw <16 x i32> %5, %2
-  %7 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %6)
-  %op.extra = add nsw i32 %7, %sum
+  %0 = load <16 x i8>, ptr %a
+  %1 = sext <16 x i8> %0 to <16 x i32>
+  %2 = load <16 x i8>, ptr %b
+  %3 = sext <16 x i8> %2 to <16 x i32>
+  %4 = mul nsw <16 x i32> %3, %1
+  %5 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %4)
+  %op.extra = add nsw i32 %5, %sum
   ret i32 %op.extra
 }
 
-define i32 @test_sdot_v16i8_nomla(i8* nocapture readonly %a1) {
+define i32 @test_sdot_v16i8_nomla(ptr nocapture readonly %a1) {
 ; CHECK-LABEL: test_sdot_v16i8_nomla:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi v0.16b, #1
@@ -162,11 +151,10 @@ define i32 @test_sdot_v16i8_nomla(i8* nocapture readonly %a1) {
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast i8* %a1 to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0
-  %2 = sext <16 x i8> %1 to <16 x i32>
-  %3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
-  ret i32 %3
+  %0 = load <16 x i8>, ptr %a1
+  %1 = sext <16 x i8> %0 to <16 x i32>
+  %2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %1)
+  ret i32 %2
 }
 
 

diff  --git a/llvm/test/CodeGen/AArch64/neon-fpextend_f16.ll b/llvm/test/CodeGen/AArch64/neon-fpextend_f16.ll
index b78486770ddaf..caf012a9a699f 100644
--- a/llvm/test/CodeGen/AArch64/neon-fpextend_f16.ll
+++ b/llvm/test/CodeGen/AArch64/neon-fpextend_f16.ll
@@ -4,13 +4,13 @@
 ; This is testing that we can scalarize the v1f16 input to fp_extend even
 ; though the v1f64 result is legal.
 
-define <1 x double> @fpext_v1f16_v1f64(<1 x half>* %a) {
+define <1 x double> @fpext_v1f16_v1f64(ptr %a) {
 ; CHECK-LABEL: fpext_v1f16_v1f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr h0, [x0]
 ; CHECK-NEXT:    fcvt d0, h0
 ; CHECK-NEXT:    ret
-  %b = load <1 x half>, <1 x half>* %a
+  %b = load <1 x half>, ptr %a
   %c = fpext <1 x half> %b to <1 x double>
   ret <1 x double> %c
 }

diff  --git a/llvm/test/CodeGen/AArch64/neon-fpround_f128.ll b/llvm/test/CodeGen/AArch64/neon-fpround_f128.ll
index 265664ee9442e..64bb9934360fb 100644
--- a/llvm/test/CodeGen/AArch64/neon-fpround_f128.ll
+++ b/llvm/test/CodeGen/AArch64/neon-fpround_f128.ll
@@ -1,18 +1,18 @@
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
 
-define <1 x double> @test_fpround_v1f128(<1 x fp128>* %a) {
+define <1 x double> @test_fpround_v1f128(ptr %a) {
 ; CHECK-LABEL: test_fpround_v1f128:
 ; CHECK: bl __trunctfdf2
-  %b = load <1 x fp128>, <1 x fp128>* %a
+  %b = load <1 x fp128>, ptr %a
   %c = fptrunc <1 x fp128> %b to <1 x double>
   ret <1 x double> %c
 }
 
-define <2 x double> @test_fpround_v2f128(<2 x fp128>* %a) {
+define <2 x double> @test_fpround_v2f128(ptr %a) {
 ; CHECK-LABEL: test_fpround_v2f128:
 ; CHECK: bl __trunctfdf2
 ; CHECK: bl __trunctfdf2
-  %b = load <2 x fp128>, <2 x fp128>* %a
+  %b = load <2 x fp128>, ptr %a
   %c = fptrunc <2 x fp128> %b to <2 x double>
   ret <2 x double> %c
 }

diff  --git a/llvm/test/CodeGen/AArch64/neon-sad.ll b/llvm/test/CodeGen/AArch64/neon-sad.ll
index d0466c615c8cd..c0cfe8d8ca3cc 100644
--- a/llvm/test/CodeGen/AArch64/neon-sad.ll
+++ b/llvm/test/CodeGen/AArch64/neon-sad.ll
@@ -4,7 +4,7 @@
 declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1 immarg)
 declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
 
-define i32 @test_sad_v16i8_zext(i8* nocapture readonly %a, i8* nocapture readonly %b) {
+define i32 @test_sad_v16i8_zext(ptr nocapture readonly %a, ptr nocapture readonly %b) {
 ; CHECK-LABEL: test_sad_v16i8_zext:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -15,19 +15,17 @@ define i32 @test_sad_v16i8_zext(i8* nocapture readonly %a, i8* nocapture readonl
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast i8* %a to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0
-  %2 = zext <16 x i8> %1 to <16 x i32>
-  %3 = bitcast i8* %b to <16 x i8>*
-  %4 = load <16 x i8>, <16 x i8>* %3
-  %5 = zext <16 x i8> %4 to <16 x i32>
-  %6 = sub nsw <16 x i32> %5, %2
-  %7 = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %6, i1 true)
-  %8 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %7)
-  ret i32 %8
+  %0 = load <16 x i8>, ptr %a
+  %1 = zext <16 x i8> %0 to <16 x i32>
+  %2 = load <16 x i8>, ptr %b
+  %3 = zext <16 x i8> %2 to <16 x i32>
+  %4 = sub nsw <16 x i32> %3, %1
+  %5 = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %4, i1 true)
+  %6 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %5)
+  ret i32 %6
 }
 
-define i32 @test_sad_v16i8_sext(i8* nocapture readonly %a, i8* nocapture readonly %b) {
+define i32 @test_sad_v16i8_sext(ptr nocapture readonly %a, ptr nocapture readonly %b) {
 ; CHECK-LABEL: test_sad_v16i8_sext:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -38,14 +36,12 @@ define i32 @test_sad_v16i8_sext(i8* nocapture readonly %a, i8* nocapture readonl
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
 entry:
-  %0 = bitcast i8* %a to <16 x i8>*
-  %1 = load <16 x i8>, <16 x i8>* %0
-  %2 = sext <16 x i8> %1 to <16 x i32>
-  %3 = bitcast i8* %b to <16 x i8>*
-  %4 = load <16 x i8>, <16 x i8>* %3
-  %5 = sext <16 x i8> %4 to <16 x i32>
-  %6 = sub nsw <16 x i32> %5, %2
-  %7 = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %6, i1 true)
-  %8 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %7)
-  ret i32 %8
+  %0 = load <16 x i8>, ptr %a
+  %1 = sext <16 x i8> %0 to <16 x i32>
+  %2 = load <16 x i8>, ptr %b
+  %3 = sext <16 x i8> %2 to <16 x i32>
+  %4 = sub nsw <16 x i32> %3, %1
+  %5 = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %4, i1 true)
+  %6 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %5)
+  ret i32 %6
 }

diff  --git a/llvm/test/CodeGen/AArch64/neon-truncstore.ll b/llvm/test/CodeGen/AArch64/neon-truncstore.ll
index 923e337317108..083cc00c8c1b7 100644
--- a/llvm/test/CodeGen/AArch64/neon-truncstore.ll
+++ b/llvm/test/CodeGen/AArch64/neon-truncstore.ll
@@ -4,29 +4,29 @@
 ; A vector TruncStore can not be selected.
 ; Test a trunc IR and a vector store IR can be selected correctly.
 
-define void @v2i64_v2i32(<2 x i64> %a, <2 x i32>* %result) {
+define void @v2i64_v2i32(<2 x i64> %a, ptr %result) {
 ; CHECK-LABEL: v2i64_v2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    xtn v0.2s, v0.2d
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %b = trunc <2 x i64> %a to <2 x i32>
-  store <2 x i32> %b, <2 x i32>* %result
+  store <2 x i32> %b, ptr %result
   ret void
 }
 
-define void @v4i64_v4i32(<4 x i64> %a, <4 x i32>* %result) {
+define void @v4i64_v4i32(<4 x i64> %a, ptr %result) {
 ; CHECK-LABEL: v4i64_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp1 v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %b = trunc <4 x i64> %a to <4 x i32>
-  store <4 x i32> %b, <4 x i32>* %result
+  store <4 x i32> %b, ptr %result
   ret void
 }
 
-define void @v8i64_v8i32(<8 x i64> %a, <8 x i32>* %result) {
+define void @v8i64_v8i32(<8 x i64> %a, ptr %result) {
 ; CHECK-LABEL: v8i64_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp1 v2.4s, v2.4s, v3.4s
@@ -34,11 +34,11 @@ define void @v8i64_v8i32(<8 x i64> %a, <8 x i32>* %result) {
 ; CHECK-NEXT:    stp q0, q2, [x0]
 ; CHECK-NEXT:    ret
   %b = trunc <8 x i64> %a to <8 x i32>
-  store <8 x i32> %b, <8 x i32>* %result
+  store <8 x i32> %b, ptr %result
   ret void
 }
 
-define void @v2i32_v2i16(<2 x i32> %a, <2 x i16>* %result) {
+define void @v2i32_v2i16(<2 x i32> %a, ptr %result) {
 ; CHECK-LABEL: v2i32_v2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
@@ -48,33 +48,33 @@ define void @v2i32_v2i16(<2 x i32> %a, <2 x i16>* %result) {
 ; CHECK-NEXT:    strh w8, [x0, #2]
 ; CHECK-NEXT:    ret
   %b = trunc <2 x i32> %a to <2 x i16>
-  store <2 x i16> %b, <2 x i16>* %result
+  store <2 x i16> %b, ptr %result
   ret void
 }
 
-define void @v4i32_v4i16(<4 x i32> %a, <4 x i16>* %result) {
+define void @v4i32_v4i16(<4 x i32> %a, ptr %result) {
 ; CHECK-LABEL: v4i32_v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    xtn v0.4h, v0.4s
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %b = trunc <4 x i32> %a to <4 x i16>
-  store <4 x i16> %b, <4 x i16>* %result
+  store <4 x i16> %b, ptr %result
   ret void
 }
 
-define void @v8i32_v8i16(<8 x i32> %a, <8 x i16>* %result) {
+define void @v8i32_v8i16(<8 x i32> %a, ptr %result) {
 ; CHECK-LABEL: v8i32_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %b = trunc <8 x i32> %a to <8 x i16>
-  store <8 x i16> %b, <8 x i16>* %result
+  store <8 x i16> %b, ptr %result
   ret void
 }
 
-define void @v16i32_v16i16(<16 x i32> %a, <16 x i16>* %result) {
+define void @v16i32_v16i16(<16 x i32> %a, ptr %result) {
 ; CHECK-LABEL: v16i32_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp1 v2.8h, v2.8h, v3.8h
@@ -82,11 +82,11 @@ define void @v16i32_v16i16(<16 x i32> %a, <16 x i16>* %result) {
 ; CHECK-NEXT:    stp q0, q2, [x0]
 ; CHECK-NEXT:    ret
   %b = trunc <16 x i32> %a to <16 x i16>
-  store <16 x i16> %b, <16 x i16>* %result
+  store <16 x i16> %b, ptr %result
   ret void
 }
 
-define void @v2i32_v2i8(<2 x i32> %a, <2 x i8>* %result) {
+define void @v2i32_v2i8(<2 x i32> %a, ptr %result) {
 ; CHECK-LABEL: v2i32_v2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
@@ -96,11 +96,11 @@ define void @v2i32_v2i8(<2 x i32> %a, <2 x i8>* %result) {
 ; CHECK-NEXT:    strb w8, [x0, #1]
 ; CHECK-NEXT:    ret
   %b = trunc <2 x i32> %a to <2 x i8>
-  store <2 x i8> %b, <2 x i8>* %result
+  store <2 x i8> %b, ptr %result
   ret void
 }
 
-define void @v4i32_v4i8(<4 x i32> %a, <4 x i8>* %result) {
+define void @v4i32_v4i8(<4 x i32> %a, ptr %result) {
 ; CHECK-LABEL: v4i32_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    xtn v0.4h, v0.4s
@@ -108,11 +108,11 @@ define void @v4i32_v4i8(<4 x i32> %a, <4 x i8>* %result) {
 ; CHECK-NEXT:    str s0, [x0]
 ; CHECK-NEXT:    ret
   %b = trunc <4 x i32> %a to <4 x i8>
-  store <4 x i8> %b, <4 x i8>* %result
+  store <4 x i8> %b, ptr %result
   ret void
 }
 
-define void @v8i32_v8i8(<8 x i32> %a, <8 x i8>* %result) {
+define void @v8i32_v8i8(<8 x i32> %a, ptr %result) {
 ; CHECK-LABEL: v8i32_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp1 v0.8h, v0.8h, v1.8h
@@ -120,11 +120,11 @@ define void @v8i32_v8i8(<8 x i32> %a, <8 x i8>* %result) {
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %b = trunc <8 x i32> %a to <8 x i8>
-  store <8 x i8> %b, <8 x i8>* %result
+  store <8 x i8> %b, ptr %result
   ret void
 }
 
-define void @v16i32_v16i8(<16 x i32> %a, <16 x i8>* %result) {
+define void @v16i32_v16i8(<16 x i32> %a, ptr %result) {
 ; CHECK-LABEL: v16i32_v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp1 v2.8h, v2.8h, v3.8h
@@ -133,11 +133,11 @@ define void @v16i32_v16i8(<16 x i32> %a, <16 x i8>* %result) {
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %b = trunc <16 x i32> %a to <16 x i8>
-  store <16 x i8> %b, <16 x i8>* %result
+  store <16 x i8> %b, ptr %result
   ret void
 }
 
-define void @v32i32_v32i8(<32 x i32> %a, <32 x i8>* %result) {
+define void @v32i32_v32i8(<32 x i32> %a, ptr %result) {
 ; CHECK-LABEL: v32i32_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp1 v6.8h, v6.8h, v7.8h
@@ -149,11 +149,11 @@ define void @v32i32_v32i8(<32 x i32> %a, <32 x i8>* %result) {
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %b = trunc <32 x i32> %a to <32 x i8>
-  store <32 x i8> %b, <32 x i8>* %result
+  store <32 x i8> %b, ptr %result
   ret void
 }
 
-define void @v2i16_v2i8(<2 x i16> %a, <2 x i8>* %result) {
+define void @v2i16_v2i8(<2 x i16> %a, ptr %result) {
 ; CHECK-LABEL: v2i16_v2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
@@ -163,11 +163,11 @@ define void @v2i16_v2i8(<2 x i16> %a, <2 x i8>* %result) {
 ; CHECK-NEXT:    strb w8, [x0, #1]
 ; CHECK-NEXT:    ret
   %b = trunc <2 x i16> %a to <2 x i8>
-  store <2 x i8> %b, <2 x i8>* %result
+  store <2 x i8> %b, ptr %result
   ret void
 }
 
-define void @v4i16_v4i8(<4 x i16> %a, <4 x i8>* %result) {
+define void @v4i16_v4i8(<4 x i16> %a, ptr %result) {
 ; CHECK-LABEL: v4i16_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
@@ -175,33 +175,33 @@ define void @v4i16_v4i8(<4 x i16> %a, <4 x i8>* %result) {
 ; CHECK-NEXT:    str s0, [x0]
 ; CHECK-NEXT:    ret
   %b = trunc <4 x i16> %a to <4 x i8>
-  store <4 x i8> %b, <4 x i8>* %result
+  store <4 x i8> %b, ptr %result
   ret void
 }
 
-define void @v8i16_v8i8(<8 x i16> %a, <8 x i8>* %result) {
+define void @v8i16_v8i8(<8 x i16> %a, ptr %result) {
 ; CHECK-LABEL: v8i16_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    xtn v0.8b, v0.8h
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %b = trunc <8 x i16> %a to <8 x i8>
-  store <8 x i8> %b, <8 x i8>* %result
+  store <8 x i8> %b, ptr %result
   ret void
 }
 
-define void @v16i16_v16i8(<16 x i16> %a, <16 x i8>* %result) {
+define void @v16i16_v16i8(<16 x i16> %a, ptr %result) {
 ; CHECK-LABEL: v16i16_v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp1 v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %b = trunc <16 x i16> %a to <16 x i8>
-  store <16 x i8> %b, <16 x i8>* %result
+  store <16 x i8> %b, ptr %result
   ret void
 }
 
-define void @v32i16_v32i8(<32 x i16> %a, <32 x i8>* %result) {
+define void @v32i16_v32i8(<32 x i16> %a, ptr %result) {
 ; CHECK-LABEL: v32i16_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    uzp1 v2.16b, v2.16b, v3.16b
@@ -209,6 +209,6 @@ define void @v32i16_v32i8(<32 x i16> %a, <32 x i8>* %result) {
 ; CHECK-NEXT:    stp q0, q2, [x0]
 ; CHECK-NEXT:    ret
   %b = trunc <32 x i16> %a to <32 x i8>
-  store <32 x i8> %b, <32 x i8>* %result
+  store <32 x i8> %b, ptr %result
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/neon-vmull-high-p64.ll b/llvm/test/CodeGen/AArch64/neon-vmull-high-p64.ll
index 4d83d707737b8..0c4963dfdcedd 100644
--- a/llvm/test/CodeGen/AArch64/neon-vmull-high-p64.ll
+++ b/llvm/test/CodeGen/AArch64/neon-vmull-high-p64.ll
@@ -44,19 +44,18 @@
 %struct.SS = type { <2 x i64>, <2 x i64> }
 
 ; Function Attrs: nofree noinline nounwind
-define dso_local void @func(%struct.SS* nocapture readonly %g, i32 %count, i8* nocapture readonly %buf, i128* nocapture %res) local_unnamed_addr #0 {
+define dso_local void @func(ptr nocapture readonly %g, i32 %count, ptr nocapture readonly %buf, ptr nocapture %res) local_unnamed_addr #0 {
 entry:
-  %h2 = getelementptr inbounds %struct.SS, %struct.SS* %g, i64 0, i32 1
-  %0 = load <2 x i64>, <2 x i64>* %h2, align 16
+  %h2 = getelementptr inbounds %struct.SS, ptr %g, i64 0, i32 1
+  %0 = load <2 x i64>, ptr %h2, align 16
   %cmp34 = icmp eq i32 %count, 0
   br i1 %cmp34, label %for.cond.cleanup, label %for.body.lr.ph
 
 for.body.lr.ph:                                   ; preds = %entry
-  %1 = bitcast %struct.SS* %g to <16 x i8>*
-  %2 = load <16 x i8>, <16 x i8>* %1, align 16
-  %3 = extractelement <2 x i64> %0, i32 0
-  %4 = extractelement <2 x i64> %0, i32 1
-  %5 = zext i32 %count to i64
+  %1 = load <16 x i8>, ptr %g, align 16
+  %2 = extractelement <2 x i64> %0, i32 0
+  %3 = extractelement <2 x i64> %0, i32 1
+  %4 = zext i32 %count to i64
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %for.body, %entry
@@ -64,27 +63,24 @@ for.cond.cleanup:                                 ; preds = %for.body, %entry
 
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
-  %buf.addr.036 = phi i8* [ %buf, %for.body.lr.ph ], [ %add.ptr, %for.body ]
-  %6 = phi <16 x i8> [ %2, %for.body.lr.ph ], [ %xor.i, %for.body ]
-  %7 = bitcast i8* %buf.addr.036 to <16 x i8>*
-  %8 = load <16 x i8>, <16 x i8>* %7, align 16
-  %vrbit.i = call <16 x i8> @llvm.aarch64.neon.rbit.v16i8(<16 x i8> %8) #0
-  %xor.i = xor <16 x i8> %vrbit.i, %6
-  %9 = bitcast <16 x i8> %xor.i to <2 x i64>
-  %10 = extractelement <2 x i64> %9, i32 0
-  %vmull_p64.i = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %10, i64 %3) #0
-  %arrayidx = getelementptr inbounds i128, i128* %res, i64 %indvars.iv
-  %11 = bitcast i128* %arrayidx to <16 x i8>*
-  store <16 x i8> %vmull_p64.i, <16 x i8>* %11, align 16
-  %12 = extractelement <2 x i64> %9, i32 1
-  %vmull_p64.i.i = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %12, i64 %4) #0
-  %13 = or i64 %indvars.iv, 1
-  %arrayidx16 = getelementptr inbounds i128, i128* %res, i64 %13
-  %14 = bitcast i128* %arrayidx16 to <16 x i8>*
-  store <16 x i8> %vmull_p64.i.i, <16 x i8>* %14, align 16
+  %buf.addr.036 = phi ptr [ %buf, %for.body.lr.ph ], [ %add.ptr, %for.body ]
+  %5 = phi <16 x i8> [ %1, %for.body.lr.ph ], [ %xor.i, %for.body ]
+  %6 = load <16 x i8>, ptr %buf.addr.036, align 16
+  %vrbit.i = call <16 x i8> @llvm.aarch64.neon.rbit.v16i8(<16 x i8> %6) #0
+  %xor.i = xor <16 x i8> %vrbit.i, %5
+  %7 = bitcast <16 x i8> %xor.i to <2 x i64>
+  %8 = extractelement <2 x i64> %7, i32 0
+  %vmull_p64.i = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %8, i64 %2) #0
+  %arrayidx = getelementptr inbounds i128, ptr %res, i64 %indvars.iv
+  store <16 x i8> %vmull_p64.i, ptr %arrayidx, align 16
+  %9 = extractelement <2 x i64> %7, i32 1
+  %vmull_p64.i.i = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %9, i64 %3) #0
+  %10 = or i64 %indvars.iv, 1
+  %arrayidx16 = getelementptr inbounds i128, ptr %res, i64 %10
+  store <16 x i8> %vmull_p64.i.i, ptr %arrayidx16, align 16
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
-  %add.ptr = getelementptr inbounds i8, i8* %buf.addr.036, i64 16
-  %cmp = icmp ult i64 %indvars.iv.next, %5
+  %add.ptr = getelementptr inbounds i8, ptr %buf.addr.036, i64 16
+  %cmp = icmp ult i64 %indvars.iv.next, %4
   br i1 %cmp, label %for.body, label %for.cond.cleanup 
 }
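
Opaque pointers only remove pointer-to-pointer casts; instructions that genuinely need an element type, such as getelementptr and load, spell it out as an explicit operand. That is why the getelementptr into %struct.SS above survives unchanged while the bitcast of %g to <16 x i8>* disappears. A small self-contained sketch, with an illustrative struct name:

%struct.Pair = type { <2 x i64>, <2 x i64> }

define <2 x i64> @second_field(ptr %p) {
  ; The GEP still names its source element type explicitly.
  %f1 = getelementptr inbounds %struct.Pair, ptr %p, i64 0, i32 1
  ; The result type of the load now carries what the pointee type used to convey.
  %v = load <2 x i64>, ptr %f1, align 16
  ret <2 x i64> %v
}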
 

diff  --git a/llvm/test/CodeGen/AArch64/nest-register.ll b/llvm/test/CodeGen/AArch64/nest-register.ll
index b8651714be340..1e1c1b044bab6 100644
--- a/llvm/test/CodeGen/AArch64/nest-register.ll
+++ b/llvm/test/CodeGen/AArch64/nest-register.ll
@@ -3,21 +3,21 @@
 ; Tests that the 'nest' parameter attribute causes the relevant parameter to be
 ; passed in the right register.
 
-define i8* @nest_receiver(i8* nest %arg) nounwind {
+define ptr @nest_receiver(ptr nest %arg) nounwind {
 ; CHECK-LABEL: nest_receiver:
 ; CHECK-NEXT: // %bb.0:
 ; CHECK-NEXT: mov x0, x18
 ; CHECK-NEXT: ret
 
-  ret i8* %arg
+  ret ptr %arg
 }
 
-define i8* @nest_caller(i8* %arg) nounwind {
+define ptr @nest_caller(ptr %arg) nounwind {
 ; CHECK-LABEL: nest_caller:
 ; CHECK: mov x18, x0
 ; CHECK-NEXT: bl nest_receiver
 ; CHECK: ret
 
-  %result = call i8* @nest_receiver(i8* nest %arg)
-  ret i8* %result
+  %result = call ptr @nest_receiver(ptr nest %arg)
+  ret ptr %result
 }

diff  --git a/llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll b/llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll
index 6840e844e604f..4fe16d8195c09 100644
--- a/llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll
+++ b/llvm/test/CodeGen/AArch64/no-quad-ldp-stp.ll
@@ -6,12 +6,10 @@
 ; SLOW: stur
 ; SLOW-NOT: stp
 ; FAST: stp
-define void @test_nopair_st(double* %ptr, <2 x double> %v1, <2 x double> %v2) {
-  %tmp1 = bitcast double* %ptr to <2 x double>*
-  store <2 x double> %v2, <2 x double>* %tmp1, align 16
-  %add.ptr = getelementptr inbounds double, double* %ptr, i64 -2
-  %tmp = bitcast double* %add.ptr to <2 x double>*
-  store <2 x double> %v1, <2 x double>* %tmp, align 16
+define void @test_nopair_st(ptr %ptr, <2 x double> %v1, <2 x double> %v2) {
+  store <2 x double> %v2, ptr %ptr, align 16
+  %add.ptr = getelementptr inbounds double, ptr %ptr, i64 -2
+  store <2 x double> %v1, ptr %add.ptr, align 16
   ret void
 }
 
@@ -20,12 +18,10 @@ define void @test_nopair_st(double* %ptr, <2 x double> %v1, <2 x double> %v2) {
 ; SLOW: ldr
 ; SLOW-NOT: ldp
 ; FAST: ldp
-define <2 x i64> @test_nopair_ld(i64* %p) {
-  %a1 = bitcast i64* %p to <2 x i64>*
-  %tmp1 = load <2 x i64>, < 2 x i64>* %a1, align 8
-  %add.ptr2 = getelementptr inbounds i64, i64* %p, i64 2
-  %a2 = bitcast i64* %add.ptr2 to <2 x i64>*
-  %tmp2 = load <2 x i64>, <2 x i64>* %a2, align 8
+define <2 x i64> @test_nopair_ld(ptr %p) {
+  %tmp1 = load <2 x i64>, < 2 x i64>* %p, align 8
+  %add.ptr2 = getelementptr inbounds i64, ptr %p, i64 2
+  %tmp2 = load <2 x i64>, ptr %add.ptr2, align 8
   %add = add nsw <2 x i64> %tmp1, %tmp2
   ret <2 x i64> %add
 }

diff  --git a/llvm/test/CodeGen/AArch64/no_cfi.ll b/llvm/test/CodeGen/AArch64/no_cfi.ll
index 49e34b3c5e116..452b7485b61cc 100644
--- a/llvm/test/CodeGen/AArch64/no_cfi.ll
+++ b/llvm/test/CodeGen/AArch64/no_cfi.ll
@@ -7,7 +7,7 @@
 ; CHECK:                ret
 define void @a() nounwind {
   %1 = alloca i32, align 4
-  store i32 1, i32* %1, align 4
+  store i32 1, ptr %1, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/nontemporal-load.ll b/llvm/test/CodeGen/AArch64/nontemporal-load.ll
index f8ff50b6e4c54..5ccf6b562b42b 100644
--- a/llvm/test/CodeGen/AArch64/nontemporal-load.ll
+++ b/llvm/test/CodeGen/AArch64/nontemporal-load.ll
@@ -2,7 +2,7 @@
 ; RUN: llc --mattr=+sve < %s -mtriple aarch64-apple-darwin | FileCheck %s
 ; RUN: llc --mattr=+sve < %s -mtriple aarch64_be-unknown-unknown | FileCheck --check-prefix CHECK-BE %s
 
-define <4 x double> @test_ldnp_v4f64(<4 x double>* %A) {
+define <4 x double> @test_ldnp_v4f64(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v4f64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q1, [x0]
@@ -12,11 +12,11 @@ define <4 x double> @test_ldnp_v4f64(<4 x double>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldp q0, q1, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <4 x double>, <4 x double>* %A, align 8, !nontemporal !0
+  %lv = load <4 x double>, ptr %A, align 8, !nontemporal !0
   ret <4 x double> %lv
 }
 
-define <4 x i64> @test_ldnp_v4i64(<4 x i64>* %A) {
+define <4 x i64> @test_ldnp_v4i64(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v4i64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q1, [x0]
@@ -26,11 +26,11 @@ define <4 x i64> @test_ldnp_v4i64(<4 x i64>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldp q0, q1, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <4 x i64>, <4 x i64>* %A, align 8, !nontemporal !0
+  %lv = load <4 x i64>, ptr %A, align 8, !nontemporal !0
   ret <4 x i64> %lv
 }
 
-define <8 x i32> @test_ldnp_v8i32(<8 x i32>* %A) {
+define <8 x i32> @test_ldnp_v8i32(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v8i32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q1, [x0]
@@ -40,11 +40,11 @@ define <8 x i32> @test_ldnp_v8i32(<8 x i32>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldp q0, q1, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <8 x i32>, <8 x i32>* %A, align 8, !nontemporal !0
+  %lv = load <8 x i32>, ptr %A, align 8, !nontemporal !0
   ret <8 x i32> %lv
 }
 
-define <8 x float> @test_ldnp_v8f32(<8 x float>* %A) {
+define <8 x float> @test_ldnp_v8f32(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v8f32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q1, [x0]
@@ -54,11 +54,11 @@ define <8 x float> @test_ldnp_v8f32(<8 x float>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldp q0, q1, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <8 x float>, <8 x float>* %A, align 8, !nontemporal !0
+  %lv = load <8 x float>, ptr %A, align 8, !nontemporal !0
   ret <8 x float> %lv
 }
 
-define <16 x i16> @test_ldnp_v16i16(<16 x i16>* %A) {
+define <16 x i16> @test_ldnp_v16i16(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v16i16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q1, [x0]
@@ -68,11 +68,11 @@ define <16 x i16> @test_ldnp_v16i16(<16 x i16>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldp q0, q1, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <16 x i16>, <16 x i16>* %A, align 8, !nontemporal !0
+  %lv = load <16 x i16>, ptr %A, align 8, !nontemporal !0
   ret <16 x i16> %lv
 }
 
-define <16 x half> @test_ldnp_v16f16(<16 x half>* %A) {
+define <16 x half> @test_ldnp_v16f16(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v16f16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q1, [x0]
@@ -82,11 +82,11 @@ define <16 x half> @test_ldnp_v16f16(<16 x half>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldp q0, q1, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <16 x half>, <16 x half>* %A, align 8, !nontemporal !0
+  %lv = load <16 x half>, ptr %A, align 8, !nontemporal !0
   ret <16 x half> %lv
 }
 
-define <32 x i8> @test_ldnp_v32i8(<32 x i8>* %A) {
+define <32 x i8> @test_ldnp_v32i8(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v32i8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q1, [x0]
@@ -96,11 +96,11 @@ define <32 x i8> @test_ldnp_v32i8(<32 x i8>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldp q0, q1, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <32 x i8>, <32 x i8>* %A, align 8, !nontemporal !0
+  %lv = load <32 x i8>, ptr %A, align 8, !nontemporal !0
   ret <32 x i8> %lv
 }
 
-define <4 x i32> @test_ldnp_v4i32(<4 x i32>* %A) {
+define <4 x i32> @test_ldnp_v4i32(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v4i32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -110,11 +110,11 @@ define <4 x i32> @test_ldnp_v4i32(<4 x i32>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldr q0, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load<4 x i32>, <4 x i32>* %A, align 8, !nontemporal !0
+  %lv = load<4 x i32>, ptr %A, align 8, !nontemporal !0
   ret <4 x i32> %lv
 }
 
-define <4 x float> @test_ldnp_v4f32(<4 x float>* %A) {
+define <4 x float> @test_ldnp_v4f32(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v4f32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -124,11 +124,11 @@ define <4 x float> @test_ldnp_v4f32(<4 x float>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldr q0, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load<4 x float>, <4 x float>* %A, align 8, !nontemporal !0
+  %lv = load<4 x float>, ptr %A, align 8, !nontemporal !0
   ret <4 x float> %lv
 }
 
-define <8 x i16> @test_ldnp_v8i16(<8 x i16>* %A) {
+define <8 x i16> @test_ldnp_v8i16(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v8i16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -138,11 +138,11 @@ define <8 x i16> @test_ldnp_v8i16(<8 x i16>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldr q0, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <8 x i16>, <8 x i16>* %A, align 8, !nontemporal !0
+  %lv = load <8 x i16>, ptr %A, align 8, !nontemporal !0
   ret <8 x i16> %lv
 }
 
-define <16 x i8> @test_ldnp_v16i8(<16 x i8>* %A) {
+define <16 x i8> @test_ldnp_v16i8(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v16i8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -152,10 +152,10 @@ define <16 x i8> @test_ldnp_v16i8(<16 x i8>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldr q0, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <16 x i8>, <16 x i8>* %A, align 8, !nontemporal !0
+  %lv = load <16 x i8>, ptr %A, align 8, !nontemporal !0
   ret <16 x i8> %lv
 }
-define <2 x double> @test_ldnp_v2f64(<2 x double>* %A) {
+define <2 x double> @test_ldnp_v2f64(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v2f64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -165,11 +165,11 @@ define <2 x double> @test_ldnp_v2f64(<2 x double>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldr q0, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <2 x double>, <2 x double>* %A, align 8, !nontemporal !0
+  %lv = load <2 x double>, ptr %A, align 8, !nontemporal !0
   ret <2 x double> %lv
 }
 
-define <2 x i32> @test_ldnp_v2i32(<2 x i32>* %A) {
+define <2 x i32> @test_ldnp_v2i32(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v2i32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -179,11 +179,11 @@ define <2 x i32> @test_ldnp_v2i32(<2 x i32>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldr d0, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <2 x i32>, <2 x i32>* %A, align 8, !nontemporal !0
+  %lv = load <2 x i32>, ptr %A, align 8, !nontemporal !0
   ret <2 x i32> %lv
 }
 
-define <2 x float> @test_ldnp_v2f32(<2 x float>* %A) {
+define <2 x float> @test_ldnp_v2f32(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v2f32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -193,11 +193,11 @@ define <2 x float> @test_ldnp_v2f32(<2 x float>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldr d0, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <2 x float>, <2 x float>* %A, align 8, !nontemporal !0
+  %lv = load <2 x float>, ptr %A, align 8, !nontemporal !0
   ret <2 x float> %lv
 }
 
-define <4 x i16> @test_ldnp_v4i16(<4 x i16>* %A) {
+define <4 x i16> @test_ldnp_v4i16(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v4i16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -207,11 +207,11 @@ define <4 x i16> @test_ldnp_v4i16(<4 x i16>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldr d0, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <4 x i16>, <4 x i16>* %A, align 8, !nontemporal !0
+  %lv = load <4 x i16>, ptr %A, align 8, !nontemporal !0
   ret <4 x i16> %lv
 }
 
-define <8 x i8> @test_ldnp_v8i8(<8 x i8>* %A) {
+define <8 x i8> @test_ldnp_v8i8(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v8i8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -221,11 +221,11 @@ define <8 x i8> @test_ldnp_v8i8(<8 x i8>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldr d0, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <8 x i8>, <8 x i8>* %A, align 8, !nontemporal !0
+  %lv = load <8 x i8>, ptr %A, align 8, !nontemporal !0
   ret <8 x i8> %lv
 }
 
-define <1 x double> @test_ldnp_v1f64(<1 x double>* %A) {
+define <1 x double> @test_ldnp_v1f64(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v1f64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -235,11 +235,11 @@ define <1 x double> @test_ldnp_v1f64(<1 x double>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldr d0, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <1 x double>, <1 x double>* %A, align 8, !nontemporal !0
+  %lv = load <1 x double>, ptr %A, align 8, !nontemporal !0
   ret <1 x double> %lv
 }
 
-define <1 x i64> @test_ldnp_v1i64(<1 x i64>* %A) {
+define <1 x i64> @test_ldnp_v1i64(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v1i64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -249,11 +249,11 @@ define <1 x i64> @test_ldnp_v1i64(<1 x i64>* %A) {
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldr d0, [x0]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <1 x i64>, <1 x i64>* %A, align 8, !nontemporal !0
+  %lv = load <1 x i64>, ptr %A, align 8, !nontemporal !0
   ret <1 x i64> %lv
 }
 
-define <32 x i16> @test_ldnp_v32i16(<32 x i16>* %A) {
+define <32 x i16> @test_ldnp_v32i16(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v32i16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q1, [x0]
@@ -265,11 +265,11 @@ define <32 x i16> @test_ldnp_v32i16(<32 x i16>* %A) {
 ; CHECK-BE-NEXT:    ldp q0, q1, [x0]
 ; CHECK-BE-NEXT:    ldp q2, q3, [x0, #32]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <32 x i16>, <32 x i16>* %A, align 8, !nontemporal !0
+  %lv = load <32 x i16>, ptr %A, align 8, !nontemporal !0
   ret <32 x i16> %lv
 }
 
-define <32 x half> @test_ldnp_v32f16(<32 x half>* %A) {
+define <32 x half> @test_ldnp_v32f16(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v32f16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q1, [x0]
@@ -281,11 +281,11 @@ define <32 x half> @test_ldnp_v32f16(<32 x half>* %A) {
 ; CHECK-BE-NEXT:    ldp q0, q1, [x0]
 ; CHECK-BE-NEXT:    ldp q2, q3, [x0, #32]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <32 x half>, <32 x half>* %A, align 8, !nontemporal !0
+  %lv = load <32 x half>, ptr %A, align 8, !nontemporal !0
   ret <32 x half> %lv
 }
 
-define <16 x i32> @test_ldnp_v16i32(<16 x i32>* %A) {
+define <16 x i32> @test_ldnp_v16i32(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v16i32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q1, [x0]
@@ -297,11 +297,11 @@ define <16 x i32> @test_ldnp_v16i32(<16 x i32>* %A) {
 ; CHECK-BE-NEXT:    ldp q0, q1, [x0]
 ; CHECK-BE-NEXT:    ldp q2, q3, [x0, #32]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <16 x i32>, <16 x i32>* %A, align 8, !nontemporal !0
+  %lv = load <16 x i32>, ptr %A, align 8, !nontemporal !0
   ret <16 x i32> %lv
 }
 
-define <16 x float> @test_ldnp_v16f32(<16 x float>* %A) {
+define <16 x float> @test_ldnp_v16f32(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v16f32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q1, [x0]
@@ -313,11 +313,11 @@ define <16 x float> @test_ldnp_v16f32(<16 x float>* %A) {
 ; CHECK-BE-NEXT:    ldp q0, q1, [x0]
 ; CHECK-BE-NEXT:    ldp q2, q3, [x0, #32]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <16 x float>, <16 x float>* %A, align 8, !nontemporal !0
+  %lv = load <16 x float>, ptr %A, align 8, !nontemporal !0
   ret <16 x float> %lv
 }
 
-define <17 x float> @test_ldnp_v17f32(<17 x float>* %A) {
+define <17 x float> @test_ldnp_v17f32(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v17f32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q1, [x0, #32]
@@ -347,11 +347,11 @@ define <17 x float> @test_ldnp_v17f32(<17 x float>* %A) {
 ; CHECK-BE-NEXT:    st1 { v0.4s }, [x10]
 ; CHECK-BE-NEXT:    st1 { v4.4s }, [x8]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <17 x float>, <17 x float>* %A, align 8, !nontemporal !0
+  %lv = load <17 x float>, ptr %A, align 8, !nontemporal !0
   ret <17 x float> %lv
 }
 
-define <33 x double> @test_ldnp_v33f64(<33 x double>* %A) {
+define <33 x double> @test_ldnp_v33f64(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v33f64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q1, [x0]
@@ -441,11 +441,11 @@ define <33 x double> @test_ldnp_v33f64(<33 x double>* %A) {
 ; CHECK-BE-NEXT:    st1 { v1.2d }, [x10]
 ; CHECK-BE-NEXT:    st1 { v0.2d }, [x8]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <33 x double>, <33 x double>* %A, align 8, !nontemporal !0
+  %lv = load <33 x double>, ptr %A, align 8, !nontemporal !0
   ret <33 x double> %lv
 }
 
-define <33 x i8> @test_ldnp_v33i8(<33 x i8>* %A) {
+define <33 x i8> @test_ldnp_v33i8(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v33i8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q1, [x0]
@@ -466,11 +466,11 @@ define <33 x i8> @test_ldnp_v33i8(<33 x i8>* %A) {
 ; CHECK-BE-NEXT:    st1 { v0.16b }, [x8]
 ; CHECK-BE-NEXT:    st1 { v1.16b }, [x10]
 ; CHECK-BE-NEXT:    ret
-  %lv = load<33 x i8>, <33 x i8>* %A, align 8, !nontemporal !0
+  %lv = load<33 x i8>, ptr %A, align 8, !nontemporal !0
   ret <33 x i8> %lv
 }
 
-define <4 x i65> @test_ldnp_v4i65(<4 x i65>* %A) {
+define <4 x i65> @test_ldnp_v4i65(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v4i65:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldp x8, x9, [x0, #8]
@@ -520,11 +520,11 @@ define <4 x i65> @test_ldnp_v4i65(<4 x i65>* %A) {
 ; CHECK-BE-NEXT:    fmov x2, d1
 ; CHECK-BE-NEXT:    fmov x6, d3
 ; CHECK-BE-NEXT:    ret
-  %lv = load <4 x i65>, <4 x i65>* %A, align 8, !nontemporal !0
+  %lv = load <4 x i65>, ptr %A, align 8, !nontemporal !0
   ret <4 x i65> %lv
 }
 
-define <4 x i63> @test_ldnp_v4i63(<4 x i63>* %A) {
+define <4 x i63> @test_ldnp_v4i63(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v4i63:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldp x8, x9, [x0]
@@ -550,11 +550,11 @@ define <4 x i63> @test_ldnp_v4i63(<4 x i63>* %A) {
 ; CHECK-BE-NEXT:    and x1, x8, #0x7fffffffffffffff
 ; CHECK-BE-NEXT:    and x2, x10, #0x7fffffffffffffff
 ; CHECK-BE-NEXT:    ret
-  %lv = load <4 x i63>, <4 x i63>* %A, align 8, !nontemporal !0
+  %lv = load <4 x i63>, ptr %A, align 8, !nontemporal !0
   ret <4 x i63> %lv
 }
 
-define <5 x double> @test_ldnp_v5f64(<5 x double>* %A) {
+define <5 x double> @test_ldnp_v5f64(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v5f64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q2, [x0]
@@ -581,11 +581,11 @@ define <5 x double> @test_ldnp_v5f64(<5 x double>* %A) {
 ; CHECK-BE-NEXT:    // kill: def $d2 killed $d2 killed $q2
 ; CHECK-BE-NEXT:    // kill: def $d3 killed $d3 killed $q3
 ; CHECK-BE-NEXT:    ret
-  %lv = load<5 x double>, <5 x double>* %A, align 8, !nontemporal !0
+  %lv = load<5 x double>, ptr %A, align 8, !nontemporal !0
   ret <5 x double> %lv
 }
 
-define <16 x i64> @test_ldnp_v16i64(<16 x i64>* %A) {
+define <16 x i64> @test_ldnp_v16i64(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v16i64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q1, [x0]
@@ -601,11 +601,11 @@ define <16 x i64> @test_ldnp_v16i64(<16 x i64>* %A) {
 ; CHECK-BE-NEXT:    ldp q4, q5, [x0, #64]
 ; CHECK-BE-NEXT:    ldp q6, q7, [x0, #96]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <16 x i64>, <16 x i64>* %A, align 8, !nontemporal !0
+  %lv = load <16 x i64>, ptr %A, align 8, !nontemporal !0
   ret <16 x i64> %lv
 }
 
-define <16 x double> @test_ldnp_v16f64(<16 x double>* %A) {
+define <16 x double> @test_ldnp_v16f64(ptr %A) {
 ; CHECK-LABEL: test_ldnp_v16f64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldnp q0, q1, [x0]
@@ -621,7 +621,7 @@ define <16 x double> @test_ldnp_v16f64(<16 x double>* %A) {
 ; CHECK-BE-NEXT:    ldp q4, q5, [x0, #64]
 ; CHECK-BE-NEXT:    ldp q6, q7, [x0, #96]
 ; CHECK-BE-NEXT:    ret
-  %lv = load <16 x double>, <16 x double>* %A, align 8, !nontemporal !0
+  %lv = load <16 x double>, ptr %A, align 8, !nontemporal !0
   ret <16 x double> %lv
 }
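
Because this conversion is NFC, the autogenerated CHECK lines in these files still match and are left untouched; only the IR side of each test changes. A hypothetical minimal test in the same style, showing that the opaque-pointer form drops straight into the existing llc/FileCheck harness:

; RUN: llc < %s -mtriple aarch64-none-linux-gnu | FileCheck %s

define <2 x i32> @load_v2i32(ptr %A) {
; CHECK-LABEL: load_v2i32:
; CHECK:       ldr d0, [x0]
; CHECK:       ret
  %lv = load <2 x i32>, ptr %A, align 8
  ret <2 x i32> %lv
}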
 

diff  --git a/llvm/test/CodeGen/AArch64/nontemporal.ll b/llvm/test/CodeGen/AArch64/nontemporal.ll
index 33cc21e32520b..92c56fcc9fc61 100644
--- a/llvm/test/CodeGen/AArch64/nontemporal.ll
+++ b/llvm/test/CodeGen/AArch64/nontemporal.ll
@@ -1,301 +1,294 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple aarch64-apple-darwin | FileCheck %s
 
-define void @test_stnp_v4i64(<4 x i64>* %p, <4 x i64> %v) #0 {
+define void @test_stnp_v4i64(ptr %p, <4 x i64> %v) #0 {
 ; CHECK-LABEL: test_stnp_v4i64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    stnp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  store <4 x i64> %v, <4 x i64>* %p, align 1, !nontemporal !0
+  store <4 x i64> %v, ptr %p, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v4i32(<4 x i32>* %p, <4 x i32> %v) #0 {
+define void @test_stnp_v4i32(ptr %p, <4 x i32> %v) #0 {
 ; CHECK-LABEL: test_stnp_v4i32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov d1, v0[1]
 ; CHECK-NEXT:    stnp d0, d1, [x0]
 ; CHECK-NEXT:    ret
-  store <4 x i32> %v, <4 x i32>* %p, align 1, !nontemporal !0
+  store <4 x i32> %v, ptr %p, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v8i16(<8 x i16>* %p, <8 x i16> %v) #0 {
+define void @test_stnp_v8i16(ptr %p, <8 x i16> %v) #0 {
 ; CHECK-LABEL: test_stnp_v8i16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov d1, v0[1]
 ; CHECK-NEXT:    stnp d0, d1, [x0]
 ; CHECK-NEXT:    ret
-  store <8 x i16> %v, <8 x i16>* %p, align 1, !nontemporal !0
+  store <8 x i16> %v, ptr %p, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v16i8(<16 x i8>* %p, <16 x i8> %v) #0 {
+define void @test_stnp_v16i8(ptr %p, <16 x i8> %v) #0 {
 ; CHECK-LABEL: test_stnp_v16i8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov d1, v0[1]
 ; CHECK-NEXT:    stnp d0, d1, [x0]
 ; CHECK-NEXT:    ret
-  store <16 x i8> %v, <16 x i8>* %p, align 1, !nontemporal !0
+  store <16 x i8> %v, ptr %p, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v2i32(<2 x i32>* %p, <2 x i32> %v) #0 {
+define void @test_stnp_v2i32(ptr %p, <2 x i32> %v) #0 {
 ; CHECK-LABEL: test_stnp_v2i32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    mov s1, v0[1]
 ; CHECK-NEXT:    stnp s0, s1, [x0]
 ; CHECK-NEXT:    ret
-  store <2 x i32> %v, <2 x i32>* %p, align 1, !nontemporal !0
+  store <2 x i32> %v, ptr %p, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v4i16(<4 x i16>* %p, <4 x i16> %v) #0 {
+define void @test_stnp_v4i16(ptr %p, <4 x i16> %v) #0 {
 ; CHECK-LABEL: test_stnp_v4i16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    mov s1, v0[1]
 ; CHECK-NEXT:    stnp s0, s1, [x0]
 ; CHECK-NEXT:    ret
-  store <4 x i16> %v, <4 x i16>* %p, align 1, !nontemporal !0
+  store <4 x i16> %v, ptr %p, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v8i8(<8 x i8>* %p, <8 x i8> %v) #0 {
+define void @test_stnp_v8i8(ptr %p, <8 x i8> %v) #0 {
 ; CHECK-LABEL: test_stnp_v8i8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    mov s1, v0[1]
 ; CHECK-NEXT:    stnp s0, s1, [x0]
 ; CHECK-NEXT:    ret
-  store <8 x i8> %v, <8 x i8>* %p, align 1, !nontemporal !0
+  store <8 x i8> %v, ptr %p, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v2f64(<2 x double>* %p, <2 x double> %v) #0 {
+define void @test_stnp_v2f64(ptr %p, <2 x double> %v) #0 {
 ; CHECK-LABEL: test_stnp_v2f64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov d1, v0[1]
 ; CHECK-NEXT:    stnp d0, d1, [x0]
 ; CHECK-NEXT:    ret
-  store <2 x double> %v, <2 x double>* %p, align 1, !nontemporal !0
+  store <2 x double> %v, ptr %p, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v4f32(<4 x float>* %p, <4 x float> %v) #0 {
+define void @test_stnp_v4f32(ptr %p, <4 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v4f32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov d1, v0[1]
 ; CHECK-NEXT:    stnp d0, d1, [x0]
 ; CHECK-NEXT:    ret
-  store <4 x float> %v, <4 x float>* %p, align 1, !nontemporal !0
+  store <4 x float> %v, ptr %p, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v2f32(<2 x float>* %p, <2 x float> %v) #0 {
+define void @test_stnp_v2f32(ptr %p, <2 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v2f32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    mov s1, v0[1]
 ; CHECK-NEXT:    stnp s0, s1, [x0]
 ; CHECK-NEXT:    ret
-  store <2 x float> %v, <2 x float>* %p, align 1, !nontemporal !0
+  store <2 x float> %v, ptr %p, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v1f64(<1 x double>* %p, <1 x double> %v) #0 {
+define void @test_stnp_v1f64(ptr %p, <1 x double> %v) #0 {
 ; CHECK-LABEL: test_stnp_v1f64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    mov s1, v0[1]
 ; CHECK-NEXT:    stnp s0, s1, [x0]
 ; CHECK-NEXT:    ret
-  store <1 x double> %v, <1 x double>* %p, align 1, !nontemporal !0
+  store <1 x double> %v, ptr %p, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v1i64(<1 x i64>* %p, <1 x i64> %v) #0 {
+define void @test_stnp_v1i64(ptr %p, <1 x i64> %v) #0 {
 ; CHECK-LABEL: test_stnp_v1i64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    mov s1, v0[1]
 ; CHECK-NEXT:    stnp s0, s1, [x0]
 ; CHECK-NEXT:    ret
-  store <1 x i64> %v, <1 x i64>* %p, align 1, !nontemporal !0
+  store <1 x i64> %v, ptr %p, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_i64(i64* %p, i64 %v) #0 {
+define void @test_stnp_i64(ptr %p, i64 %v) #0 {
 ; CHECK-LABEL: test_stnp_i64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsr x8, x1, #32
 ; CHECK-NEXT:    stnp w1, w8, [x0]
 ; CHECK-NEXT:    ret
-  store i64 %v, i64* %p, align 1, !nontemporal !0
+  store i64 %v, ptr %p, align 1, !nontemporal !0
   ret void
 }
 
 
-define void @test_stnp_v2f64_offset(<2 x double>* %p, <2 x double> %v) #0 {
+define void @test_stnp_v2f64_offset(ptr %p, <2 x double> %v) #0 {
 ; CHECK-LABEL: test_stnp_v2f64_offset:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov d1, v0[1]
 ; CHECK-NEXT:    stnp d0, d1, [x0, #16]
 ; CHECK-NEXT:    ret
-  %tmp0 = getelementptr <2 x double>, <2 x double>* %p, i32 1
-  store <2 x double> %v, <2 x double>* %tmp0, align 1, !nontemporal !0
+  %tmp0 = getelementptr <2 x double>, ptr %p, i32 1
+  store <2 x double> %v, ptr %tmp0, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v2f64_offset_neg(<2 x double>* %p, <2 x double> %v) #0 {
+define void @test_stnp_v2f64_offset_neg(ptr %p, <2 x double> %v) #0 {
 ; CHECK-LABEL: test_stnp_v2f64_offset_neg:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov d1, v0[1]
 ; CHECK-NEXT:    stnp d0, d1, [x0, #-16]
 ; CHECK-NEXT:    ret
-  %tmp0 = getelementptr <2 x double>, <2 x double>* %p, i32 -1
-  store <2 x double> %v, <2 x double>* %tmp0, align 1, !nontemporal !0
+  %tmp0 = getelementptr <2 x double>, ptr %p, i32 -1
+  store <2 x double> %v, ptr %tmp0, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v2f32_offset(<2 x float>* %p, <2 x float> %v) #0 {
+define void @test_stnp_v2f32_offset(ptr %p, <2 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v2f32_offset:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    mov s1, v0[1]
 ; CHECK-NEXT:    stnp s0, s1, [x0, #8]
 ; CHECK-NEXT:    ret
-  %tmp0 = getelementptr <2 x float>, <2 x float>* %p, i32 1
-  store <2 x float> %v, <2 x float>* %tmp0, align 1, !nontemporal !0
+  %tmp0 = getelementptr <2 x float>, ptr %p, i32 1
+  store <2 x float> %v, ptr %tmp0, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v2f32_offset_neg(<2 x float>* %p, <2 x float> %v) #0 {
+define void @test_stnp_v2f32_offset_neg(ptr %p, <2 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v2f32_offset_neg:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    mov s1, v0[1]
 ; CHECK-NEXT:    stnp s0, s1, [x0, #-8]
 ; CHECK-NEXT:    ret
-  %tmp0 = getelementptr <2 x float>, <2 x float>* %p, i32 -1
-  store <2 x float> %v, <2 x float>* %tmp0, align 1, !nontemporal !0
+  %tmp0 = getelementptr <2 x float>, ptr %p, i32 -1
+  store <2 x float> %v, ptr %tmp0, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_i64_offset(i64* %p, i64 %v) #0 {
+define void @test_stnp_i64_offset(ptr %p, i64 %v) #0 {
 ; CHECK-LABEL: test_stnp_i64_offset:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsr x8, x1, #32
 ; CHECK-NEXT:    stnp w1, w8, [x0, #8]
 ; CHECK-NEXT:    ret
-  %tmp0 = getelementptr i64, i64* %p, i32 1
-  store i64 %v, i64* %tmp0, align 1, !nontemporal !0
+  %tmp0 = getelementptr i64, ptr %p, i32 1
+  store i64 %v, ptr %tmp0, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_i64_offset_neg(i64* %p, i64 %v) #0 {
+define void @test_stnp_i64_offset_neg(ptr %p, i64 %v) #0 {
 ; CHECK-LABEL: test_stnp_i64_offset_neg:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    lsr x8, x1, #32
 ; CHECK-NEXT:    stnp w1, w8, [x0, #-8]
 ; CHECK-NEXT:    ret
-  %tmp0 = getelementptr i64, i64* %p, i32 -1
-  store i64 %v, i64* %tmp0, align 1, !nontemporal !0
+  %tmp0 = getelementptr i64, ptr %p, i32 -1
+  store i64 %v, ptr %tmp0, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v4f32_invalid_offset_4(i8* %p, <4 x float> %v) #0 {
+define void @test_stnp_v4f32_invalid_offset_4(ptr %p, <4 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v4f32_invalid_offset_4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov d1, v0[1]
 ; CHECK-NEXT:    add x8, x0, #4
 ; CHECK-NEXT:    stnp d0, d1, [x8]
 ; CHECK-NEXT:    ret
-  %tmp0 = getelementptr i8, i8* %p, i32 4
-  %tmp1 = bitcast i8* %tmp0 to <4 x float>*
-  store <4 x float> %v, <4 x float>* %tmp1, align 1, !nontemporal !0
+  %tmp0 = getelementptr i8, ptr %p, i32 4
+  store <4 x float> %v, ptr %tmp0, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v4f32_invalid_offset_neg_4(i8* %p, <4 x float> %v) #0 {
+define void @test_stnp_v4f32_invalid_offset_neg_4(ptr %p, <4 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v4f32_invalid_offset_neg_4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov d1, v0[1]
 ; CHECK-NEXT:    sub x8, x0, #4
 ; CHECK-NEXT:    stnp d0, d1, [x8]
 ; CHECK-NEXT:    ret
-  %tmp0 = getelementptr i8, i8* %p, i32 -4
-  %tmp1 = bitcast i8* %tmp0 to <4 x float>*
-  store <4 x float> %v, <4 x float>* %tmp1, align 1, !nontemporal !0
+  %tmp0 = getelementptr i8, ptr %p, i32 -4
+  store <4 x float> %v, ptr %tmp0, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v4f32_invalid_offset_512(i8* %p, <4 x float> %v) #0 {
+define void @test_stnp_v4f32_invalid_offset_512(ptr %p, <4 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v4f32_invalid_offset_512:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov d1, v0[1]
 ; CHECK-NEXT:    add x8, x0, #512
 ; CHECK-NEXT:    stnp d0, d1, [x8]
 ; CHECK-NEXT:    ret
-  %tmp0 = getelementptr i8, i8* %p, i32 512
-  %tmp1 = bitcast i8* %tmp0 to <4 x float>*
-  store <4 x float> %v, <4 x float>* %tmp1, align 1, !nontemporal !0
+  %tmp0 = getelementptr i8, ptr %p, i32 512
+  store <4 x float> %v, ptr %tmp0, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v4f32_offset_504(i8* %p, <4 x float> %v) #0 {
+define void @test_stnp_v4f32_offset_504(ptr %p, <4 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v4f32_offset_504:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov d1, v0[1]
 ; CHECK-NEXT:    stnp d0, d1, [x0, #504]
 ; CHECK-NEXT:    ret
-  %tmp0 = getelementptr i8, i8* %p, i32 504
-  %tmp1 = bitcast i8* %tmp0 to <4 x float>*
-  store <4 x float> %v, <4 x float>* %tmp1, align 1, !nontemporal !0
+  %tmp0 = getelementptr i8, ptr %p, i32 504
+  store <4 x float> %v, ptr %tmp0, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v4f32_invalid_offset_508(i8* %p, <4 x float> %v) #0 {
+define void @test_stnp_v4f32_invalid_offset_508(ptr %p, <4 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v4f32_invalid_offset_508:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov d1, v0[1]
 ; CHECK-NEXT:    add x8, x0, #508
 ; CHECK-NEXT:    stnp d0, d1, [x8]
 ; CHECK-NEXT:    ret
-  %tmp0 = getelementptr i8, i8* %p, i32 508
-  %tmp1 = bitcast i8* %tmp0 to <4 x float>*
-  store <4 x float> %v, <4 x float>* %tmp1, align 1, !nontemporal !0
+  %tmp0 = getelementptr i8, ptr %p, i32 508
+  store <4 x float> %v, ptr %tmp0, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v4f32_invalid_offset_neg_520(i8* %p, <4 x float> %v) #0 {
+define void @test_stnp_v4f32_invalid_offset_neg_520(ptr %p, <4 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v4f32_invalid_offset_neg_520:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov d1, v0[1]
 ; CHECK-NEXT:    sub x8, x0, #520
 ; CHECK-NEXT:    stnp d0, d1, [x8]
 ; CHECK-NEXT:    ret
-  %tmp0 = getelementptr i8, i8* %p, i32 -520
-  %tmp1 = bitcast i8* %tmp0 to <4 x float>*
-  store <4 x float> %v, <4 x float>* %tmp1, align 1, !nontemporal !0
+  %tmp0 = getelementptr i8, ptr %p, i32 -520
+  store <4 x float> %v, ptr %tmp0, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v4f32_offset_neg_512(i8* %p, <4 x float> %v) #0 {
+define void @test_stnp_v4f32_offset_neg_512(ptr %p, <4 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v4f32_offset_neg_512:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    mov d1, v0[1]
 ; CHECK-NEXT:    stnp d0, d1, [x0, #-512]
 ; CHECK-NEXT:    ret
-  %tmp0 = getelementptr i8, i8* %p, i32 -512
-  %tmp1 = bitcast i8* %tmp0 to <4 x float>*
-  store <4 x float> %v, <4 x float>* %tmp1, align 1, !nontemporal !0
+  %tmp0 = getelementptr i8, ptr %p, i32 -512
+  store <4 x float> %v, ptr %tmp0, align 1, !nontemporal !0
   ret void
 }
 
 
-define void @test_stnp_v2f32_invalid_offset_256(i8* %p, <2 x float> %v) #0 {
+define void @test_stnp_v2f32_invalid_offset_256(ptr %p, <2 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v2f32_invalid_offset_256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
@@ -303,26 +296,24 @@ define void @test_stnp_v2f32_invalid_offset_256(i8* %p, <2 x float> %v) #0 {
 ; CHECK-NEXT:    add x8, x0, #256
 ; CHECK-NEXT:    stnp s0, s1, [x8]
 ; CHECK-NEXT:    ret
-  %tmp0 = getelementptr i8, i8* %p, i32 256
-  %tmp1 = bitcast i8* %tmp0 to <2 x float>*
-  store <2 x float> %v, <2 x float>* %tmp1, align 1, !nontemporal !0
+  %tmp0 = getelementptr i8, ptr %p, i32 256
+  store <2 x float> %v, ptr %tmp0, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v2f32_offset_252(i8* %p, <2 x float> %v) #0 {
+define void @test_stnp_v2f32_offset_252(ptr %p, <2 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v2f32_offset_252:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    mov s1, v0[1]
 ; CHECK-NEXT:    stnp s0, s1, [x0, #252]
 ; CHECK-NEXT:    ret
-  %tmp0 = getelementptr i8, i8* %p, i32 252
-  %tmp1 = bitcast i8* %tmp0 to <2 x float>*
-  store <2 x float> %v, <2 x float>* %tmp1, align 1, !nontemporal !0
+  %tmp0 = getelementptr i8, ptr %p, i32 252
+  store <2 x float> %v, ptr %tmp0, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v2f32_invalid_offset_neg_260(i8* %p, <2 x float> %v) #0 {
+define void @test_stnp_v2f32_invalid_offset_neg_260(ptr %p, <2 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v2f32_invalid_offset_neg_260:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
@@ -330,26 +321,24 @@ define void @test_stnp_v2f32_invalid_offset_neg_260(i8* %p, <2 x float> %v) #0 {
 ; CHECK-NEXT:    sub x8, x0, #260
 ; CHECK-NEXT:    stnp s0, s1, [x8]
 ; CHECK-NEXT:    ret
-  %tmp0 = getelementptr i8, i8* %p, i32 -260
-  %tmp1 = bitcast i8* %tmp0 to <2 x float>*
-  store <2 x float> %v, <2 x float>* %tmp1, align 1, !nontemporal !0
+  %tmp0 = getelementptr i8, ptr %p, i32 -260
+  store <2 x float> %v, ptr %tmp0, align 1, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v2f32_offset_neg_256(i8* %p, <2 x float> %v) #0 {
+define void @test_stnp_v2f32_offset_neg_256(ptr %p, <2 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v2f32_offset_neg_256:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    mov s1, v0[1]
 ; CHECK-NEXT:    stnp s0, s1, [x0, #-256]
 ; CHECK-NEXT:    ret
-  %tmp0 = getelementptr i8, i8* %p, i32 -256
-  %tmp1 = bitcast i8* %tmp0 to <2 x float>*
-  store <2 x float> %v, <2 x float>* %tmp1, align 1, !nontemporal !0
+  %tmp0 = getelementptr i8, ptr %p, i32 -256
+  store <2 x float> %v, ptr %tmp0, align 1, !nontemporal !0
   ret void
 }
 
-declare void @dummy(<4 x float>*)
+declare void @dummy(ptr)
 
 define void @test_stnp_v4f32_offset_alloca(<4 x float> %v) #0 {
 ; CHECK-LABEL: test_stnp_v4f32_offset_alloca:
@@ -364,8 +353,8 @@ define void @test_stnp_v4f32_offset_alloca(<4 x float> %v) #0 {
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %tmp0 = alloca <4 x float>
-  store <4 x float> %v, <4 x float>* %tmp0, align 1, !nontemporal !0
-  call void @dummy(<4 x float>* %tmp0)
+  store <4 x float> %v, ptr %tmp0, align 1, !nontemporal !0
+  call void @dummy(ptr %tmp0)
   ret void
 }
 
@@ -382,24 +371,24 @@ define void @test_stnp_v4f32_offset_alloca_2(<4 x float> %v) #0 {
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %tmp0 = alloca <4 x float>, i32 2
-  %tmp1 = getelementptr <4 x float>, <4 x float>* %tmp0, i32 1
-  store <4 x float> %v, <4 x float>* %tmp1, align 1, !nontemporal !0
-  call void @dummy(<4 x float>* %tmp0)
+  %tmp1 = getelementptr <4 x float>, ptr %tmp0, i32 1
+  store <4 x float> %v, ptr %tmp1, align 1, !nontemporal !0
+  call void @dummy(ptr %tmp0)
   ret void
 }
 
-define void @test_stnp_v32i8(<32 x i8> %v, <32 x i8>* %ptr) {
+define void @test_stnp_v32i8(<32 x i8> %v, ptr %ptr) {
 ; CHECK-LABEL: test_stnp_v32i8:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    stnp q0, q1, [x0]
 ; CHECK-NEXT:    ret
 
 entry:
-  store <32 x i8> %v, <32 x i8>* %ptr, align 4, !nontemporal !0
+  store <32 x i8> %v, ptr %ptr, align 4, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v32i16(<32 x i16> %v, <32 x i16>* %ptr) {
+define void @test_stnp_v32i16(<32 x i16> %v, ptr %ptr) {
 ; CHECK-LABEL: test_stnp_v32i16:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    stnp q2, q3, [x0, #32]
@@ -407,11 +396,11 @@ define void @test_stnp_v32i16(<32 x i16> %v, <32 x i16>* %ptr) {
 ; CHECK-NEXT:    ret
 
 entry:
-  store <32 x i16> %v, <32 x i16>* %ptr, align 4, !nontemporal !0
+  store <32 x i16> %v, ptr %ptr, align 4, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v32f16(<32 x half> %v, <32 x half>* %ptr) {
+define void @test_stnp_v32f16(<32 x half> %v, ptr %ptr) {
 ; CHECK-LABEL: test_stnp_v32f16:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    stnp q2, q3, [x0, #32]
@@ -419,11 +408,11 @@ define void @test_stnp_v32f16(<32 x half> %v, <32 x half>* %ptr) {
 ; CHECK-NEXT:    ret
 
 entry:
-  store <32 x half> %v, <32 x half>* %ptr, align 4, !nontemporal !0
+  store <32 x half> %v, ptr %ptr, align 4, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v16i32(<16 x i32> %v, <16 x i32>* %ptr) {
+define void @test_stnp_v16i32(<16 x i32> %v, ptr %ptr) {
 ; CHECK-LABEL: test_stnp_v16i32:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    stnp q2, q3, [x0, #32]
@@ -431,11 +420,11 @@ define void @test_stnp_v16i32(<16 x i32> %v, <16 x i32>* %ptr) {
 ; CHECK-NEXT:    ret
 
 entry:
-  store <16 x i32> %v, <16 x i32>* %ptr, align 4, !nontemporal !0
+  store <16 x i32> %v, ptr %ptr, align 4, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v16f32(<16 x float> %v, <16 x float>* %ptr) {
+define void @test_stnp_v16f32(<16 x float> %v, ptr %ptr) {
 ; CHECK-LABEL: test_stnp_v16f32:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    stnp q2, q3, [x0, #32]
@@ -443,11 +432,11 @@ define void @test_stnp_v16f32(<16 x float> %v, <16 x float>* %ptr) {
 ; CHECK-NEXT:    ret
 
 entry:
-  store <16 x float> %v, <16 x float>* %ptr, align 4, !nontemporal !0
+  store <16 x float> %v, ptr %ptr, align 4, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v17f32(<17 x float> %v, <17 x float>* %ptr) {
+define void @test_stnp_v17f32(<17 x float> %v, ptr %ptr) {
 ; CHECK-LABEL: test_stnp_v17f32:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    ldr s16, [sp, #16]
@@ -491,10 +480,10 @@ define void @test_stnp_v17f32(<17 x float> %v, <17 x float>* %ptr) {
 ; CHECK-NEXT:    ret
 
 entry:
-  store <17 x float> %v, <17 x float>* %ptr, align 4, !nontemporal !0
+  store <17 x float> %v, ptr %ptr, align 4, !nontemporal !0
   ret void
 }
-define void @test_stnp_v16i32_invalid_offset(<16 x i32> %v, <16 x i32>* %ptr) {
+define void @test_stnp_v16i32_invalid_offset(<16 x i32> %v, ptr %ptr) {
 ; CHECK-LABEL: test_stnp_v16i32_invalid_offset:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    mov w8, #32032
@@ -506,12 +495,12 @@ define void @test_stnp_v16i32_invalid_offset(<16 x i32> %v, <16 x i32>* %ptr) {
 ; CHECK-NEXT:    ret
 
 entry:
-  %gep = getelementptr <16 x i32>, <16 x i32>* %ptr, i32 500
-  store <16 x i32> %v, <16 x i32>* %gep, align 4, !nontemporal !0
+  %gep = getelementptr <16 x i32>, ptr %ptr, i32 500
+  store <16 x i32> %v, ptr %gep, align 4, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v16f64(<16 x double> %v, <16 x double>* %ptr) {
+define void @test_stnp_v16f64(<16 x double> %v, ptr %ptr) {
 ; CHECK-LABEL: test_stnp_v16f64:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    stnp q6, q7, [x0, #96]
@@ -521,11 +510,11 @@ define void @test_stnp_v16f64(<16 x double> %v, <16 x double>* %ptr) {
 ; CHECK-NEXT:    ret
 
 entry:
-  store <16 x double> %v, <16 x double>* %ptr, align 4, !nontemporal !0
+  store <16 x double> %v, ptr %ptr, align 4, !nontemporal !0
   ret void
 }
 
-define void @test_stnp_v16i64(<16 x i64> %v, <16 x i64>* %ptr) {
+define void @test_stnp_v16i64(<16 x i64> %v, ptr %ptr) {
 ; CHECK-LABEL: test_stnp_v16i64:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    stnp q6, q7, [x0, #96]
@@ -535,7 +524,7 @@ define void @test_stnp_v16i64(<16 x i64> %v, <16 x i64>* %ptr) {
 ; CHECK-NEXT:    ret
 
 entry:
-  store <16 x i64> %v, <16 x i64>* %ptr, align 4, !nontemporal !0
+  store <16 x i64> %v, ptr %ptr, align 4, !nontemporal !0
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/nzcv-save.ll b/llvm/test/CodeGen/AArch64/nzcv-save.ll
index ddb1d3219f78b..2fee2666316ba 100644
--- a/llvm/test/CodeGen/AArch64/nzcv-save.ll
+++ b/llvm/test/CodeGen/AArch64/nzcv-save.ll
@@ -3,7 +3,7 @@
 
 ; DAG ends up with two uses for the flags from an ADCS node, which means they
 ; must be saved for later.
-define void @f(i256* nocapture %a, i256* nocapture %b, i256* nocapture %cc, i256* nocapture %dd) nounwind uwtable noinline ssp {
+define void @f(ptr nocapture %a, ptr nocapture %b, ptr nocapture %cc, ptr nocapture %dd) nounwind uwtable noinline ssp {
 ; CHECK-LABEL: f:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldp x9, x8, [x2]
@@ -22,12 +22,12 @@ define void @f(i256* nocapture %a, i256* nocapture %b, i256* nocapture %cc, i256
 ; CHECK-NEXT:    stp x9, x8, [x1]
 ; CHECK-NEXT:    ret
 entry:
-  %c = load i256, i256* %cc
-  %d = load i256, i256* %dd
+  %c = load i256, ptr %cc
+  %d = load i256, ptr %dd
   %add = add nsw i256 %c, %d
-  store i256 %add, i256* %a, align 8
+  store i256 %add, ptr %a, align 8
   %or = or i256 %c, 1606938044258990275541962092341162602522202993782792835301376
   %add6 = add nsw i256 %or, %d
-  store i256 %add6, i256* %b, align 8
+  store i256 %add6, ptr %b, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/optimize-cond-branch.ll b/llvm/test/CodeGen/AArch64/optimize-cond-branch.ll
index 32217a3863771..c8e6c3f3fbfd2 100644
--- a/llvm/test/CodeGen/AArch64/optimize-cond-branch.ll
+++ b/llvm/test/CodeGen/AArch64/optimize-cond-branch.ll
@@ -50,7 +50,7 @@ b2:
   br label %b5
 
 b3:
-  %v1 = load i32, i32* undef, align 4
+  %v1 = load i32, ptr undef, align 4
   %v2 = and i32 %v1, 256
   br label %b5
 

diff  --git a/llvm/test/CodeGen/AArch64/optimize-imm.ll b/llvm/test/CodeGen/AArch64/optimize-imm.ll
index 4d007e417dcaf..e0b5366a1a31d 100644
--- a/llvm/test/CodeGen/AArch64/optimize-imm.ll
+++ b/llvm/test/CodeGen/AArch64/optimize-imm.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -o - %s -mtriple=aarch64-- | FileCheck %s
 
-define void @and1(i32 %a, i8* nocapture %p) {
+define void @and1(i32 %a, ptr nocapture %p) {
 ; CHECK-LABEL: and1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    and w8, w0, #0xfffffffd
@@ -10,7 +10,7 @@ define void @and1(i32 %a, i8* nocapture %p) {
 entry:
   %and = and i32 %a, 253
   %conv = trunc i32 %and to i8
-  store i8 %conv, i8* %p, align 1
+  store i8 %conv, ptr %p, align 1
   ret void
 }
 
@@ -86,8 +86,8 @@ define i64 @PR33100(i64 %arg) {
 ; CHECK-NEXT:    ret
 entry:
   %alloca0 = alloca i64
-  store i64 8, i64* %alloca0, align 4
-  %t0 = load i64, i64* %alloca0, align 4
+  store i64 8, ptr %alloca0, align 4
+  %t0 = load i64, ptr %alloca0, align 4
   %t1 = shl i64 %arg, %t0
   %and0 = and i64 %t1, 129
   %xor0 = xor i64 %arg, 129

diff  --git a/llvm/test/CodeGen/AArch64/overeager_mla_fusing.ll b/llvm/test/CodeGen/AArch64/overeager_mla_fusing.ll
index 8cd45160fcf44..e676cacf1b51d 100644
--- a/llvm/test/CodeGen/AArch64/overeager_mla_fusing.ll
+++ b/llvm/test/CodeGen/AArch64/overeager_mla_fusing.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc %s --mtriple aarch64 -verify-machineinstrs -o - | FileCheck %s
 
-define dso_local void @jsimd_idct_ifast_neon_intrinsic(i8* nocapture readonly %dct_table, i16* nocapture readonly %coef_block, i8** nocapture readonly %output_buf, i32 %output_col) local_unnamed_addr #0 {
+define dso_local void @jsimd_idct_ifast_neon_intrinsic(ptr nocapture readonly %dct_table, ptr nocapture readonly %coef_block, ptr nocapture readonly %output_buf, i32 %output_col) local_unnamed_addr #0 {
 ; CHECK-LABEL: jsimd_idct_ifast_neon_intrinsic:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr q0, [x1, #32]
@@ -19,41 +19,35 @@ define dso_local void @jsimd_idct_ifast_neon_intrinsic(i8* nocapture readonly %d
 ; CHECK-NEXT:    str q0, [x9, x8]
 ; CHECK-NEXT:    ret
 entry:
-  %add.ptr5 = getelementptr inbounds i16, i16* %coef_block, i64 16
-  %0 = bitcast i16* %add.ptr5 to <8 x i16>*
-  %1 = load <8 x i16>, <8 x i16>* %0, align 16
+  %add.ptr5 = getelementptr inbounds i16, ptr %coef_block, i64 16
+  %0 = load <8 x i16>, ptr %add.ptr5, align 16
 
-  %add.ptr17 = getelementptr inbounds i16, i16* %coef_block, i64 48
-  %2 = bitcast i16* %add.ptr17 to <8 x i16>*
-  %3 = load <8 x i16>, <8 x i16>* %2, align 16
+  %add.ptr17 = getelementptr inbounds i16, ptr %coef_block, i64 48
+  %1 = load <8 x i16>, ptr %add.ptr17, align 16
 
-  %add.ptr29 = getelementptr inbounds i8, i8* %dct_table, i64 32
-  %4 = bitcast i8* %add.ptr29 to <8 x i16>*
-  %5 = load <8 x i16>, <8 x i16>* %4, align 16
+  %add.ptr29 = getelementptr inbounds i8, ptr %dct_table, i64 32
+  %2 = load <8 x i16>, ptr %add.ptr29, align 16
 
-  %add.ptr41 = getelementptr inbounds i8, i8* %dct_table, i64 96
-  %6 = bitcast i8* %add.ptr41 to <8 x i16>*
-  %7 = load <8 x i16>, <8 x i16>* %6, align 16
+  %add.ptr41 = getelementptr inbounds i8, ptr %dct_table, i64 96
+  %3 = load <8 x i16>, ptr %add.ptr41, align 16
 
-  %mul.i966 = mul <8 x i16> %5, %1
-  %mul.i964 = mul <8 x i16> %7, %3
+  %mul.i966 = mul <8 x i16> %2, %0
+  %mul.i964 = mul <8 x i16> %3, %1
 
   %add.i961 = add <8 x i16> %mul.i966, %mul.i964
   %sub.i960 = sub <8 x i16> %mul.i966, %mul.i964
 
   %idx.ext = zext i32 %output_col to i64
 
-  %arrayidx404 = getelementptr inbounds i8*, i8** %output_buf, i64 6
-  %8 = load i8*, i8** %arrayidx404, align 8
-  %add.ptr406 = getelementptr inbounds i8, i8* %8, i64 %idx.ext
-  %9 = bitcast i8* %add.ptr406 to <8 x i16>*
-  store <8 x i16> %add.i961, <8 x i16>* %9, align 8
-
-  %arrayidx408 = getelementptr inbounds i8*, i8** %output_buf, i64 7
-  %10 = load i8*, i8** %arrayidx408, align 8
-  %add.ptr410 = getelementptr inbounds i8, i8* %10, i64 %idx.ext
-  %11 = bitcast i8* %add.ptr410 to <8 x i16>*
-  store <8 x i16> %sub.i960, <8 x i16>* %11, align 8
+  %arrayidx404 = getelementptr inbounds ptr, ptr %output_buf, i64 6
+  %4 = load ptr, ptr %arrayidx404, align 8
+  %add.ptr406 = getelementptr inbounds i8, ptr %4, i64 %idx.ext
+  store <8 x i16> %add.i961, ptr %add.ptr406, align 8
+
+  %arrayidx408 = getelementptr inbounds ptr, ptr %output_buf, i64 7
+  %5 = load ptr, ptr %arrayidx408, align 8
+  %add.ptr410 = getelementptr inbounds i8, ptr %5, i64 %idx.ext
+  store <8 x i16> %sub.i960, ptr %add.ptr410, align 8
 
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/pacbti-llvm-generated-funcs-1.ll b/llvm/test/CodeGen/AArch64/pacbti-llvm-generated-funcs-1.ll
index 98dcd16fadd46..4a2c17d8a6c4e 100644
--- a/llvm/test/CodeGen/AArch64/pacbti-llvm-generated-funcs-1.ll
+++ b/llvm/test/CodeGen/AArch64/pacbti-llvm-generated-funcs-1.ll
@@ -2,7 +2,7 @@
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-unknown-linux"
 
-@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* @asan.module_ctor, i8* null }]
+@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @asan.module_ctor, ptr null }]
 
 define dso_local i32 @f() #0 {
 entry:

diff  --git a/llvm/test/CodeGen/AArch64/pacbti-llvm-generated-funcs-2.ll b/llvm/test/CodeGen/AArch64/pacbti-llvm-generated-funcs-2.ll
index 3cb4505041f3b..23461dcc62d2b 100644
--- a/llvm/test/CodeGen/AArch64/pacbti-llvm-generated-funcs-2.ll
+++ b/llvm/test/CodeGen/AArch64/pacbti-llvm-generated-funcs-2.ll
@@ -4,7 +4,7 @@ target triple = "aarch64-unknown-linux"
 
 @__llvm_gcov_ctr = internal global [1 x i64] zeroinitializer
 @0 = private unnamed_addr constant [7 x i8] c"m.gcda\00", align 1
-@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 0, void ()* @__llvm_gcov_init, i8* null }]
+@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 0, ptr @__llvm_gcov_init, ptr null }]
 
 define dso_local i32 @f() local_unnamed_addr #0 {
 entry:
@@ -13,11 +13,11 @@ entry:
 ;; CHECK-LABEL: f:
 ;; CHECK: pacib x30, sp
 
-declare void @llvm_gcda_start_file(i8*, i32, i32) local_unnamed_addr
+declare void @llvm_gcda_start_file(ptr, i32, i32) local_unnamed_addr
 
 declare void @llvm_gcda_emit_function(i32, i32, i32) local_unnamed_addr
 
-declare void @llvm_gcda_emit_arcs(i32, i64*) local_unnamed_addr
+declare void @llvm_gcda_emit_arcs(i32, ptr) local_unnamed_addr
 
 declare void @llvm_gcda_summary_info() local_unnamed_addr
 
@@ -25,9 +25,9 @@ declare void @llvm_gcda_end_file() local_unnamed_addr
 
 define internal void @__llvm_gcov_writeout() unnamed_addr #1 {
 entry:
-  tail call void @llvm_gcda_start_file(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @0, i64 0, i64 0), i32 875575338, i32 0)
+  tail call void @llvm_gcda_start_file(ptr @0, i32 875575338, i32 0)
   tail call void @llvm_gcda_emit_function(i32 0, i32 0, i32 0)
-  tail call void @llvm_gcda_emit_arcs(i32 1, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr, i64 0, i64 0))
+  tail call void @llvm_gcda_emit_arcs(i32 1, ptr @__llvm_gcov_ctr)
   tail call void @llvm_gcda_summary_info()
   tail call void @llvm_gcda_end_file()
   ret void
@@ -39,17 +39,17 @@ entry:
 
 define internal void @__llvm_gcov_reset() unnamed_addr #2 {
 entry:
-  store i64 0, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr, i64 0, i64 0), align 8
+  store i64 0, ptr @__llvm_gcov_ctr, align 8
   ret void
 }
 ;; CHECK-LABEL: __llvm_gcov_reset:
 ;; CHECK:       pacib x30, sp
 
-declare void @llvm_gcov_init(void ()*, void ()*) local_unnamed_addr
+declare void @llvm_gcov_init(ptr, ptr) local_unnamed_addr
 
 define internal void @__llvm_gcov_init() unnamed_addr #1 {
 entry:
-  tail call void @llvm_gcov_init(void ()* nonnull @__llvm_gcov_writeout, void ()* nonnull @__llvm_gcov_reset)
+  tail call void @llvm_gcov_init(ptr nonnull @__llvm_gcov_writeout, ptr nonnull @__llvm_gcov_reset)
   ret void
 }
 ;; CHECK-LABEL: __llvm_gcov_init:

diff  --git a/llvm/test/CodeGen/AArch64/paired-load.ll b/llvm/test/CodeGen/AArch64/paired-load.ll
index e6d2d4f511645..d185e90014684 100644
--- a/llvm/test/CodeGen/AArch64/paired-load.ll
+++ b/llvm/test/CodeGen/AArch64/paired-load.ll
@@ -5,12 +5,12 @@ target triple = "aarch64-linux-gnu"
 ; Ensure we're generating ldp instructions instead of ldr Q.
 ; CHECK: ldp
 ; CHECK: stp
-define void @f(i64* %p, i64* %q) {
-  %addr2 = getelementptr i64, i64* %q, i32 1
-  %addr = getelementptr i64, i64* %p, i32 1
-  %x = load i64, i64* %p
-  %y = load i64, i64* %addr
-  store i64 %x, i64* %q
-  store i64 %y, i64* %addr2
+define void @f(ptr %p, ptr %q) {
+  %addr2 = getelementptr i64, ptr %q, i32 1
+  %addr = getelementptr i64, ptr %p, i32 1
+  %x = load i64, ptr %p
+  %y = load i64, ptr %addr
+  store i64 %x, ptr %q
+  store i64 %y, ptr %addr2
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll b/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll
index 067da9123fab8..c2ef2fa16a9a2 100644
--- a/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll
+++ b/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll
@@ -17,16 +17,16 @@ source_filename = "loop.c"
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-none-linux-gnu"
 
-@q = dso_local local_unnamed_addr global i32* null, align 8
+@q = dso_local local_unnamed_addr global ptr null, align 8
 
 ; Function Attrs: nofree norecurse nounwind
-define dso_local i32 @main(i32 %argc, i8** nocapture readnone %argv) local_unnamed_addr #0 {
+define dso_local i32 @main(i32 %argc, ptr nocapture readnone %argv) local_unnamed_addr #0 {
 entry:
   %cmp5 = icmp sgt i32 %argc, 0
   br i1 %cmp5, label %for.body.lr.ph, label %for.cond.cleanup
 
 for.body.lr.ph:                                   ; preds = %entry
-  %0 = load i32*, i32** @q, align 8, !tbaa !2
+  %0 = load ptr, ptr @q, align 8, !tbaa !2
   %1 = zext i32 %argc to i64
   %2 = add nsw i64 %1, -1
   %3 = lshr i64 %2, 5
@@ -47,12 +47,12 @@ vector.body:                                      ; preds = %vector.body, %vecto
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %offset.idx = shl i64 %index, 5
   %induction7 = or i64 %offset.idx, 32
-  %5 = getelementptr inbounds i32, i32* %0, i64 %offset.idx
-  %6 = getelementptr inbounds i32, i32* %0, i64 %induction7
+  %5 = getelementptr inbounds i32, ptr %0, i64 %offset.idx
+  %6 = getelementptr inbounds i32, ptr %0, i64 %induction7
   %7 = trunc i64 %offset.idx to i32
   %8 = trunc i64 %induction7 to i32
-  store i32 %7, i32* %5, align 4, !tbaa !6
-  store i32 %8, i32* %6, align 4, !tbaa !6
+  store i32 %7, ptr %5, align 4, !tbaa !6
+  store i32 %8, ptr %6, align 4, !tbaa !6
   %index.next = add i64 %index, 2
   %9 = icmp eq i64 %index.next, %n.vec
   br i1 %9, label %middle.block, label %vector.body, !llvm.loop !8
@@ -66,9 +66,9 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader, %for.body
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
-  %arrayidx = getelementptr inbounds i32, i32* %0, i64 %indvars.iv
+  %arrayidx = getelementptr inbounds i32, ptr %0, i64 %indvars.iv
   %10 = trunc i64 %indvars.iv to i32
-  store i32 %10, i32* %arrayidx, align 4, !tbaa !6
+  store i32 %10, ptr %arrayidx, align 4, !tbaa !6
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 32
   %cmp = icmp ult i64 %indvars.iv.next, %1
   br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !10

diff  --git a/llvm/test/CodeGen/AArch64/pcsections.ll b/llvm/test/CodeGen/AArch64/pcsections.ll
index a98a8ab5c5070..8d09c0165c8e1 100644
--- a/llvm/test/CodeGen/AArch64/pcsections.ll
+++ b/llvm/test/CodeGen/AArch64/pcsections.ll
@@ -33,7 +33,7 @@ define i64 @multiple() !pcsections !0 {
 ; CHECK-NEXT:  .word	21264
 ; CHECK-NEXT:  .text
 entry:
-  %0 = load i64, i64* @bar, align 8, !pcsections !1
+  %0 = load i64, ptr @bar, align 8, !pcsections !1
   ret i64 %0
 }
 
@@ -51,8 +51,8 @@ define i64 @test_simple_atomic() {
 ; LARGE-NEXT:  .xword	.Lpcsection1-.Lpcsection_base3
 ; CHECK-NEXT:  .text
 entry:
-  %0 = load atomic i64, i64* @foo monotonic, align 8, !pcsections !0
-  %1 = load i64, i64* @bar, align 8
+  %0 = load atomic i64, ptr @foo monotonic, align 8, !pcsections !0
+  %1 = load i64, ptr @bar, align 8
   %add = add nsw i64 %1, %0
   ret i64 %add
 }
@@ -103,10 +103,10 @@ define i64 @test_complex_atomic() {
 ; CHECK-UNOPT: .word	.Lpcsection13-.Lpcsection_base15
 ; CHECK-NEXT:  .text
 entry:
-  %0 = atomicrmw add i64* @foo, i64 1 monotonic, align 8, !pcsections !0
-  %1 = load i64, i64* @bar, align 8
+  %0 = atomicrmw add ptr @foo, i64 1 monotonic, align 8, !pcsections !0
+  %1 = load i64, ptr @bar, align 8
   %inc = add nsw i64 %1, 1
-  store i64 %inc, i64* @bar, align 8
+  store i64 %inc, ptr @bar, align 8
   %add = add nsw i64 %1, %0
   ret i64 %add
 }

diff  --git a/llvm/test/CodeGen/AArch64/peephole-and-tst.ll b/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
index ff5a0164eb955..3b3ef4a7ec53c 100644
--- a/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
+++ b/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
 
-%struct.anon = type { i32*, i32* }
+%struct.anon = type { ptr, ptr }
 
-@ptr_wrapper = common global %struct.anon* null, align 8
+@ptr_wrapper = common global ptr null, align 8
 
 define i32 @test_func_i32_two_uses(i32 %in, i32 %bit, i32 %mask) {
 ; CHECK-LABEL: test_func_i32_two_uses:
@@ -36,8 +36,8 @@ define i32 @test_func_i32_two_uses(i32 %in, i32 %bit, i32 %mask) {
 ; CHECK-NEXT:  .LBB0_6: // %do.end
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load %struct.anon*, %struct.anon** @ptr_wrapper, align 8
-  %result = getelementptr inbounds %struct.anon, %struct.anon* %0, i64 0, i32 1
+  %0 = load ptr, ptr @ptr_wrapper, align 8
+  %result = getelementptr inbounds %struct.anon, ptr %0, i64 0, i32 1
   %tobool2 = icmp ne i32 %mask, 0
   br label %do.body
 
@@ -57,7 +57,7 @@ do.body:                                          ; preds = %4, %entry
   br i1 %dummy_or, label %3, label %4
 
 3:                                                ; preds = %do.body
-  store i32* null, i32** %result, align 8
+  store ptr null, ptr %result, align 8
   br label %4
 
 4:                                                ; preds = %do.body, %3
@@ -93,8 +93,8 @@ define i32 @test_func_i64_one_use(i64 %in, i64 %bit, i64 %mask) {
 ; CHECK-NEXT:  .LBB1_4: // %do.end
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load %struct.anon*, %struct.anon** @ptr_wrapper, align 8
-  %result = getelementptr inbounds %struct.anon, %struct.anon* %0, i64 0, i32 1
+  %0 = load ptr, ptr @ptr_wrapper, align 8
+  %result = getelementptr inbounds %struct.anon, ptr %0, i64 0, i32 1
   %tobool2 = icmp ne i64 %mask, 0
   br label %do.body
 
@@ -111,7 +111,7 @@ do.body:                                          ; preds = %4, %entry
   br i1 %2, label %3, label %4
 
 3:                                                ; preds = %do.body
-  store i32* null, i32** %result, align 8
+  store ptr null, ptr %result, align 8
   br label %4
 
 4:                                                ; preds = %do.body, %3

diff  --git a/llvm/test/CodeGen/AArch64/pic-eh-stubs.ll b/llvm/test/CodeGen/AArch64/pic-eh-stubs.ll
index a438fea89012e..3d072bc892e97 100644
--- a/llvm/test/CodeGen/AArch64/pic-eh-stubs.ll
+++ b/llvm/test/CodeGen/AArch64/pic-eh-stubs.ll
@@ -19,26 +19,25 @@
 ; CHECK: .L_ZTIi.DW.stub:
 ; CHECK-NEXT: .xword _ZTIi
 
-@_ZTIi = external constant i8*
+@_ZTIi = external constant ptr
 
-define i32 @_Z3barv() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @_Z3barv() personality ptr @__gxx_personality_v0 {
 entry:
   invoke void @_Z3foov()
           to label %return unwind label %lpad
 
 lpad:                                             ; preds = %entry
-  %0 = landingpad { i8*, i32 }
-          catch i8* bitcast (i8** @_ZTIi to i8*)
-  %1 = extractvalue { i8*, i32 } %0, 1
-  %2 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) nounwind
+  %0 = landingpad { ptr, i32 }
+          catch ptr @_ZTIi
+  %1 = extractvalue { ptr, i32 } %0, 1
+  %2 = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIi) nounwind
   %matches = icmp eq i32 %1, %2
   br i1 %matches, label %catch, label %eh.resume
 
 catch:                                            ; preds = %lpad
-  %3 = extractvalue { i8*, i32 } %0, 0
-  %4 = tail call i8* @__cxa_begin_catch(i8* %3) nounwind
-  %5 = bitcast i8* %4 to i32*
-  %exn.scalar = load i32, i32* %5, align 4
+  %3 = extractvalue { ptr, i32 } %0, 0
+  %4 = tail call ptr @__cxa_begin_catch(ptr %3) nounwind
+  %exn.scalar = load i32, ptr %4, align 4
   tail call void @__cxa_end_catch() nounwind
   br label %return
 
@@ -47,15 +46,15 @@ return:                                           ; preds = %entry, %catch
   ret i32 %retval.0
 
 eh.resume:                                        ; preds = %lpad
-  resume { i8*, i32 } %0
+  resume { ptr, i32 } %0
 }
 
 declare void @_Z3foov()
 
 declare i32 @__gxx_personality_v0(...)
 
-declare i32 @llvm.eh.typeid.for(i8*) nounwind readnone
+declare i32 @llvm.eh.typeid.for(ptr) nounwind readnone
 
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
 
 declare void @__cxa_end_catch()

diff  --git a/llvm/test/CodeGen/AArch64/pie.ll b/llvm/test/CodeGen/AArch64/pie.ll
index f08020eb875fc..14476e8e45927 100644
--- a/llvm/test/CodeGen/AArch64/pie.ll
+++ b/llvm/test/CodeGen/AArch64/pie.ll
@@ -2,11 +2,11 @@
 
 @g1 = dso_local global i32 42
 
-define dso_local i32* @get_g1() {
+define dso_local ptr @get_g1() {
 ; CHECK:      get_g1:
 ; CHECK:        adrp x0, g1
 ; CHECK-NEXT:   add  x0, x0, :lo12:g1
-  ret i32* @g1
+  ret ptr @g1
 }
 
 !llvm.module.flags = !{!0}

diff  --git a/llvm/test/CodeGen/AArch64/popcount.ll b/llvm/test/CodeGen/AArch64/popcount.ll
index 2d517e9af4f01..f3a90035c0b30 100644
--- a/llvm/test/CodeGen/AArch64/popcount.ll
+++ b/llvm/test/CodeGen/AArch64/popcount.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -O0 -mtriple=aarch64-unknown-unknown | FileCheck %s
 
 ; Function Attrs: nobuiltin nounwind readonly
-define i8 @popcount128(i128* nocapture nonnull readonly %0) {
+define i8 @popcount128(ptr nocapture nonnull readonly %0) {
 ; CHECK-LABEL: popcount128:
 ; CHECK:       // %bb.0: // %Entry
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -14,7 +14,7 @@ define i8 @popcount128(i128* nocapture nonnull readonly %0) {
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
 Entry:
-  %1 = load i128, i128* %0, align 16
+  %1 = load i128, ptr %0, align 16
   %2 = tail call i128 @llvm.ctpop.i128(i128 %1)
   %3 = trunc i128 %2 to i8
   ret i8 %3
@@ -24,7 +24,7 @@ Entry:
 declare i128 @llvm.ctpop.i128(i128)
 
 ; Function Attrs: nobuiltin nounwind readonly
-define i16 @popcount256(i256* nocapture nonnull readonly %0) {
+define i16 @popcount256(ptr nocapture nonnull readonly %0) {
 ; CHECK-LABEL: popcount256:
 ; CHECK:       // %bb.0: // %Entry
 ; CHECK-NEXT:    ldr x11, [x0]
@@ -61,7 +61,7 @@ define i16 @popcount256(i256* nocapture nonnull readonly %0) {
 ; CHECK-NEXT:    mov w0, w8
 ; CHECK-NEXT:    ret
 Entry:
-  %1 = load i256, i256* %0, align 16
+  %1 = load i256, ptr %0, align 16
   %2 = tail call i256 @llvm.ctpop.i256(i256 %1)
   %3 = trunc i256 %2 to i16
   ret i16 %3

diff  --git a/llvm/test/CodeGen/AArch64/postra-mi-sched.ll b/llvm/test/CodeGen/AArch64/postra-mi-sched.ll
index e7f3f5515a7ae..7688973a1f2f5 100644
--- a/llvm/test/CodeGen/AArch64/postra-mi-sched.ll
+++ b/llvm/test/CodeGen/AArch64/postra-mi-sched.ll
@@ -16,7 +16,7 @@ entry:
   %mul = fmul float %s2, %s3
   %conv = fpext float %mul to double
   %div = fdiv double %d, %conv
-  store double %div, double* @d1, align 8
+  store double %div, ptr @d1, align 8
   %factor = shl i32 %i3, 1
   %add1 = add i32 %i2, 4
   %add2 = add i32 %add1, %factor

diff  --git a/llvm/test/CodeGen/AArch64/pr27816.ll b/llvm/test/CodeGen/AArch64/pr27816.ll
index df15755cf3f5c..3396a05d8fd7e 100644
--- a/llvm/test/CodeGen/AArch64/pr27816.ll
+++ b/llvm/test/CodeGen/AArch64/pr27816.ll
@@ -11,36 +11,35 @@
 ; CHECK-NOT: strb
 ; CHECK: str wzr, [x1, #8]
 ; CHECK-NOT: strb
-define void @merge_const_store(i32 %count, %struct.A* nocapture %p)  {
+define void @merge_const_store(i32 %count, ptr nocapture %p)  {
   %1 = icmp sgt i32 %count, 0
   br i1 %1, label %.lr.ph, label %._crit_edge
 .lr.ph:
   %i.02 = phi i32 [ %add, %.lr.ph ], [ 0, %0 ]
-  %.01 = phi %struct.A* [ %addr, %.lr.ph ], [ %p, %0 ]
-  %a2 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 0
-  store i8 1, i8* %a2, align 1
-  %a3 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 1
-  store i8 2, i8* %a3, align 1
-  %a4 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 2
-  store i8 3, i8* %a4, align 1
-  %a5 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 3
-  store i8 4, i8* %a5, align 1
-  %a6 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 4
-  store i8 5, i8* %a6, align 1
-  %a7 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 5
-  store i8 6, i8* %a7, align 1
-  %a8 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 6
-  store i8 7, i8* %a8, align 1
-  %a9 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 7
-  store i8 8, i8* %a9, align 1
+  %.01 = phi ptr [ %addr, %.lr.ph ], [ %p, %0 ]
+  store i8 1, ptr %.01, align 1
+  %a3 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 1
+  store i8 2, ptr %a3, align 1
+  %a4 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 2
+  store i8 3, ptr %a4, align 1
+  %a5 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 3
+  store i8 4, ptr %a5, align 1
+  %a6 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 4
+  store i8 5, ptr %a6, align 1
+  %a7 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 5
+  store i8 6, ptr %a7, align 1
+  %a8 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 6
+  store i8 7, ptr %a8, align 1
+  %a9 = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 7
+  store i8 8, ptr %a9, align 1
 
   ;
-  %addr_last = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 8
-  store i32 0, i32* %addr_last, align 4
+  %addr_last = getelementptr inbounds %struct.A, ptr %.01, i64 0, i32 8
+  store i32 0, ptr %addr_last, align 4
 
 
   %add = add nsw i32 %i.02, 1
-  %addr = getelementptr inbounds %struct.A, %struct.A* %.01, i64 1
+  %addr = getelementptr inbounds %struct.A, ptr %.01, i64 1
   %exitcond = icmp eq i32 %add, %count
   br i1 %exitcond, label %._crit_edge, label %.lr.ph
 ._crit_edge:

diff  --git a/llvm/test/CodeGen/AArch64/pr33172.ll b/llvm/test/CodeGen/AArch64/pr33172.ll
index e1b4cdc6603c9..8cdc14fdb6d31 100644
--- a/llvm/test/CodeGen/AArch64/pr33172.ll
+++ b/llvm/test/CodeGen/AArch64/pr33172.ll
@@ -13,20 +13,20 @@ target triple = "arm64-apple-ios10.3.0"
 ; Function Attrs: nounwind ssp
 define void @pr33172() local_unnamed_addr  {
 entry:
-  %wide.load8281058.3 = load i64, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.b, i64 0, i64 12) to i64*), align 8
-  %wide.load8291059.3 = load i64, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.b, i64 0, i64 14) to i64*), align 8
-  store i64 %wide.load8281058.3, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.x, i64 0, i64 12) to i64*), align 8
-  store i64 %wide.load8291059.3, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.x, i64 0, i64 14) to i64*), align 8
-  %wide.load8281058.4 = load i64, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.b, i64 0, i64 16) to i64*), align 8
-  %wide.load8291059.4 = load i64, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.b, i64 0, i64 18) to i64*), align 8
-  store i64 %wide.load8281058.4, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.x, i64 0, i64 16) to i64*), align 8
-  store i64 %wide.load8291059.4, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.x, i64 0, i64 18) to i64*), align 8
-  tail call void @llvm.memset.p0i8.i64(i8* align 8 bitcast ([200 x float]* @main.b to i8*), i8 0, i64 undef, i1 false) #2
+  %wide.load8281058.3 = load i64, ptr getelementptr inbounds ([200 x float], ptr @main.b, i64 0, i64 12), align 8
+  %wide.load8291059.3 = load i64, ptr getelementptr inbounds ([200 x float], ptr @main.b, i64 0, i64 14), align 8
+  store i64 %wide.load8281058.3, ptr getelementptr inbounds ([200 x float], ptr @main.x, i64 0, i64 12), align 8
+  store i64 %wide.load8291059.3, ptr getelementptr inbounds ([200 x float], ptr @main.x, i64 0, i64 14), align 8
+  %wide.load8281058.4 = load i64, ptr getelementptr inbounds ([200 x float], ptr @main.b, i64 0, i64 16), align 8
+  %wide.load8291059.4 = load i64, ptr getelementptr inbounds ([200 x float], ptr @main.b, i64 0, i64 18), align 8
+  store i64 %wide.load8281058.4, ptr getelementptr inbounds ([200 x float], ptr @main.x, i64 0, i64 16), align 8
+  store i64 %wide.load8291059.4, ptr getelementptr inbounds ([200 x float], ptr @main.x, i64 0, i64 18), align 8
+  tail call void @llvm.memset.p0.i64(ptr align 8 @main.b, i8 0, i64 undef, i1 false) #2
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #1
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1) #1
 
 attributes #1 = { argmemonly nounwind }
 attributes #2 = { nounwind }

diff  --git a/llvm/test/CodeGen/AArch64/pr51476.ll b/llvm/test/CodeGen/AArch64/pr51476.ll
index b71aed5c322ae..ce565a186ae05 100644
--- a/llvm/test/CodeGen/AArch64/pr51476.ll
+++ b/llvm/test/CodeGen/AArch64/pr51476.ll
@@ -19,8 +19,8 @@ define void @test(i8 %arg) nounwind {
   %tmp = alloca i8
   %cmp1 = icmp ne i8 %arg, 1
   %zext = zext i1 %cmp1 to i8
-  store i8 %zext, i8* %tmp
-  %zext2 = load i8, i8* %tmp
+  store i8 %zext, ptr %tmp
+  %zext2 = load i8, ptr %tmp
   %cmp2 = icmp eq i8 %zext2, 3
   br i1 %cmp2, label %exit, label %do_call
 

diff  --git a/llvm/test/CodeGen/AArch64/preferred-alignment.ll b/llvm/test/CodeGen/AArch64/preferred-alignment.ll
index ffff7e1d02fb3..c9c5212d479d9 100644
--- a/llvm/test/CodeGen/AArch64/preferred-alignment.ll
+++ b/llvm/test/CodeGen/AArch64/preferred-alignment.ll
@@ -9,19 +9,19 @@ entry:
 ; CHECK-NEXT:	add	x1, sp, #8
   %i = alloca i32, align 4
 ; CHECK-NEXT:	add	x2, sp, #4
-  %call = call i32 @bar(i8* %c, i16* %s, i32* %i)
-  %0 = load i8, i8* %c, align 1
+  %call = call i32 @bar(ptr %c, ptr %s, ptr %i)
+  %0 = load i8, ptr %c, align 1
   %conv = zext i8 %0 to i32
   %add = add nsw i32 %call, %conv
-  %1 = load i16, i16* %s, align 2
+  %1 = load i16, ptr %s, align 2
   %conv1 = sext i16 %1 to i32
   %add2 = add nsw i32 %add, %conv1
-  %2 = load i32, i32* %i, align 4
+  %2 = load i32, ptr %i, align 4
   %add3 = add nsw i32 %add2, %2
   ret i32 %add3
 }
 
-declare i32 @bar(i8*, i16*, i32*) #1
+declare i32 @bar(ptr, ptr, ptr) #1
 
 attributes #0 = { nounwind "frame-pointer"="none" }
 attributes #1 = { "frame-pointer"="none" }

diff  --git a/llvm/test/CodeGen/AArch64/prefixdata.ll b/llvm/test/CodeGen/AArch64/prefixdata.ll
index f62734c16e529..37d4cc02a44cb 100644
--- a/llvm/test/CodeGen/AArch64/prefixdata.ll
+++ b/llvm/test/CodeGen/AArch64/prefixdata.ll
@@ -22,7 +22,7 @@ define void @f() prefix i32 1 {
 ; ELF: .type g, at function
 ; ELF-NEXT: .xword	i
 ; ELF-NEXT: g:
-define void @g() prefix i32* @i {
+define void @g() prefix ptr @i {
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/pull-binop-through-shift.ll b/llvm/test/CodeGen/AArch64/pull-binop-through-shift.ll
index 6fee5984c8ce8..b3fbe8bdb6e30 100644
--- a/llvm/test/CodeGen/AArch64/pull-binop-through-shift.ll
+++ b/llvm/test/CodeGen/AArch64/pull-binop-through-shift.ll
@@ -3,7 +3,7 @@
 
 ; shift left
 
-define i32 @and_signbit_shl(i32 %x, i32* %dst) {
+define i32 @and_signbit_shl(i32 %x, ptr %dst) {
 ; CHECK-LABEL: and_signbit_shl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl w8, w0, #8
@@ -12,10 +12,10 @@ define i32 @and_signbit_shl(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
   %r = shl i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @and_nosignbit_shl(i32 %x, i32* %dst) {
+define i32 @and_nosignbit_shl(i32 %x, ptr %dst) {
 ; CHECK-LABEL: and_nosignbit_shl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl w8, w0, #8
@@ -24,11 +24,11 @@ define i32 @and_nosignbit_shl(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
   %r = shl i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @or_signbit_shl(i32 %x, i32* %dst) {
+define i32 @or_signbit_shl(i32 %x, ptr %dst) {
 ; CHECK-LABEL: or_signbit_shl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl w8, w0, #8
@@ -37,10 +37,10 @@ define i32 @or_signbit_shl(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
   %r = shl i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @or_nosignbit_shl(i32 %x, i32* %dst) {
+define i32 @or_nosignbit_shl(i32 %x, ptr %dst) {
 ; CHECK-LABEL: or_nosignbit_shl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl w8, w0, #8
@@ -49,11 +49,11 @@ define i32 @or_nosignbit_shl(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
   %r = shl i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @xor_signbit_shl(i32 %x, i32* %dst) {
+define i32 @xor_signbit_shl(i32 %x, ptr %dst) {
 ; CHECK-LABEL: xor_signbit_shl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl w8, w0, #8
@@ -62,10 +62,10 @@ define i32 @xor_signbit_shl(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
   %r = shl i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @xor_nosignbit_shl(i32 %x, i32* %dst) {
+define i32 @xor_nosignbit_shl(i32 %x, ptr %dst) {
 ; CHECK-LABEL: xor_nosignbit_shl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl w8, w0, #8
@@ -74,11 +74,11 @@ define i32 @xor_nosignbit_shl(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
   %r = shl i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @add_signbit_shl(i32 %x, i32* %dst) {
+define i32 @add_signbit_shl(i32 %x, ptr %dst) {
 ; CHECK-LABEL: add_signbit_shl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #-16777216
@@ -87,10 +87,10 @@ define i32 @add_signbit_shl(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
   %r = shl i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @add_nosignbit_shl(i32 %x, i32* %dst) {
+define i32 @add_nosignbit_shl(i32 %x, ptr %dst) {
 ; CHECK-LABEL: add_nosignbit_shl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #-16777216
@@ -99,13 +99,13 @@ define i32 @add_nosignbit_shl(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
   %r = shl i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
 ; logical shift right
 
-define i32 @and_signbit_lshr(i32 %x, i32* %dst) {
+define i32 @and_signbit_lshr(i32 %x, ptr %dst) {
 ; CHECK-LABEL: and_signbit_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #8
@@ -114,10 +114,10 @@ define i32 @and_signbit_lshr(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
   %r = lshr i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @and_nosignbit_lshr(i32 %x, i32* %dst) {
+define i32 @and_nosignbit_lshr(i32 %x, ptr %dst) {
 ; CHECK-LABEL: and_nosignbit_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #8
@@ -126,11 +126,11 @@ define i32 @and_nosignbit_lshr(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
   %r = lshr i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @or_signbit_lshr(i32 %x, i32* %dst) {
+define i32 @or_signbit_lshr(i32 %x, ptr %dst) {
 ; CHECK-LABEL: or_signbit_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #8
@@ -139,10 +139,10 @@ define i32 @or_signbit_lshr(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
   %r = lshr i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @or_nosignbit_lshr(i32 %x, i32* %dst) {
+define i32 @or_nosignbit_lshr(i32 %x, ptr %dst) {
 ; CHECK-LABEL: or_nosignbit_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #8
@@ -151,11 +151,11 @@ define i32 @or_nosignbit_lshr(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
   %r = lshr i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @xor_signbit_lshr(i32 %x, i32* %dst) {
+define i32 @xor_signbit_lshr(i32 %x, ptr %dst) {
 ; CHECK-LABEL: xor_signbit_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #8
@@ -164,10 +164,10 @@ define i32 @xor_signbit_lshr(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
   %r = lshr i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @xor_nosignbit_lshr(i32 %x, i32* %dst) {
+define i32 @xor_nosignbit_lshr(i32 %x, ptr %dst) {
 ; CHECK-LABEL: xor_nosignbit_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #8
@@ -176,11 +176,11 @@ define i32 @xor_nosignbit_lshr(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
   %r = lshr i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @add_signbit_lshr(i32 %x, i32* %dst) {
+define i32 @add_signbit_lshr(i32 %x, ptr %dst) {
 ; CHECK-LABEL: add_signbit_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub w8, w0, #16, lsl #12 // =65536
@@ -189,10 +189,10 @@ define i32 @add_signbit_lshr(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
   %r = lshr i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @add_nosignbit_lshr(i32 %x, i32* %dst) {
+define i32 @add_nosignbit_lshr(i32 %x, ptr %dst) {
 ; CHECK-LABEL: add_nosignbit_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #2147418112
@@ -202,13 +202,13 @@ define i32 @add_nosignbit_lshr(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
   %r = lshr i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
 ; arithmetic shift right
 
-define i32 @and_signbit_ashr(i32 %x, i32* %dst) {
+define i32 @and_signbit_ashr(i32 %x, ptr %dst) {
 ; CHECK-LABEL: and_signbit_ashr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr w8, w0, #8
@@ -217,10 +217,10 @@ define i32 @and_signbit_ashr(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
   %r = ashr i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @and_nosignbit_ashr(i32 %x, i32* %dst) {
+define i32 @and_nosignbit_ashr(i32 %x, ptr %dst) {
 ; CHECK-LABEL: and_nosignbit_ashr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #8
@@ -229,11 +229,11 @@ define i32 @and_nosignbit_ashr(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
   %r = ashr i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @or_signbit_ashr(i32 %x, i32* %dst) {
+define i32 @or_signbit_ashr(i32 %x, ptr %dst) {
 ; CHECK-LABEL: or_signbit_ashr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #8
@@ -242,10 +242,10 @@ define i32 @or_signbit_ashr(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
   %r = ashr i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @or_nosignbit_ashr(i32 %x, i32* %dst) {
+define i32 @or_nosignbit_ashr(i32 %x, ptr %dst) {
 ; CHECK-LABEL: or_nosignbit_ashr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr w8, w0, #8
@@ -254,11 +254,11 @@ define i32 @or_nosignbit_ashr(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
   %r = ashr i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @xor_signbit_ashr(i32 %x, i32* %dst) {
+define i32 @xor_signbit_ashr(i32 %x, ptr %dst) {
 ; CHECK-LABEL: xor_signbit_ashr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr w8, w0, #8
@@ -267,10 +267,10 @@ define i32 @xor_signbit_ashr(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
   %r = ashr i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @xor_nosignbit_ashr(i32 %x, i32* %dst) {
+define i32 @xor_nosignbit_ashr(i32 %x, ptr %dst) {
 ; CHECK-LABEL: xor_nosignbit_ashr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr w8, w0, #8
@@ -279,11 +279,11 @@ define i32 @xor_nosignbit_ashr(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
   %r = ashr i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @add_signbit_ashr(i32 %x, i32* %dst) {
+define i32 @add_signbit_ashr(i32 %x, ptr %dst) {
 ; CHECK-LABEL: add_signbit_ashr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub w8, w0, #16, lsl #12 // =65536
@@ -292,10 +292,10 @@ define i32 @add_signbit_ashr(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
   %r = ashr i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @add_nosignbit_ashr(i32 %x, i32* %dst) {
+define i32 @add_nosignbit_ashr(i32 %x, ptr %dst) {
 ; CHECK-LABEL: add_nosignbit_ashr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #2147418112
@@ -305,6 +305,6 @@ define i32 @add_nosignbit_ashr(i32 %x, i32* %dst) {
 ; CHECK-NEXT:    ret
   %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
   %r = ashr i32 %t0, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }

diff --git a/llvm/test/CodeGen/AArch64/pull-conditional-binop-through-shift.ll b/llvm/test/CodeGen/AArch64/pull-conditional-binop-through-shift.ll
index 672d97f354da7..879c56f6f7c8b 100644
--- a/llvm/test/CodeGen/AArch64/pull-conditional-binop-through-shift.ll
+++ b/llvm/test/CodeGen/AArch64/pull-conditional-binop-through-shift.ll
@@ -3,7 +3,7 @@
 
 ; shift left
 
-define i32 @and_signbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+define i32 @and_signbit_select_shl(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: and_signbit_select_shl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xff0000
@@ -15,10 +15,10 @@ define i32 @and_signbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
   %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = shl i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @and_nosignbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+define i32 @and_nosignbit_select_shl(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: and_nosignbit_select_shl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xff0000
@@ -30,11 +30,11 @@ define i32 @and_nosignbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
   %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = shl i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @or_signbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+define i32 @or_signbit_select_shl(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: or_signbit_select_shl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    orr w8, w0, #0xff0000
@@ -46,10 +46,10 @@ define i32 @or_signbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
   %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = shl i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @or_nosignbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+define i32 @or_nosignbit_select_shl(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: or_nosignbit_select_shl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    orr w8, w0, #0xff0000
@@ -61,11 +61,11 @@ define i32 @or_nosignbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
   %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = shl i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @xor_signbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+define i32 @xor_signbit_select_shl(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: xor_signbit_select_shl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    eor w8, w0, #0xff0000
@@ -77,10 +77,10 @@ define i32 @xor_signbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
   %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = shl i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @xor_nosignbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+define i32 @xor_nosignbit_select_shl(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: xor_nosignbit_select_shl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    eor w8, w0, #0xff0000
@@ -92,11 +92,11 @@ define i32 @xor_nosignbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
   %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = shl i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @add_signbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+define i32 @add_signbit_select_shl(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: add_signbit_select_shl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub w8, w0, #16, lsl #12 // =65536
@@ -108,10 +108,10 @@ define i32 @add_signbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
   %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = shl i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @add_nosignbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
+define i32 @add_nosignbit_select_shl(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: add_nosignbit_select_shl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #2147418112
@@ -124,13 +124,13 @@ define i32 @add_nosignbit_select_shl(i32 %x, i1 %cond, i32* %dst) {
   %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = shl i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
 ; logical shift right
 
-define i32 @and_signbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+define i32 @and_signbit_select_lshr(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: and_signbit_select_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xffff0000
@@ -142,10 +142,10 @@ define i32 @and_signbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
   %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = lshr i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @and_nosignbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+define i32 @and_nosignbit_select_lshr(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: and_nosignbit_select_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0x7fff0000
@@ -157,11 +157,11 @@ define i32 @and_nosignbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
   %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = lshr i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @or_signbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+define i32 @or_signbit_select_lshr(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: or_signbit_select_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    orr w8, w0, #0xffff0000
@@ -173,10 +173,10 @@ define i32 @or_signbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
   %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = lshr i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @or_nosignbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+define i32 @or_nosignbit_select_lshr(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: or_nosignbit_select_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    orr w8, w0, #0x7fff0000
@@ -188,11 +188,11 @@ define i32 @or_nosignbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
   %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = lshr i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @xor_signbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+define i32 @xor_signbit_select_lshr(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: xor_signbit_select_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    eor w8, w0, #0xffff0000
@@ -204,10 +204,10 @@ define i32 @xor_signbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
   %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = lshr i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @xor_nosignbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+define i32 @xor_nosignbit_select_lshr(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: xor_nosignbit_select_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    eor w8, w0, #0x7fff0000
@@ -219,11 +219,11 @@ define i32 @xor_nosignbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
   %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = lshr i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @add_signbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+define i32 @add_signbit_select_lshr(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: add_signbit_select_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub w8, w0, #16, lsl #12 // =65536
@@ -235,10 +235,10 @@ define i32 @add_signbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
   %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = lshr i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @add_nosignbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
+define i32 @add_nosignbit_select_lshr(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: add_nosignbit_select_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #2147418112
@@ -251,13 +251,13 @@ define i32 @add_nosignbit_select_lshr(i32 %x, i1 %cond, i32* %dst) {
   %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = lshr i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
 ; arithmetic shift right
 
-define i32 @and_signbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+define i32 @and_signbit_select_ashr(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: and_signbit_select_ashr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xffff0000
@@ -269,10 +269,10 @@ define i32 @and_signbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
   %t0 = and i32 %x, 4294901760 ; 0xFFFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = ashr i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @and_nosignbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+define i32 @and_nosignbit_select_ashr(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: and_nosignbit_select_ashr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0x7fff0000
@@ -284,11 +284,11 @@ define i32 @and_nosignbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
   %t0 = and i32 %x, 2147418112 ; 0x7FFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = ashr i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @or_signbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+define i32 @or_signbit_select_ashr(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: or_signbit_select_ashr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    orr w8, w0, #0xffff0000
@@ -300,10 +300,10 @@ define i32 @or_signbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
   %t0 = or i32 %x, 4294901760 ; 0xFFFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = ashr i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @or_nosignbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+define i32 @or_nosignbit_select_ashr(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: or_nosignbit_select_ashr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    orr w8, w0, #0x7fff0000
@@ -315,11 +315,11 @@ define i32 @or_nosignbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
   %t0 = or i32 %x, 2147418112 ; 0x7FFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = ashr i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @xor_signbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+define i32 @xor_signbit_select_ashr(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: xor_signbit_select_ashr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    eor w8, w0, #0xffff0000
@@ -331,10 +331,10 @@ define i32 @xor_signbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
   %t0 = xor i32 %x, 4294901760 ; 0xFFFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = ashr i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @xor_nosignbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+define i32 @xor_nosignbit_select_ashr(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: xor_nosignbit_select_ashr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    eor w8, w0, #0x7fff0000
@@ -346,11 +346,11 @@ define i32 @xor_nosignbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
   %t0 = xor i32 %x, 2147418112 ; 0x7FFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = ashr i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
 
-define i32 @add_signbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+define i32 @add_signbit_select_ashr(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: add_signbit_select_ashr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub w8, w0, #16, lsl #12 // =65536
@@ -362,10 +362,10 @@ define i32 @add_signbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
   %t0 = add i32 %x, 4294901760 ; 0xFFFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = ashr i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }
-define i32 @add_nosignbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
+define i32 @add_nosignbit_select_ashr(i32 %x, i1 %cond, ptr %dst) {
 ; CHECK-LABEL: add_nosignbit_select_ashr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #2147418112
@@ -378,6 +378,6 @@ define i32 @add_nosignbit_select_ashr(i32 %x, i1 %cond, i32* %dst) {
   %t0 = add i32 %x, 2147418112 ; 0x7FFF0000
   %t1 = select i1 %cond, i32 %t0, i32 %x
   %r = ashr i32 %t1, 8
-  store i32 %r, i32* %dst
+  store i32 %r, ptr %dst
   ret i32 %r
 }

diff --git a/llvm/test/CodeGen/AArch64/ragreedy-csr.ll b/llvm/test/CodeGen/AArch64/ragreedy-csr.ll
index a7e3015346f7e..e5c1d1a04a99e 100644
--- a/llvm/test/CodeGen/AArch64/ragreedy-csr.ll
+++ b/llvm/test/CodeGen/AArch64/ragreedy-csr.ll
@@ -19,47 +19,45 @@
 ; CHECK-NOT: ldp x23, x24
 ; CHECK-NOT: ldp x25,
 
-%struct.List_o_links_struct = type { i32, i32, i32, %struct.List_o_links_struct* }
-%struct.Connector_struct = type { i16, i16, i8, i8, %struct.Connector_struct*, i8* }
-%struct._RuneLocale = type { [8 x i8], [32 x i8], i32 (i8*, i64, i8**)*, i32 (i32, i8*, i64, i8**)*, i32, [256 x i32], [256 x i32], [256 x i32], %struct._RuneRange, %struct._RuneRange, %struct._RuneRange, i8*, i32, i32, %struct._RuneCharClass* }
-%struct._RuneRange = type { i32, %struct._RuneEntry* }
-%struct._RuneEntry = type { i32, i32, i32, i32* }
+%struct.List_o_links_struct = type { i32, i32, i32, ptr }
+%struct.Connector_struct = type { i16, i16, i8, i8, ptr, ptr }
+%struct._RuneLocale = type { [8 x i8], [32 x i8], ptr, ptr, i32, [256 x i32], [256 x i32], [256 x i32], %struct._RuneRange, %struct._RuneRange, %struct._RuneRange, ptr, i32, i32, ptr }
+%struct._RuneRange = type { i32, ptr }
+%struct._RuneEntry = type { i32, i32, i32, ptr }
 %struct._RuneCharClass = type { [14 x i8], i32 }
 %struct.Exp_struct = type { i8, i8, i8, i8, %union.anon }
-%union.anon = type { %struct.E_list_struct* }
-%struct.E_list_struct = type { %struct.E_list_struct*, %struct.Exp_struct* }
-%struct.domain_struct = type { i8*, i32, %struct.List_o_links_struct*, i32, i32, %struct.d_tree_leaf_struct*, %struct.domain_struct* }
-%struct.d_tree_leaf_struct = type { %struct.domain_struct*, i32, %struct.d_tree_leaf_struct* }
+%union.anon = type { ptr }
+%struct.E_list_struct = type { ptr, ptr }
+%struct.domain_struct = type { ptr, i32, ptr, i32, i32, ptr, ptr }
+%struct.d_tree_leaf_struct = type { ptr, i32, ptr }
 @_DefaultRuneLocale = external global %struct._RuneLocale
 declare i32 @__maskrune(i32, i64) #7
-define fastcc i32 @prune_match(%struct.Connector_struct* nocapture readonly %a, %struct.Connector_struct* nocapture readonly %b) #9 {
+define fastcc i32 @prune_match(ptr nocapture readonly %a, ptr nocapture readonly %b) #9 {
 entry:
-  %label56 = bitcast %struct.Connector_struct* %a to i16*
-  %0 = load i16, i16* %label56, align 2
-  %label157 = bitcast %struct.Connector_struct* %b to i16*
-  %1 = load i16, i16* %label157, align 2
+  %0 = load i16, ptr %a, align 2
+  %1 = load i16, ptr %b, align 2
   %cmp = icmp eq i16 %0, %1
   br i1 %cmp, label %if.end, label %return, !prof !988
 if.end:
-  %priority = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %a, i64 0, i32 2
-  %2 = load i8, i8* %priority, align 1
-  %priority5 = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %b, i64 0, i32 2
-  %3 = load i8, i8* %priority5, align 1
-  %string = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %a, i64 0, i32 5
-  %4 = load i8*, i8** %string, align 8
-  %string7 = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %b, i64 0, i32 5
-  %5 = load i8*, i8** %string7, align 8
+  %priority = getelementptr inbounds %struct.Connector_struct, ptr %a, i64 0, i32 2
+  %2 = load i8, ptr %priority, align 1
+  %priority5 = getelementptr inbounds %struct.Connector_struct, ptr %b, i64 0, i32 2
+  %3 = load i8, ptr %priority5, align 1
+  %string = getelementptr inbounds %struct.Connector_struct, ptr %a, i64 0, i32 5
+  %4 = load ptr, ptr %string, align 8
+  %string7 = getelementptr inbounds %struct.Connector_struct, ptr %b, i64 0, i32 5
+  %5 = load ptr, ptr %string7, align 8
   br label %while.cond
 while.cond:
   %lsr.iv27 = phi i64 [ %lsr.iv.next28, %if.end17 ], [ 0, %if.end ]
-  %scevgep55 = getelementptr i8, i8* %4, i64 %lsr.iv27
-  %6 = load i8, i8* %scevgep55, align 1
+  %scevgep55 = getelementptr i8, ptr %4, i64 %lsr.iv27
+  %6 = load i8, ptr %scevgep55, align 1
   %idxprom.i.i = sext i8 %6 to i64
   %isascii.i.i224 = icmp sgt i8 %6, -1
   br i1 %isascii.i.i224, label %cond.true.i.i, label %cond.false.i.i, !prof !181
 cond.true.i.i:
-  %arrayidx.i.i = getelementptr inbounds %struct._RuneLocale, %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i
-  %7 = load i32, i32* %arrayidx.i.i, align 4
+  %arrayidx.i.i = getelementptr inbounds %struct._RuneLocale, ptr @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i
+  %7 = load i32, ptr %arrayidx.i.i, align 4
   %and.i.i = and i32 %7, 32768
   br label %isupper.exit
 cond.false.i.i:
@@ -72,16 +70,16 @@ isupper.exit:
   %tobool1.sink.i.i = icmp eq i32 %tobool1.sink.i.in.i, 0
   br i1 %tobool1.sink.i.i, label %lor.rhs, label %while.body, !prof !989
 lor.rhs:
-  %sunkaddr = ptrtoint i8* %5 to i64
+  %sunkaddr = ptrtoint ptr %5 to i64
   %sunkaddr58 = add i64 %sunkaddr, %lsr.iv27
-  %sunkaddr59 = inttoptr i64 %sunkaddr58 to i8*
-  %9 = load i8, i8* %sunkaddr59, align 1
+  %sunkaddr59 = inttoptr i64 %sunkaddr58 to ptr
+  %9 = load i8, ptr %sunkaddr59, align 1
   %idxprom.i.i214 = sext i8 %9 to i64
   %isascii.i.i213225 = icmp sgt i8 %9, -1
   br i1 %isascii.i.i213225, label %cond.true.i.i217, label %cond.false.i.i219, !prof !181
 cond.true.i.i217:
-  %arrayidx.i.i215 = getelementptr inbounds %struct._RuneLocale, %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i214
-  %10 = load i32, i32* %arrayidx.i.i215, align 4
+  %arrayidx.i.i215 = getelementptr inbounds %struct._RuneLocale, ptr @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i214
+  %10 = load i32, ptr %arrayidx.i.i215, align 4
   %and.i.i216 = and i32 %10, 32768
   br label %isupper.exit223
 cond.false.i.i219:
@@ -94,14 +92,14 @@ isupper.exit223:
   %tobool1.sink.i.i221 = icmp eq i32 %tobool1.sink.i.in.i220, 0
   br i1 %tobool1.sink.i.i221, label %while.end, label %while.body, !prof !990
 while.body:
-  %sunkaddr60 = ptrtoint i8* %4 to i64
+  %sunkaddr60 = ptrtoint ptr %4 to i64
   %sunkaddr61 = add i64 %sunkaddr60, %lsr.iv27
-  %sunkaddr62 = inttoptr i64 %sunkaddr61 to i8*
-  %12 = load i8, i8* %sunkaddr62, align 1
-  %sunkaddr63 = ptrtoint i8* %5 to i64
+  %sunkaddr62 = inttoptr i64 %sunkaddr61 to ptr
+  %12 = load i8, ptr %sunkaddr62, align 1
+  %sunkaddr63 = ptrtoint ptr %5 to i64
   %sunkaddr64 = add i64 %sunkaddr63, %lsr.iv27
-  %sunkaddr65 = inttoptr i64 %sunkaddr64 to i8*
-  %13 = load i8, i8* %sunkaddr65, align 1
+  %sunkaddr65 = inttoptr i64 %sunkaddr64 to ptr
+  %13 = load i8, ptr %sunkaddr65, align 1
   %cmp14 = icmp eq i8 %12, %13
   br i1 %cmp14, label %if.end17, label %return, !prof !991
 if.end17:
@@ -112,16 +110,16 @@ while.end:
   %15 = icmp eq i8 %14, 0
   br i1 %15, label %if.then23, label %if.else88, !prof !992
 if.then23:
-  %sunkaddr66 = ptrtoint %struct.Connector_struct* %a to i64
+  %sunkaddr66 = ptrtoint ptr %a to i64
   %sunkaddr67 = add i64 %sunkaddr66, 16
-  %sunkaddr68 = inttoptr i64 %sunkaddr67 to i8**
-  %16 = load i8*, i8** %sunkaddr68, align 8
-  %17 = load i8, i8* %16, align 1
+  %sunkaddr68 = inttoptr i64 %sunkaddr67 to ptr
+  %16 = load ptr, ptr %sunkaddr68, align 8
+  %17 = load i8, ptr %16, align 1
   %cmp26 = icmp eq i8 %17, 83
-  %sunkaddr69 = ptrtoint i8* %4 to i64
+  %sunkaddr69 = ptrtoint ptr %4 to i64
   %sunkaddr70 = add i64 %sunkaddr69, %lsr.iv27
-  %sunkaddr71 = inttoptr i64 %sunkaddr70 to i8*
-  %18 = load i8, i8* %sunkaddr71, align 1
+  %sunkaddr71 = inttoptr i64 %sunkaddr70 to ptr
+  %18 = load i8, ptr %sunkaddr71, align 1
   br i1 %cmp26, label %land.lhs.true28, label %while.cond59.preheader, !prof !993
 land.lhs.true28:
   switch i8 %18, label %land.rhs.preheader [
@@ -129,35 +127,35 @@ land.lhs.true28:
     i8 0, label %return
   ], !prof !994
 land.lhs.true35:
-  %sunkaddr72 = ptrtoint i8* %5 to i64
+  %sunkaddr72 = ptrtoint ptr %5 to i64
   %sunkaddr73 = add i64 %sunkaddr72, %lsr.iv27
-  %sunkaddr74 = inttoptr i64 %sunkaddr73 to i8*
-  %19 = load i8, i8* %sunkaddr74, align 1
+  %sunkaddr74 = inttoptr i64 %sunkaddr73 to ptr
+  %19 = load i8, ptr %sunkaddr74, align 1
   switch i8 %19, label %land.rhs.preheader [
     i8 112, label %land.lhs.true43
   ], !prof !995
 land.lhs.true43:
-  %20 = ptrtoint i8* %16 to i64
+  %20 = ptrtoint ptr %16 to i64
   %21 = sub i64 0, %20
-  %scevgep52 = getelementptr i8, i8* %4, i64 %21
-  %scevgep53 = getelementptr i8, i8* %scevgep52, i64 %lsr.iv27
-  %scevgep54 = getelementptr i8, i8* %scevgep53, i64 -1
-  %cmp45 = icmp eq i8* %scevgep54, null
+  %scevgep52 = getelementptr i8, ptr %4, i64 %21
+  %scevgep53 = getelementptr i8, ptr %scevgep52, i64 %lsr.iv27
+  %scevgep54 = getelementptr i8, ptr %scevgep53, i64 -1
+  %cmp45 = icmp eq ptr %scevgep54, null
   br i1 %cmp45, label %return, label %lor.lhs.false47, !prof !996
 lor.lhs.false47:
-  %22 = ptrtoint i8* %16 to i64
+  %22 = ptrtoint ptr %16 to i64
   %23 = sub i64 0, %22
-  %scevgep47 = getelementptr i8, i8* %4, i64 %23
-  %scevgep48 = getelementptr i8, i8* %scevgep47, i64 %lsr.iv27
-  %scevgep49 = getelementptr i8, i8* %scevgep48, i64 -2
-  %cmp50 = icmp eq i8* %scevgep49, null
+  %scevgep47 = getelementptr i8, ptr %4, i64 %23
+  %scevgep48 = getelementptr i8, ptr %scevgep47, i64 %lsr.iv27
+  %scevgep49 = getelementptr i8, ptr %scevgep48, i64 -2
+  %cmp50 = icmp eq ptr %scevgep49, null
   br i1 %cmp50, label %land.lhs.true52, label %while.cond59.preheader, !prof !997
 land.lhs.true52:
-  %sunkaddr75 = ptrtoint i8* %4 to i64
+  %sunkaddr75 = ptrtoint ptr %4 to i64
   %sunkaddr76 = add i64 %sunkaddr75, %lsr.iv27
   %sunkaddr77 = add i64 %sunkaddr76, -1
-  %sunkaddr78 = inttoptr i64 %sunkaddr77 to i8*
-  %24 = load i8, i8* %sunkaddr78, align 1
+  %sunkaddr78 = inttoptr i64 %sunkaddr77 to ptr
+  %24 = load i8, ptr %sunkaddr78, align 1
   %cmp55 = icmp eq i8 %24, 73
   %cmp61233 = icmp eq i8 %18, 0
   %or.cond265 = or i1 %cmp55, %cmp61233
@@ -166,14 +164,14 @@ while.cond59.preheader:
   %cmp61233.old = icmp eq i8 %18, 0
   br i1 %cmp61233.old, label %return, label %land.rhs.preheader, !prof !999
 land.rhs.preheader:
-  %scevgep33 = getelementptr i8, i8* %5, i64 %lsr.iv27
-  %scevgep43 = getelementptr i8, i8* %4, i64 %lsr.iv27
+  %scevgep33 = getelementptr i8, ptr %5, i64 %lsr.iv27
+  %scevgep43 = getelementptr i8, ptr %4, i64 %lsr.iv27
   br label %land.rhs
 land.rhs:
   %lsr.iv = phi i64 [ 0, %land.rhs.preheader ], [ %lsr.iv.next, %if.then83 ]
   %25 = phi i8 [ %27, %if.then83 ], [ %18, %land.rhs.preheader ]
-  %scevgep34 = getelementptr i8, i8* %scevgep33, i64 %lsr.iv
-  %26 = load i8, i8* %scevgep34, align 1
+  %scevgep34 = getelementptr i8, ptr %scevgep33, i64 %lsr.iv
+  %26 = load i8, ptr %scevgep34, align 1
   %cmp64 = icmp eq i8 %26, 0
   br i1 %cmp64, label %return, label %while.body66, !prof !1000
 while.body66:
@@ -187,9 +185,9 @@ lor.lhs.false74:
   %or.cond208 = or i1 %cmp77, %cmp81
   br i1 %or.cond208, label %return, label %if.then83, !prof !1002
 if.then83:
-  %scevgep44 = getelementptr i8, i8* %scevgep43, i64 %lsr.iv
-  %scevgep45 = getelementptr i8, i8* %scevgep44, i64 1
-  %27 = load i8, i8* %scevgep45, align 1
+  %scevgep44 = getelementptr i8, ptr %scevgep43, i64 %lsr.iv
+  %scevgep45 = getelementptr i8, ptr %scevgep44, i64 1
+  %27 = load i8, ptr %scevgep45, align 1
   %cmp61 = icmp eq i8 %27, 0
   %lsr.iv.next = add i64 %lsr.iv, 1
   br i1 %cmp61, label %return, label %land.rhs, !prof !999
@@ -199,21 +197,21 @@ if.else88:
   %or.cond159 = and i1 %cmp89, %cmp92
   br i1 %or.cond159, label %while.cond95.preheader, label %if.else123, !prof !1003
 while.cond95.preheader:
-  %sunkaddr79 = ptrtoint i8* %4 to i64
+  %sunkaddr79 = ptrtoint ptr %4 to i64
   %sunkaddr80 = add i64 %sunkaddr79, %lsr.iv27
-  %sunkaddr81 = inttoptr i64 %sunkaddr80 to i8*
-  %28 = load i8, i8* %sunkaddr81, align 1
+  %sunkaddr81 = inttoptr i64 %sunkaddr80 to ptr
+  %28 = load i8, ptr %sunkaddr81, align 1
   %cmp97238 = icmp eq i8 %28, 0
   br i1 %cmp97238, label %return, label %land.rhs99.preheader, !prof !1004
 land.rhs99.preheader:
-  %scevgep31 = getelementptr i8, i8* %5, i64 %lsr.iv27
-  %scevgep40 = getelementptr i8, i8* %4, i64 %lsr.iv27
+  %scevgep31 = getelementptr i8, ptr %5, i64 %lsr.iv27
+  %scevgep40 = getelementptr i8, ptr %4, i64 %lsr.iv27
   br label %land.rhs99
 land.rhs99:
   %lsr.iv17 = phi i64 [ 0, %land.rhs99.preheader ], [ %lsr.iv.next18, %if.then117 ]
   %29 = phi i8 [ %31, %if.then117 ], [ %28, %land.rhs99.preheader ]
-  %scevgep32 = getelementptr i8, i8* %scevgep31, i64 %lsr.iv17
-  %30 = load i8, i8* %scevgep32, align 1
+  %scevgep32 = getelementptr i8, ptr %scevgep31, i64 %lsr.iv17
+  %30 = load i8, ptr %scevgep32, align 1
   %cmp101 = icmp eq i8 %30, 0
   br i1 %cmp101, label %return, label %while.body104, !prof !1005
 while.body104:
@@ -224,9 +222,9 @@ while.body104:
   %or.cond210 = or i1 %or.cond209, %cmp115
   br i1 %or.cond210, label %if.then117, label %return, !prof !1006
 if.then117:
-  %scevgep41 = getelementptr i8, i8* %scevgep40, i64 %lsr.iv17
-  %scevgep42 = getelementptr i8, i8* %scevgep41, i64 1
-  %31 = load i8, i8* %scevgep42, align 1
+  %scevgep41 = getelementptr i8, ptr %scevgep40, i64 %lsr.iv17
+  %scevgep42 = getelementptr i8, ptr %scevgep41, i64 1
+  %31 = load i8, ptr %scevgep42, align 1
   %cmp97 = icmp eq i8 %31, 0
   %lsr.iv.next18 = add i64 %lsr.iv17, 1
   br i1 %cmp97, label %return, label %land.rhs99, !prof !1004
@@ -236,21 +234,21 @@ if.else123:
   %or.cond160 = and i1 %cmp124, %cmp127
   br i1 %or.cond160, label %while.cond130.preheader, label %return, !prof !1007
 while.cond130.preheader:
-  %sunkaddr82 = ptrtoint i8* %4 to i64
+  %sunkaddr82 = ptrtoint ptr %4 to i64
   %sunkaddr83 = add i64 %sunkaddr82, %lsr.iv27
-  %sunkaddr84 = inttoptr i64 %sunkaddr83 to i8*
-  %32 = load i8, i8* %sunkaddr84, align 1
+  %sunkaddr84 = inttoptr i64 %sunkaddr83 to ptr
+  %32 = load i8, ptr %sunkaddr84, align 1
   %cmp132244 = icmp eq i8 %32, 0
   br i1 %cmp132244, label %return, label %land.rhs134.preheader, !prof !1008
 land.rhs134.preheader:
-  %scevgep29 = getelementptr i8, i8* %5, i64 %lsr.iv27
-  %scevgep37 = getelementptr i8, i8* %4, i64 %lsr.iv27
+  %scevgep29 = getelementptr i8, ptr %5, i64 %lsr.iv27
+  %scevgep37 = getelementptr i8, ptr %4, i64 %lsr.iv27
   br label %land.rhs134
 land.rhs134:
   %lsr.iv22 = phi i64 [ 0, %land.rhs134.preheader ], [ %lsr.iv.next23, %if.then152 ]
   %33 = phi i8 [ %35, %if.then152 ], [ %32, %land.rhs134.preheader ]
-  %scevgep30 = getelementptr i8, i8* %scevgep29, i64 %lsr.iv22
-  %34 = load i8, i8* %scevgep30, align 1
+  %scevgep30 = getelementptr i8, ptr %scevgep29, i64 %lsr.iv22
+  %34 = load i8, ptr %scevgep30, align 1
   %cmp136 = icmp eq i8 %34, 0
   br i1 %cmp136, label %return, label %while.body139, !prof !1009
 while.body139:
@@ -261,9 +259,9 @@ while.body139:
   %or.cond212 = or i1 %or.cond211, %cmp150
   br i1 %or.cond212, label %if.then152, label %return, !prof !1010
 if.then152:
-  %scevgep38 = getelementptr i8, i8* %scevgep37, i64 %lsr.iv22
-  %scevgep39 = getelementptr i8, i8* %scevgep38, i64 1
-  %35 = load i8, i8* %scevgep39, align 1
+  %scevgep38 = getelementptr i8, ptr %scevgep37, i64 %lsr.iv22
+  %scevgep39 = getelementptr i8, ptr %scevgep38, i64 1
+  %35 = load i8, ptr %scevgep39, align 1
   %cmp132 = icmp eq i8 %35, 0
   %lsr.iv.next23 = add i64 %lsr.iv22, 1
   br i1 %cmp132, label %return, label %land.rhs134, !prof !1008

diff --git a/llvm/test/CodeGen/AArch64/rand.ll b/llvm/test/CodeGen/AArch64/rand.ll
index c10bdeff8e0c9..b742a9ab43d35 100644
--- a/llvm/test/CodeGen/AArch64/rand.ll
+++ b/llvm/test/CodeGen/AArch64/rand.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64 -mattr=+v8.5a,+rand %s -o - | FileCheck %s
 
-define  i32 @rndr(i64* %__addr) {
+define  i32 @rndr(ptr %__addr) {
 ; CHECK-LABEL: rndr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mrs x10, RNDR
@@ -14,13 +14,13 @@ define  i32 @rndr(i64* %__addr) {
   %1 = tail call { i64, i1 } @llvm.aarch64.rndr()
   %2 = extractvalue { i64, i1 } %1, 0
   %3 = extractvalue { i64, i1 } %1, 1
-  store i64 %2, i64* %__addr, align 8
+  store i64 %2, ptr %__addr, align 8
   %4 = zext i1 %3 to i32
   ret i32 %4
 }
 
 
-define  i32 @rndrrs(i64*  %__addr) {
+define  i32 @rndrrs(ptr  %__addr) {
 ; CHECK-LABEL: rndrrs:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mrs x10, RNDRRS
@@ -33,7 +33,7 @@ define  i32 @rndrrs(i64*  %__addr) {
   %1 = tail call { i64, i1 } @llvm.aarch64.rndrrs()
   %2 = extractvalue { i64, i1 } %1, 0
   %3 = extractvalue { i64, i1 } %1, 1
-  store i64 %2, i64* %__addr, align 8
+  store i64 %2, ptr %__addr, align 8
   %4 = zext i1 %3 to i32
   ret i32 %4
 }

diff --git a/llvm/test/CodeGen/AArch64/redundant-copy-elim-empty-mbb.ll b/llvm/test/CodeGen/AArch64/redundant-copy-elim-empty-mbb.ll
index ed34cbd2fa0b6..435a06ed6243a 100644
--- a/llvm/test/CodeGen/AArch64/redundant-copy-elim-empty-mbb.ll
+++ b/llvm/test/CodeGen/AArch64/redundant-copy-elim-empty-mbb.ll
@@ -5,7 +5,7 @@
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-unknown-linux-gnu"
 
-declare i8* @bar()
+declare ptr @bar()
 
 ; CHECK-LABEL: foo:
 ; CHECK: tbz
@@ -19,8 +19,8 @@ entry:
   br i1 %start, label %cleanup, label %if.end
 
 if.end:                                           ; preds = %if.end, %entry
-  %call = tail call i8* @bar()
-  %cmp = icmp eq i8* %call, null
+  %call = tail call ptr @bar()
+  %cmp = icmp eq ptr %call, null
   br i1 %cmp, label %cleanup, label %if.end
 
 cleanup:                                          ; preds = %if.end, %entry

diff --git a/llvm/test/CodeGen/AArch64/regress-tail-livereg.ll b/llvm/test/CodeGen/AArch64/regress-tail-livereg.ll
index 965aa0d062d5e..c17d4ec4e9c17 100644
--- a/llvm/test/CodeGen/AArch64/regress-tail-livereg.ll
+++ b/llvm/test/CodeGen/AArch64/regress-tail-livereg.ll
@@ -1,11 +1,11 @@
 ; RUN: llc -verify-machineinstrs -mtriple=arm64-apple-ios7.0 -o - %s | FileCheck %s
-@var = global void()* zeroinitializer
+@var = global ptr zeroinitializer
 
 declare void @bar()
 
 define void @foo() {
 ; CHECK-LABEL: foo:
-       %func = load void()*, void()** @var
+       %func = load ptr, ptr @var
 
        ; Calling a function encourages @foo to use a callee-saved register,
        ; which makes it a natural choice for the tail call itself. But we don't
@@ -24,10 +24,9 @@ define void @test_x30_tail() {
 ; CHECK-LABEL: test_x30_tail:
 ; CHECK: mov [[DEST:x[0-9]+]], x30
 ; CHECK: br [[DEST]]
-  %addr = call i8* @llvm.returnaddress(i32 0)
-  %faddr = bitcast i8* %addr to void()*
-  tail call void %faddr()
+  %addr = call ptr @llvm.returnaddress(i32 0)
+  tail call void %addr()
   ret void
 }
 
-declare i8* @llvm.returnaddress(i32)
+declare ptr @llvm.returnaddress(i32)

diff --git a/llvm/test/CodeGen/AArch64/regress-tblgen-chains.ll b/llvm/test/CodeGen/AArch64/regress-tblgen-chains.ll
index c16009727aab2..ffe4f4d1bbd64 100644
--- a/llvm/test/CodeGen/AArch64/regress-tblgen-chains.ll
+++ b/llvm/test/CodeGen/AArch64/regress-tblgen-chains.ll
@@ -10,7 +10,7 @@
 
 ; This was obviously a Bad Thing.
 
-declare void @bar(i8*)
+declare void @bar(ptr)
 
 define i64 @test_chains() {
 ; CHECK-LABEL: test_chains:
@@ -34,16 +34,16 @@ define i64 @test_chains() {
 
   %locvar = alloca i8
 
-  call void @bar(i8* %locvar)
+  call void @bar(ptr %locvar)
 
-  %inc.1 = load i8, i8* %locvar
+  %inc.1 = load i8, ptr %locvar
   %inc.2 = zext i8 %inc.1 to i64
   %inc.3 = add i64 %inc.2, 1
   %inc.4 = trunc i64 %inc.3 to i8
-  store i8 %inc.4, i8* %locvar
+  store i8 %inc.4, ptr %locvar
 
 
-  %ret.1 = load i8, i8* %locvar
+  %ret.1 = load i8, ptr %locvar
   %ret.2 = zext i8 %ret.1 to i64
   ret i64 %ret.2
 }

diff --git a/llvm/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll b/llvm/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll
index 0d1ce41696cc4..01943f40d41e8 100644
--- a/llvm/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll
+++ b/llvm/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll
@@ -7,30 +7,30 @@ define void @test_w29_reserved() {
 ; CHECK-LABEL: test_w29_reserved:
 ; CHECK: mov x29, sp
 
-  %val1 = load volatile i32, i32* @var
-  %val2 = load volatile i32, i32* @var
-  %val3 = load volatile i32, i32* @var
-  %val4 = load volatile i32, i32* @var
-  %val5 = load volatile i32, i32* @var
-  %val6 = load volatile i32, i32* @var
-  %val7 = load volatile i32, i32* @var
-  %val8 = load volatile i32, i32* @var
-  %val9 = load volatile i32, i32* @var
+  %val1 = load volatile i32, ptr @var
+  %val2 = load volatile i32, ptr @var
+  %val3 = load volatile i32, ptr @var
+  %val4 = load volatile i32, ptr @var
+  %val5 = load volatile i32, ptr @var
+  %val6 = load volatile i32, ptr @var
+  %val7 = load volatile i32, ptr @var
+  %val8 = load volatile i32, ptr @var
+  %val9 = load volatile i32, ptr @var
 
 ; CHECK-NOT: ldr w29,
 
   ; Call to prevent fp-elim that occurs regardless in leaf functions.
   call void @bar()
 
-  store volatile i32 %val1,  i32* @var
-  store volatile i32 %val2,  i32* @var
-  store volatile i32 %val3,  i32* @var
-  store volatile i32 %val4,  i32* @var
-  store volatile i32 %val5,  i32* @var
-  store volatile i32 %val6,  i32* @var
-  store volatile i32 %val7,  i32* @var
-  store volatile i32 %val8,  i32* @var
-  store volatile i32 %val9,  i32* @var
+  store volatile i32 %val1,  ptr @var
+  store volatile i32 %val2,  ptr @var
+  store volatile i32 %val3,  ptr @var
+  store volatile i32 %val4,  ptr @var
+  store volatile i32 %val5,  ptr @var
+  store volatile i32 %val6,  ptr @var
+  store volatile i32 %val7,  ptr @var
+  store volatile i32 %val8,  ptr @var
+  store volatile i32 %val9,  ptr @var
 
   ret void
 ; CHECK: ret

diff --git a/llvm/test/CodeGen/AArch64/relaxed-fp-atomics.ll b/llvm/test/CodeGen/AArch64/relaxed-fp-atomics.ll
index 1f8ba6da24646..95abbb6979be8 100644
--- a/llvm/test/CodeGen/AArch64/relaxed-fp-atomics.ll
+++ b/llvm/test/CodeGen/AArch64/relaxed-fp-atomics.ll
@@ -1,91 +1,91 @@
 ; PR52927: Relaxed atomics can load to/store from fp regs directly
 ; RUN: llc < %s -mtriple=arm64-eabi -asm-verbose=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s
 
-define float @atomic_load_relaxed_f32(float* %p, i32 %off32, i64 %off64) #0 {
+define float @atomic_load_relaxed_f32(ptr %p, i32 %off32, i64 %off64) #0 {
 ; CHECK-LABEL: atomic_load_relaxed_f32:
-  %ptr_unsigned = getelementptr float, float* %p, i32 4095
-  %val_unsigned = load atomic float, float* %ptr_unsigned monotonic, align 4
+  %ptr_unsigned = getelementptr float, ptr %p, i32 4095
+  %val_unsigned = load atomic float, ptr %ptr_unsigned monotonic, align 4
 ; CHECK: ldr {{s[0-9]+}}, [x0, #16380]
 
-  %ptr_regoff = getelementptr float, float* %p, i32 %off32
-  %val_regoff = load atomic float, float* %ptr_regoff unordered, align 4
+  %ptr_regoff = getelementptr float, ptr %p, i32 %off32
+  %val_regoff = load atomic float, ptr %ptr_regoff unordered, align 4
   %tot1 = fadd float %val_unsigned, %val_regoff
 ; CHECK: ldr {{s[0-9]+}}, [x0, w1, sxtw #2]
 
-  %ptr_regoff64 = getelementptr float, float* %p, i64 %off64
-  %val_regoff64 = load atomic float, float* %ptr_regoff64 monotonic, align 4
+  %ptr_regoff64 = getelementptr float, ptr %p, i64 %off64
+  %val_regoff64 = load atomic float, ptr %ptr_regoff64 monotonic, align 4
   %tot2 = fadd float %tot1, %val_regoff64
 ; CHECK: ldr {{s[0-9]+}}, [x0, x2, lsl #2]
 
-  %ptr_unscaled = getelementptr float, float* %p, i32 -64
-  %val_unscaled = load atomic float, float* %ptr_unscaled unordered, align 4
+  %ptr_unscaled = getelementptr float, ptr %p, i32 -64
+  %val_unscaled = load atomic float, ptr %ptr_unscaled unordered, align 4
   %tot3 = fadd float %tot2, %val_unscaled
 ; CHECK: ldur {{s[0-9]+}}, [x0, #-256]
 
   ret float %tot3
 }
 
-define double @atomic_load_relaxed_f64(double* %p, i32 %off32, i64 %off64) #0 {
+define double @atomic_load_relaxed_f64(ptr %p, i32 %off32, i64 %off64) #0 {
 ; CHECK-LABEL: atomic_load_relaxed_f64:
-  %ptr_unsigned = getelementptr double, double* %p, i32 4095
-  %val_unsigned = load atomic double, double* %ptr_unsigned monotonic, align 8
+  %ptr_unsigned = getelementptr double, ptr %p, i32 4095
+  %val_unsigned = load atomic double, ptr %ptr_unsigned monotonic, align 8
 ; CHECK: ldr {{d[0-9]+}}, [x0, #32760]
 
-  %ptr_regoff = getelementptr double, double* %p, i32 %off32
-  %val_regoff = load atomic double, double* %ptr_regoff unordered, align 8
+  %ptr_regoff = getelementptr double, ptr %p, i32 %off32
+  %val_regoff = load atomic double, ptr %ptr_regoff unordered, align 8
   %tot1 = fadd double %val_unsigned, %val_regoff
 ; CHECK: ldr {{d[0-9]+}}, [x0, w1, sxtw #3]
 
-  %ptr_regoff64 = getelementptr double, double* %p, i64 %off64
-  %val_regoff64 = load atomic double, double* %ptr_regoff64 monotonic, align 8
+  %ptr_regoff64 = getelementptr double, ptr %p, i64 %off64
+  %val_regoff64 = load atomic double, ptr %ptr_regoff64 monotonic, align 8
   %tot2 = fadd double %tot1, %val_regoff64
 ; CHECK: ldr {{d[0-9]+}}, [x0, x2, lsl #3]
 
-  %ptr_unscaled = getelementptr double, double* %p, i32 -32
-  %val_unscaled = load atomic double, double* %ptr_unscaled unordered, align 8
+  %ptr_unscaled = getelementptr double, ptr %p, i32 -32
+  %val_unscaled = load atomic double, ptr %ptr_unscaled unordered, align 8
   %tot3 = fadd double %tot2, %val_unscaled
 ; CHECK: ldur {{d[0-9]+}}, [x0, #-256]
 
   ret double %tot3
 }
 
-define void @atomic_store_relaxed_f32(float* %p, i32 %off32, i64 %off64, float %val) #0 {
+define void @atomic_store_relaxed_f32(ptr %p, i32 %off32, i64 %off64, float %val) #0 {
 ; CHECK-LABEL: atomic_store_relaxed_f32:
-  %ptr_unsigned = getelementptr float, float* %p, i32 4095
-  store atomic float %val, float* %ptr_unsigned monotonic, align 4
+  %ptr_unsigned = getelementptr float, ptr %p, i32 4095
+  store atomic float %val, ptr %ptr_unsigned monotonic, align 4
 ; CHECK: str {{s[0-9]+}}, [x0, #16380]
 
-  %ptr_regoff = getelementptr float, float* %p, i32 %off32
-  store atomic float %val, float* %ptr_regoff unordered, align 4
+  %ptr_regoff = getelementptr float, ptr %p, i32 %off32
+  store atomic float %val, ptr %ptr_regoff unordered, align 4
 ; CHECK: str {{s[0-9]+}}, [x0, w1, sxtw #2]
 
-  %ptr_regoff64 = getelementptr float, float* %p, i64 %off64
-  store atomic float %val, float* %ptr_regoff64 monotonic, align 4
+  %ptr_regoff64 = getelementptr float, ptr %p, i64 %off64
+  store atomic float %val, ptr %ptr_regoff64 monotonic, align 4
 ; CHECK: str {{s[0-9]+}}, [x0, x2, lsl #2]
 
-  %ptr_unscaled = getelementptr float, float* %p, i32 -64
-  store atomic float %val, float* %ptr_unscaled unordered, align 4
+  %ptr_unscaled = getelementptr float, ptr %p, i32 -64
+  store atomic float %val, ptr %ptr_unscaled unordered, align 4
 ; CHECK: stur {{s[0-9]+}}, [x0, #-256]
 
   ret void
 }
 
-define void @atomic_store_relaxed_f64(double* %p, i32 %off32, i64 %off64, double %val) #0 {
+define void @atomic_store_relaxed_f64(ptr %p, i32 %off32, i64 %off64, double %val) #0 {
 ; CHECK-LABEL: atomic_store_relaxed_f64:
-  %ptr_unsigned = getelementptr double, double* %p, i32 4095
-  store atomic double %val, double* %ptr_unsigned monotonic, align 8
+  %ptr_unsigned = getelementptr double, ptr %p, i32 4095
+  store atomic double %val, ptr %ptr_unsigned monotonic, align 8
 ; CHECK: str {{d[0-9]+}}, [x0, #32760]
 
-  %ptr_regoff = getelementptr double, double* %p, i32 %off32
-  store atomic double %val, double* %ptr_regoff unordered, align 8
+  %ptr_regoff = getelementptr double, ptr %p, i32 %off32
+  store atomic double %val, ptr %ptr_regoff unordered, align 8
 ; CHECK: str {{d[0-9]+}}, [x0, w1, sxtw #3]
 
-  %ptr_regoff64 = getelementptr double, double* %p, i64 %off64
-  store atomic double %val, double* %ptr_regoff64 unordered, align 8
+  %ptr_regoff64 = getelementptr double, ptr %p, i64 %off64
+  store atomic double %val, ptr %ptr_regoff64 unordered, align 8
 ; CHECK: str {{d[0-9]+}}, [x0, x2, lsl #3]
 
-  %ptr_unscaled = getelementptr double, double* %p, i32 -32
-  store atomic double %val, double* %ptr_unscaled monotonic, align 8
+  %ptr_unscaled = getelementptr double, ptr %p, i32 -32
+  store atomic double %val, ptr %ptr_unscaled monotonic, align 8
 ; CHECK: stur {{d[0-9]+}}, [x0, #-256]
 
   ret void

diff --git a/llvm/test/CodeGen/AArch64/remat.ll b/llvm/test/CodeGen/AArch64/remat.ll
index 062aa47b2578f..513311b1ee272 100644
--- a/llvm/test/CodeGen/AArch64/remat.ll
+++ b/llvm/test/CodeGen/AArch64/remat.ll
@@ -27,15 +27,15 @@
 ; RUN: llc -mtriple=aarch64-linux-gnuabi -mcpu=ampere1 -o - %s | FileCheck %s
 
 %X = type { i64, i64, i64 }
-declare void @f(%X*)
+declare void @f(ptr)
 define void @t() {
 entry:
   %tmp = alloca %X
-  call void @f(%X* %tmp)
+  call void @f(ptr %tmp)
 ; CHECK: add x0, sp, #8
 ; CHECK-NOT: mov
 ; CHECK-NEXT: bl f
-  call void @f(%X* %tmp)
+  call void @f(ptr %tmp)
 ; CHECK: add x0, sp, #8
 ; CHECK-NOT: mov
 ; CHECK-NEXT: bl f

diff --git a/llvm/test/CodeGen/AArch64/reserveXreg.ll b/llvm/test/CodeGen/AArch64/reserveXreg.ll
index 61b9af072d2d3..e0f21550cd8e3 100644
--- a/llvm/test/CodeGen/AArch64/reserveXreg.ll
+++ b/llvm/test/CodeGen/AArch64/reserveXreg.ll
@@ -4,7 +4,7 @@
 
 ; LR, FP, X30 and X29 should be correctly recognized and not used.
 
-define void @foo(i64 %v1, i64 %v2, i64* %ptr) {
+define void @foo(i64 %v1, i64 %v2, ptr %ptr) {
 ; CHECK-LABEL: foo:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -23,21 +23,21 @@ define void @foo(i64 %v1, i64 %v2, i64* %ptr) {
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %v3 = add i64 %v1, %v2
-  %p1 = getelementptr i64, i64* %ptr, i64 1
-  store volatile i64 %v3, i64* %p1, align 8
+  %p1 = getelementptr i64, ptr %ptr, i64 1
+  store volatile i64 %v3, ptr %p1, align 8
 
-  %p2 = getelementptr i64, i64* %ptr, i64 2
-  %v4 = load volatile i64, i64* %p2, align 8
+  %p2 = getelementptr i64, ptr %ptr, i64 2
+  %v4 = load volatile i64, ptr %p2, align 8
   %v5 = add i64 %v1, %v4
   %v6 = sub i64 %v5, %v2
-  store volatile i64 %v6, i64* %p2, align 8
+  store volatile i64 %v6, ptr %p2, align 8
 
-  %p3 = getelementptr i64, i64* %ptr, i64 3
-  store volatile i64 %v3, i64* %p3, align 8
+  %p3 = getelementptr i64, ptr %ptr, i64 3
+  store volatile i64 %v3, ptr %p3, align 8
 
-  %p4 = getelementptr i64, i64* %ptr, i64 4
-  store volatile i64 %v1, i64* %p4, align 8
-  %p5 = getelementptr i64, i64* %ptr, i64 5
-  store volatile i64 %v2, i64* %p5, align 8
+  %p4 = getelementptr i64, ptr %ptr, i64 4
+  store volatile i64 %v1, ptr %p4, align 8
+  %p5 = getelementptr i64, ptr %ptr, i64 5
+  store volatile i64 %v2, ptr %p5, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/returnaddr.ll b/llvm/test/CodeGen/AArch64/returnaddr.ll
index 3f74bfb4bcf7e..d90a1db5a332b 100644
--- a/llvm/test/CodeGen/AArch64/returnaddr.ll
+++ b/llvm/test/CodeGen/AArch64/returnaddr.ll
@@ -1,15 +1,15 @@
 ; RUN: llc -o - %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
 
-define i8* @rt0(i32 %x) nounwind readnone {
+define ptr @rt0(i32 %x) nounwind readnone {
 entry:
 ; CHECK-LABEL: rt0:
 ; CHECK: hint #7
 ; CHECK: mov x0, x30
-  %0 = tail call i8* @llvm.returnaddress(i32 0)
-  ret i8* %0
+  %0 = tail call ptr @llvm.returnaddress(i32 0)
+  ret ptr %0
 }
 
-define i8* @rt2() nounwind readnone {
+define ptr @rt2() nounwind readnone {
 entry:
 ; CHECK-LABEL: rt2:
 ; CHECK: ldr x[[reg:[0-9]+]], [x29]
@@ -17,8 +17,8 @@ entry:
 ; CHECK: ldr x30, [x[[reg]], #8]
 ; CHECK: hint #7
 ; CHECK: mov x0, x30
-  %0 = tail call i8* @llvm.returnaddress(i32 2)
-  ret i8* %0
+  %0 = tail call ptr @llvm.returnaddress(i32 2)
+  ret ptr %0
 }
 
-declare i8* @llvm.returnaddress(i32) nounwind readnone
+declare ptr @llvm.returnaddress(i32) nounwind readnone

diff --git a/llvm/test/CodeGen/AArch64/rm_redundant_cmp.ll b/llvm/test/CodeGen/AArch64/rm_redundant_cmp.ll
index 6864bf5639b59..3240ef0a40ddc 100644
--- a/llvm/test/CodeGen/AArch64/rm_redundant_cmp.ll
+++ b/llvm/test/CodeGen/AArch64/rm_redundant_cmp.ll
@@ -23,13 +23,13 @@ define void @test_i16_2cmp_signed_1() {
 ; CHECK-NEXT:  .LBB0_2: // %if.end8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i16, i16* getelementptr inbounds (%struct.s_signed_i16, %struct.s_signed_i16* @cost_s_i8_i16, i64 0, i32 1), align 2
-  %1 = load i16, i16* getelementptr inbounds (%struct.s_signed_i16, %struct.s_signed_i16* @cost_s_i8_i16, i64 0, i32 2), align 2
+  %0 = load i16, ptr getelementptr inbounds (%struct.s_signed_i16, ptr @cost_s_i8_i16, i64 0, i32 1), align 2
+  %1 = load i16, ptr getelementptr inbounds (%struct.s_signed_i16, ptr @cost_s_i8_i16, i64 0, i32 2), align 2
   %cmp = icmp sgt i16 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i16 %0, i16* getelementptr inbounds (%struct.s_signed_i16, %struct.s_signed_i16* @cost_s_i8_i16, i64 0, i32 0), align 2
+  store i16 %0, ptr @cost_s_i8_i16, align 2
   br label %if.end8
 
 if.else:                                          ; preds = %entry
@@ -37,7 +37,7 @@ if.else:                                          ; preds = %entry
   br i1 %cmp5, label %if.then7, label %if.end8
 
 if.then7:                                         ; preds = %if.else
-  store i16 %0, i16* getelementptr inbounds (%struct.s_signed_i16, %struct.s_signed_i16* @cost_s_i8_i16, i64 0, i32 0), align 2
+  store i16 %0, ptr @cost_s_i8_i16, align 2
   br label %if.end8
 
 if.end8:                                          ; preds = %if.else, %if.then7, %if.then
@@ -61,13 +61,13 @@ define void @test_i16_2cmp_signed_2() {
 ; CHECK-NEXT:  .LBB1_2: // %if.end8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i16, i16* getelementptr inbounds (%struct.s_signed_i16, %struct.s_signed_i16* @cost_s_i8_i16, i64 0, i32 1), align 2
-  %1 = load i16, i16* getelementptr inbounds (%struct.s_signed_i16, %struct.s_signed_i16* @cost_s_i8_i16, i64 0, i32 2), align 2
+  %0 = load i16, ptr getelementptr inbounds (%struct.s_signed_i16, ptr @cost_s_i8_i16, i64 0, i32 1), align 2
+  %1 = load i16, ptr getelementptr inbounds (%struct.s_signed_i16, ptr @cost_s_i8_i16, i64 0, i32 2), align 2
   %cmp = icmp sgt i16 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i16 %0, i16* getelementptr inbounds (%struct.s_signed_i16, %struct.s_signed_i16* @cost_s_i8_i16, i64 0, i32 0), align 2
+  store i16 %0, ptr @cost_s_i8_i16, align 2
   br label %if.end8
 
 if.else:                                          ; preds = %entry
@@ -75,7 +75,7 @@ if.else:                                          ; preds = %entry
   br i1 %cmp5, label %if.then7, label %if.end8
 
 if.then7:                                         ; preds = %if.else
-  store i16 %1, i16* getelementptr inbounds (%struct.s_signed_i16, %struct.s_signed_i16* @cost_s_i8_i16, i64 0, i32 0), align 2
+  store i16 %1, ptr @cost_s_i8_i16, align 2
   br label %if.end8
 
 if.end8:                                          ; preds = %if.else, %if.then7, %if.then
@@ -96,13 +96,13 @@ define void @test_i16_2cmp_unsigned_1() {
 ; CHECK-NEXT:  .LBB2_2: // %if.end8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i16, i16* getelementptr inbounds (%struct.s_unsigned_i16, %struct.s_unsigned_i16* @cost_u_i16, i64 0, i32 1), align 2
-  %1 = load i16, i16* getelementptr inbounds (%struct.s_unsigned_i16, %struct.s_unsigned_i16* @cost_u_i16, i64 0, i32 2), align 2
+  %0 = load i16, ptr getelementptr inbounds (%struct.s_unsigned_i16, ptr @cost_u_i16, i64 0, i32 1), align 2
+  %1 = load i16, ptr getelementptr inbounds (%struct.s_unsigned_i16, ptr @cost_u_i16, i64 0, i32 2), align 2
   %cmp = icmp ugt i16 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i16 %0, i16* getelementptr inbounds (%struct.s_unsigned_i16, %struct.s_unsigned_i16* @cost_u_i16, i64 0, i32 0), align 2
+  store i16 %0, ptr @cost_u_i16, align 2
   br label %if.end8
 
 if.else:                                          ; preds = %entry
@@ -110,7 +110,7 @@ if.else:                                          ; preds = %entry
   br i1 %cmp5, label %if.then7, label %if.end8
 
 if.then7:                                         ; preds = %if.else
-  store i16 %0, i16* getelementptr inbounds (%struct.s_unsigned_i16, %struct.s_unsigned_i16* @cost_u_i16, i64 0, i32 0), align 2
+  store i16 %0, ptr @cost_u_i16, align 2
   br label %if.end8
 
 if.end8:                                          ; preds = %if.else, %if.then7, %if.then
@@ -132,13 +132,13 @@ define void @test_i16_2cmp_unsigned_2() {
 ; CHECK-NEXT:  .LBB3_2: // %if.end8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i16, i16* getelementptr inbounds (%struct.s_unsigned_i16, %struct.s_unsigned_i16* @cost_u_i16, i64 0, i32 1), align 2
-  %1 = load i16, i16* getelementptr inbounds (%struct.s_unsigned_i16, %struct.s_unsigned_i16* @cost_u_i16, i64 0, i32 2), align 2
+  %0 = load i16, ptr getelementptr inbounds (%struct.s_unsigned_i16, ptr @cost_u_i16, i64 0, i32 1), align 2
+  %1 = load i16, ptr getelementptr inbounds (%struct.s_unsigned_i16, ptr @cost_u_i16, i64 0, i32 2), align 2
   %cmp = icmp ugt i16 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i16 %0, i16* getelementptr inbounds (%struct.s_unsigned_i16, %struct.s_unsigned_i16* @cost_u_i16, i64 0, i32 0), align 2
+  store i16 %0, ptr @cost_u_i16, align 2
   br label %if.end8
 
 if.else:                                          ; preds = %entry
@@ -146,7 +146,7 @@ if.else:                                          ; preds = %entry
   br i1 %cmp5, label %if.then7, label %if.end8
 
 if.then7:                                         ; preds = %if.else
-  store i16 %1, i16* getelementptr inbounds (%struct.s_unsigned_i16, %struct.s_unsigned_i16* @cost_u_i16, i64 0, i32 0), align 2
+  store i16 %1, ptr @cost_u_i16, align 2
   br label %if.end8
 
 if.end8:                                          ; preds = %if.else, %if.then7, %if.then
@@ -176,13 +176,13 @@ define void @test_i8_2cmp_signed_1() {
 ; CHECK-NEXT:  .LBB4_2: // %if.end8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i8, i8* getelementptr inbounds (%struct.s_signed_i8, %struct.s_signed_i8* @cost_s, i64 0, i32 1), align 2
-  %1 = load i8, i8* getelementptr inbounds (%struct.s_signed_i8, %struct.s_signed_i8* @cost_s, i64 0, i32 2), align 2
+  %0 = load i8, ptr getelementptr inbounds (%struct.s_signed_i8, ptr @cost_s, i64 0, i32 1), align 2
+  %1 = load i8, ptr getelementptr inbounds (%struct.s_signed_i8, ptr @cost_s, i64 0, i32 2), align 2
   %cmp = icmp sgt i8 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i8 %0, i8* getelementptr inbounds (%struct.s_signed_i8, %struct.s_signed_i8* @cost_s, i64 0, i32 0), align 2
+  store i8 %0, ptr @cost_s, align 2
   br label %if.end8
 
 if.else:                                          ; preds = %entry
@@ -190,7 +190,7 @@ if.else:                                          ; preds = %entry
   br i1 %cmp5, label %if.then7, label %if.end8
 
 if.then7:                                         ; preds = %if.else
-  store i8 %0, i8* getelementptr inbounds (%struct.s_signed_i8, %struct.s_signed_i8* @cost_s, i64 0, i32 0), align 2
+  store i8 %0, ptr @cost_s, align 2
   br label %if.end8
 
 if.end8:                                          ; preds = %if.else, %if.then7, %if.then
@@ -214,13 +214,13 @@ define void @test_i8_2cmp_signed_2() {
 ; CHECK-NEXT:  .LBB5_2: // %if.end8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i8, i8* getelementptr inbounds (%struct.s_signed_i8, %struct.s_signed_i8* @cost_s, i64 0, i32 1), align 2
-  %1 = load i8, i8* getelementptr inbounds (%struct.s_signed_i8, %struct.s_signed_i8* @cost_s, i64 0, i32 2), align 2
+  %0 = load i8, ptr getelementptr inbounds (%struct.s_signed_i8, ptr @cost_s, i64 0, i32 1), align 2
+  %1 = load i8, ptr getelementptr inbounds (%struct.s_signed_i8, ptr @cost_s, i64 0, i32 2), align 2
   %cmp = icmp sgt i8 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i8 %0, i8* getelementptr inbounds (%struct.s_signed_i8, %struct.s_signed_i8* @cost_s, i64 0, i32 0), align 2
+  store i8 %0, ptr @cost_s, align 2
   br label %if.end8
 
 if.else:                                          ; preds = %entry
@@ -228,7 +228,7 @@ if.else:                                          ; preds = %entry
   br i1 %cmp5, label %if.then7, label %if.end8
 
 if.then7:                                         ; preds = %if.else
-  store i8 %1, i8* getelementptr inbounds (%struct.s_signed_i8, %struct.s_signed_i8* @cost_s, i64 0, i32 0), align 2
+  store i8 %1, ptr @cost_s, align 2
   br label %if.end8
 
 if.end8:                                          ; preds = %if.else, %if.then7, %if.then
@@ -249,13 +249,13 @@ define void @test_i8_2cmp_unsigned_1() {
 ; CHECK-NEXT:  .LBB6_2: // %if.end8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i8, i8* getelementptr inbounds (%struct.s_unsigned_i8, %struct.s_unsigned_i8* @cost_u_i8, i64 0, i32 1), align 2
-  %1 = load i8, i8* getelementptr inbounds (%struct.s_unsigned_i8, %struct.s_unsigned_i8* @cost_u_i8, i64 0, i32 2), align 2
+  %0 = load i8, ptr getelementptr inbounds (%struct.s_unsigned_i8, ptr @cost_u_i8, i64 0, i32 1), align 2
+  %1 = load i8, ptr getelementptr inbounds (%struct.s_unsigned_i8, ptr @cost_u_i8, i64 0, i32 2), align 2
   %cmp = icmp ugt i8 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i8 %0, i8* getelementptr inbounds (%struct.s_unsigned_i8, %struct.s_unsigned_i8* @cost_u_i8, i64 0, i32 0), align 2
+  store i8 %0, ptr @cost_u_i8, align 2
   br label %if.end8
 
 if.else:                                          ; preds = %entry
@@ -263,7 +263,7 @@ if.else:                                          ; preds = %entry
   br i1 %cmp5, label %if.then7, label %if.end8
 
 if.then7:                                         ; preds = %if.else
-  store i8 %0, i8* getelementptr inbounds (%struct.s_unsigned_i8, %struct.s_unsigned_i8* @cost_u_i8, i64 0, i32 0), align 2
+  store i8 %0, ptr @cost_u_i8, align 2
   br label %if.end8
 
 if.end8:                                          ; preds = %if.else, %if.then7, %if.then
@@ -285,13 +285,13 @@ define void @test_i8_2cmp_unsigned_2() {
 ; CHECK-NEXT:  .LBB7_2: // %if.end8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i8, i8* getelementptr inbounds (%struct.s_unsigned_i8, %struct.s_unsigned_i8* @cost_u_i8, i64 0, i32 1), align 2
-  %1 = load i8, i8* getelementptr inbounds (%struct.s_unsigned_i8, %struct.s_unsigned_i8* @cost_u_i8, i64 0, i32 2), align 2
+  %0 = load i8, ptr getelementptr inbounds (%struct.s_unsigned_i8, ptr @cost_u_i8, i64 0, i32 1), align 2
+  %1 = load i8, ptr getelementptr inbounds (%struct.s_unsigned_i8, ptr @cost_u_i8, i64 0, i32 2), align 2
   %cmp = icmp ugt i8 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i8 %0, i8* getelementptr inbounds (%struct.s_unsigned_i8, %struct.s_unsigned_i8* @cost_u_i8, i64 0, i32 0), align 2
+  store i8 %0, ptr @cost_u_i8, align 2
   br label %if.end8
 
 if.else:                                          ; preds = %entry
@@ -299,7 +299,7 @@ if.else:                                          ; preds = %entry
   br i1 %cmp5, label %if.then7, label %if.end8
 
 if.then7:                                         ; preds = %if.else
-  store i8 %1, i8* getelementptr inbounds (%struct.s_unsigned_i8, %struct.s_unsigned_i8* @cost_u_i8, i64 0, i32 0), align 2
+  store i8 %1, ptr @cost_u_i8, align 2
   br label %if.end8
 
 if.end8:                                          ; preds = %if.else, %if.then7, %if.then

diff  --git a/llvm/test/CodeGen/AArch64/rotate.ll b/llvm/test/CodeGen/AArch64/rotate.ll
index 5ac86d5f59c9d..97d20f2a372fe 100644
--- a/llvm/test/CodeGen/AArch64/rotate.ll
+++ b/llvm/test/CodeGen/AArch64/rotate.ll
@@ -2,11 +2,11 @@
 
 ;; This used to cause a backend crash about not being able to
 ;; select ROTL. Make sure it generates the basic ushr/shl.
-define <2 x i64> @testcase(<2 x i64>* %in) {
+define <2 x i64> @testcase(ptr %in) {
 ; CHECK-LABEL: testcase
 ; CHECK: ushr {{v[0-9]+}}.2d
 ; CHECK: shl  {{v[0-9]+}}.2d
-  %1 = load <2 x i64>, <2 x i64>* %in
+  %1 = load <2 x i64>, ptr %in
   %2 = lshr <2 x i64> %1, <i64 8, i64 8>
   %3 = shl <2 x i64> %1, <i64 56, i64 56>
   %4 = or <2 x i64> %2, %3

diff  --git a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
index 0f3810c818281..5e39a0196d74a 100644
--- a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
@@ -94,7 +94,7 @@ define <32 x i16> @v32i16(<32 x i16> %x, <32 x i16> %y) nounwind {
   ret <32 x i16> %z
 }
 
-define void @v8i8(<8 x i8>* %px, <8 x i8>* %py, <8 x i8>* %pz) nounwind {
+define void @v8i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x1]
@@ -102,14 +102,14 @@ define void @v8i8(<8 x i8>* %px, <8 x i8>* %py, <8 x i8>* %pz) nounwind {
 ; CHECK-NEXT:    sqadd v0.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    str d0, [x2]
 ; CHECK-NEXT:    ret
-  %x = load <8 x i8>, <8 x i8>* %px
-  %y = load <8 x i8>, <8 x i8>* %py
+  %x = load <8 x i8>, ptr %px
+  %y = load <8 x i8>, ptr %py
   %z = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %x, <8 x i8> %y)
-  store <8 x i8> %z, <8 x i8>* %pz
+  store <8 x i8> %z, ptr %pz
   ret void
 }
 
-define void @v4i8(<4 x i8>* %px, <4 x i8>* %py, <4 x i8>* %pz) nounwind {
+define void @v4i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
@@ -123,14 +123,14 @@ define void @v4i8(<4 x i8>* %px, <4 x i8>* %py, <4 x i8>* %pz) nounwind {
 ; CHECK-NEXT:    xtn v0.8b, v0.8h
 ; CHECK-NEXT:    str s0, [x2]
 ; CHECK-NEXT:    ret
-  %x = load <4 x i8>, <4 x i8>* %px
-  %y = load <4 x i8>, <4 x i8>* %py
+  %x = load <4 x i8>, ptr %px
+  %y = load <4 x i8>, ptr %py
   %z = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %x, <4 x i8> %y)
-  store <4 x i8> %z, <4 x i8>* %pz
+  store <4 x i8> %z, ptr %pz
   ret void
 }
 
-define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
+define void @v2i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1 { v0.b }[0], [x1]
@@ -148,14 +148,14 @@ define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
 ; CHECK-NEXT:    strb w9, [x2]
 ; CHECK-NEXT:    strb w8, [x2, #1]
 ; CHECK-NEXT:    ret
-  %x = load <2 x i8>, <2 x i8>* %px
-  %y = load <2 x i8>, <2 x i8>* %py
+  %x = load <2 x i8>, ptr %px
+  %y = load <2 x i8>, ptr %py
   %z = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %x, <2 x i8> %y)
-  store <2 x i8> %z, <2 x i8>* %pz
+  store <2 x i8> %z, ptr %pz
   ret void
 }
 
-define void @v4i16(<4 x i16>* %px, <4 x i16>* %py, <4 x i16>* %pz) nounwind {
+define void @v4i16(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x1]
@@ -163,14 +163,14 @@ define void @v4i16(<4 x i16>* %px, <4 x i16>* %py, <4 x i16>* %pz) nounwind {
 ; CHECK-NEXT:    sqadd v0.4h, v1.4h, v0.4h
 ; CHECK-NEXT:    str d0, [x2]
 ; CHECK-NEXT:    ret
-  %x = load <4 x i16>, <4 x i16>* %px
-  %y = load <4 x i16>, <4 x i16>* %py
+  %x = load <4 x i16>, ptr %px
+  %y = load <4 x i16>, ptr %py
   %z = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %x, <4 x i16> %y)
-  store <4 x i16> %z, <4 x i16>* %pz
+  store <4 x i16> %z, ptr %pz
   ret void
 }
 
-define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
+define void @v2i16(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1 { v0.h }[0], [x1]
@@ -188,10 +188,10 @@ define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
 ; CHECK-NEXT:    strh w9, [x2]
 ; CHECK-NEXT:    strh w8, [x2, #2]
 ; CHECK-NEXT:    ret
-  %x = load <2 x i16>, <2 x i16>* %px
-  %y = load <2 x i16>, <2 x i16>* %py
+  %x = load <2 x i16>, ptr %px
+  %y = load <2 x i16>, ptr %py
   %z = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %x, <2 x i16> %y)
-  store <2 x i16> %z, <2 x i16>* %pz
+  store <2 x i16> %z, ptr %pz
   ret void
 }
 
@@ -204,7 +204,7 @@ define <12 x i8> @v12i8(<12 x i8> %x, <12 x i8> %y) nounwind {
   ret <12 x i8> %z
 }
 
-define void @v12i16(<12 x i16>* %px, <12 x i16>* %py, <12 x i16>* %pz) nounwind {
+define void @v12i16(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v12i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q3, [x1]
@@ -214,14 +214,14 @@ define void @v12i16(<12 x i16>* %px, <12 x i16>* %py, <12 x i16>* %pz) nounwind
 ; CHECK-NEXT:    str q0, [x2]
 ; CHECK-NEXT:    str d1, [x2, #16]
 ; CHECK-NEXT:    ret
-  %x = load <12 x i16>, <12 x i16>* %px
-  %y = load <12 x i16>, <12 x i16>* %py
+  %x = load <12 x i16>, ptr %px
+  %y = load <12 x i16>, ptr %py
   %z = call <12 x i16> @llvm.sadd.sat.v12i16(<12 x i16> %x, <12 x i16> %y)
-  store <12 x i16> %z, <12 x i16>* %pz
+  store <12 x i16> %z, ptr %pz
   ret void
 }
 
-define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) nounwind {
+define void @v1i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v1i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr b0, [x1]
@@ -229,14 +229,14 @@ define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) nounwind {
 ; CHECK-NEXT:    sqadd v0.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    st1 { v0.b }[0], [x2]
 ; CHECK-NEXT:    ret
-  %x = load <1 x i8>, <1 x i8>* %px
-  %y = load <1 x i8>, <1 x i8>* %py
+  %x = load <1 x i8>, ptr %px
+  %y = load <1 x i8>, ptr %py
   %z = call <1 x i8> @llvm.sadd.sat.v1i8(<1 x i8> %x, <1 x i8> %y)
-  store <1 x i8> %z, <1 x i8>* %pz
+  store <1 x i8> %z, ptr %pz
   ret void
 }
 
-define void @v1i16(<1 x i16>* %px, <1 x i16>* %py, <1 x i16>* %pz) nounwind {
+define void @v1i16(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v1i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr h0, [x1]
@@ -244,10 +244,10 @@ define void @v1i16(<1 x i16>* %px, <1 x i16>* %py, <1 x i16>* %pz) nounwind {
 ; CHECK-NEXT:    sqadd v0.4h, v1.4h, v0.4h
 ; CHECK-NEXT:    str h0, [x2]
 ; CHECK-NEXT:    ret
-  %x = load <1 x i16>, <1 x i16>* %px
-  %y = load <1 x i16>, <1 x i16>* %py
+  %x = load <1 x i16>, ptr %px
+  %y = load <1 x i16>, ptr %py
   %z = call <1 x i16> @llvm.sadd.sat.v1i16(<1 x i16> %x, <1 x i16> %y)
-  store <1 x i16> %z, <1 x i16>* %pz
+  store <1 x i16> %z, ptr %pz
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sched-past-vector-ldst.ll b/llvm/test/CodeGen/AArch64/sched-past-vector-ldst.ll
index 0619e74835f15..cd5383388ce2d 100644
--- a/llvm/test/CodeGen/AArch64/sched-past-vector-ldst.ll
+++ b/llvm/test/CodeGen/AArch64/sched-past-vector-ldst.ll
@@ -15,43 +15,39 @@
 target datalayout = "e-m:e-i64:64-i128:128-n8:16:32:64-S128"
 target triple = "aarch64--linux-gnu"
 
-%Struct = type { i64*, [9 x double], [16 x {float, float}], [16 x {float, float}], i32, i32 }
+%Struct = type { ptr, [9 x double], [16 x {float, float}], [16 x {float, float}], i32, i32 }
 
 ; Function Attrs: nounwind
-define linkonce_odr void @func(%Struct* nocapture %this, <4 x float> %f) unnamed_addr #0 align 2 {
+define linkonce_odr void @func(ptr nocapture %this, <4 x float> %f) unnamed_addr #0 align 2 {
 entry:
-  %scevgep = getelementptr %Struct, %Struct* %this, i64 0, i32 2, i64 8, i32 0
-  %struct_ptr = bitcast float* %scevgep to i8*
-  %vec1 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0i8(i8* %struct_ptr)
+  %scevgep = getelementptr %Struct, ptr %this, i64 0, i32 2, i64 8, i32 0
+  %vec1 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0(ptr %scevgep)
   %ev1 = extractvalue { <4 x float>, <4 x float> } %vec1, 1
   %fm1 = fmul <4 x float> %f, %ev1
   %av1 = fadd <4 x float> %f, %fm1
   %ev2 = extractvalue { <4 x float>, <4 x float> } %vec1, 0
   %fm2 = fmul <4 x float> %f, %ev2
   %av2 = fadd <4 x float> %f, %fm2
-  %scevgep2 = getelementptr %Struct, %Struct* %this, i64 0, i32 3, i64 8, i32 0
-  %struct_ptr2 = bitcast float* %scevgep2 to i8*
-  tail call void @llvm.aarch64.neon.st2.v4f32.p0i8(<4 x float> %av2, <4 x float> %av1, i8* %struct_ptr2)
-  %scevgep3 = getelementptr %Struct, %Struct* %this, i64 0, i32 2, i64 12, i32 0
-  %struct_ptr3 = bitcast float* %scevgep3 to i8*
-  %vec2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0i8(i8* %struct_ptr3)
+  %scevgep2 = getelementptr %Struct, ptr %this, i64 0, i32 3, i64 8, i32 0
+  tail call void @llvm.aarch64.neon.st2.v4f32.p0(<4 x float> %av2, <4 x float> %av1, ptr %scevgep2)
+  %scevgep3 = getelementptr %Struct, ptr %this, i64 0, i32 2, i64 12, i32 0
+  %vec2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0(ptr %scevgep3)
   %ev3 = extractvalue { <4 x float>, <4 x float> } %vec2, 1
   %fm3 = fmul <4 x float> %f, %ev3
   %av3 = fadd <4 x float> %f, %fm3
   %ev4 = extractvalue { <4 x float>, <4 x float> } %vec2, 0
   %fm4 = fmul <4 x float> %f, %ev4
   %av4 = fadd <4 x float> %f, %fm4
-  %scevgep4 = getelementptr %Struct, %Struct* %this, i64 0, i32 3, i64 12, i32 0
-  %struct_ptr4 = bitcast float* %scevgep4 to i8*
-  tail call void @llvm.aarch64.neon.st2.v4f32.p0i8(<4 x float> %av4, <4 x float> %av3, i8* %struct_ptr4)
+  %scevgep4 = getelementptr %Struct, ptr %this, i64 0, i32 3, i64 12, i32 0
+  tail call void @llvm.aarch64.neon.st2.v4f32.p0(<4 x float> %av4, <4 x float> %av3, ptr %scevgep4)
   ret void
 }
 
 ; Function Attrs: nounwind readonly
-declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0i8(i8*) #2
+declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0(ptr) #2
 
 ; Function Attrs: nounwind
-declare void @llvm.aarch64.neon.st2.v4f32.p0i8(<4 x float>, <4 x float>, i8* nocapture) #1
+declare void @llvm.aarch64.neon.st2.v4f32.p0(<4 x float>, <4 x float>, ptr nocapture) #1
 
 attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="true" "use-soft-float"="false" }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/AArch64/sdag-store-merging-bug.ll b/llvm/test/CodeGen/AArch64/sdag-store-merging-bug.ll
index d67988de57527..7c5dce458215b 100644
--- a/llvm/test/CodeGen/AArch64/sdag-store-merging-bug.ll
+++ b/llvm/test/CodeGen/AArch64/sdag-store-merging-bug.ll
@@ -10,13 +10,13 @@
 ; store merging immediately merges it back together (but used to get the
 ; merging wrong), this is the only way I was able to reproduce the bug...
 
-define void @func(<2 x double>* %sptr, <2 x double>* %dptr) {
+define void @func(ptr %sptr, ptr %dptr) {
 ; CHECK-LABEL: func:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %load = load <2 x double>, <2 x double>* %sptr, align 8
-  store <2 x double> %load, <2 x double>* %dptr, align 4
+  %load = load <2 x double>, ptr %sptr, align 8
+  store <2 x double> %load, ptr %dptr, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/seh-finally.ll b/llvm/test/CodeGen/AArch64/seh-finally.ll
index 72487e5bf4d7a..581053ec3f56d 100644
--- a/llvm/test/CodeGen/AArch64/seh-finally.ll
+++ b/llvm/test/CodeGen/AArch64/seh-finally.ll
@@ -32,7 +32,7 @@
 %struct.S = type { i32 }
 
 ; Test simple SEH (__try/__finally).
-define void @simple_seh() #0 personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
+define void @simple_seh() #0 personality ptr @__C_specific_handler {
 entry:
 ; CHECK-LABEL: simple_seh
 ; CHECK: add     x29, sp, #16
@@ -43,25 +43,24 @@ entry:
 ; CHECK: bl      foo
 
   %o = alloca %struct.S, align 4
-  call void (...) @llvm.localescape(%struct.S* %o)
-  %x = getelementptr inbounds %struct.S, %struct.S* %o, i32 0, i32 0
-  %0 = load i32, i32* %x, align 4
+  call void (...) @llvm.localescape(ptr %o)
+  %0 = load i32, ptr %o, align 4
   invoke void @foo(i32 %0) #5
           to label %invoke.cont unwind label %ehcleanup
 
 invoke.cont:                                      ; preds = %entry
-  %1 = call i8* @llvm.localaddress()
-  call void @fin_simple_seh(i8 0, i8* %1)
+  %1 = call ptr @llvm.localaddress()
+  call void @fin_simple_seh(i8 0, ptr %1)
   ret void
 
 ehcleanup:                                        ; preds = %entry
   %2 = cleanuppad within none []
-  %3 = call i8* @llvm.localaddress()
-  call void @fin_simple_seh(i8 1, i8* %3) [ "funclet"(token %2) ]
+  %3 = call ptr @llvm.localaddress()
+  call void @fin_simple_seh(i8 1, ptr %3) [ "funclet"(token %2) ]
   cleanupret from %2 unwind to caller
 }
 
-define void @fin_simple_seh(i8 %abnormal_termination, i8* %frame_pointer) {
+define void @fin_simple_seh(i8 %abnormal_termination, ptr %frame_pointer) {
 entry:
 ; CHECK-LABEL: fin_simple_seh
 ; CHECK: movz    x8, #:abs_g1_s:.Lsimple_seh$frame_escape_0
@@ -70,20 +69,18 @@ entry:
 ; CHECK: ldr     w8, [x1, x8]
 ; CHECK: bl      foo
 
-  %frame_pointer.addr = alloca i8*, align 8
+  %frame_pointer.addr = alloca ptr, align 8
   %abnormal_termination.addr = alloca i8, align 1
-  %0 = call i8* @llvm.localrecover(i8* bitcast (void ()* @simple_seh to i8*), i8* %frame_pointer, i32 0)
-  %o = bitcast i8* %0 to %struct.S*
-  store i8* %frame_pointer, i8** %frame_pointer.addr, align 8
-  store i8 %abnormal_termination, i8* %abnormal_termination.addr, align 1
-  %x = getelementptr inbounds %struct.S, %struct.S* %o, i32 0, i32 0
-  %1 = load i32, i32* %x, align 4
+  %0 = call ptr @llvm.localrecover(ptr @simple_seh, ptr %frame_pointer, i32 0)
+  store ptr %frame_pointer, ptr %frame_pointer.addr, align 8
+  store i8 %abnormal_termination, ptr %abnormal_termination.addr, align 1
+  %1 = load i32, ptr %0, align 4
   call void @foo(i32 %1)
   ret void
 }
 
 ; Test SEH when stack realignment is needed in case highly aligned stack objects are present.
-define void @stack_realign() #0 personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
+define void @stack_realign() #0 personality ptr @__C_specific_handler {
 entry:
 ; CHECK-LABEL: stack_realign
 ; CHECK: add     x29, sp, #8
@@ -97,25 +94,24 @@ entry:
 ; CHECK: bl      foo
 
   %o = alloca %struct.S, align 32
-  call void (...) @llvm.localescape(%struct.S* %o)
-  %x = getelementptr inbounds %struct.S, %struct.S* %o, i32 0, i32 0
-  %0 = load i32, i32* %x, align 32
+  call void (...) @llvm.localescape(ptr %o)
+  %0 = load i32, ptr %o, align 32
   invoke void @foo(i32 %0) #5
           to label %invoke.cont unwind label %ehcleanup
 
 invoke.cont:                                      ; preds = %entry
-  %1 = call i8* @llvm.localaddress()
-  call void @fin_stack_realign(i8 0, i8* %1)
+  %1 = call ptr @llvm.localaddress()
+  call void @fin_stack_realign(i8 0, ptr %1)
   ret void
 
 ehcleanup:                                        ; preds = %entry
   %2 = cleanuppad within none []
-  %3 = call i8* @llvm.localaddress()
-  call void @fin_stack_realign(i8 1, i8* %3) [ "funclet"(token %2) ]
+  %3 = call ptr @llvm.localaddress()
+  call void @fin_stack_realign(i8 1, ptr %3) [ "funclet"(token %2) ]
   cleanupret from %2 unwind to caller
 }
 
-define void @fin_stack_realign(i8 %abnormal_termination, i8* %frame_pointer) {
+define void @fin_stack_realign(i8 %abnormal_termination, ptr %frame_pointer) {
 entry:
 ; CHECK-LABEL: fin_stack_realign
 ; CHECK: movz    x8, #:abs_g1_s:.Lstack_realign$frame_escape_0
@@ -124,20 +120,18 @@ entry:
 ; CHECK: ldr     w8, [x1, x8]
 ; CHECK: bl      foo
 
-  %frame_pointer.addr = alloca i8*, align 8
+  %frame_pointer.addr = alloca ptr, align 8
   %abnormal_termination.addr = alloca i8, align 1
-  %0 = call i8* @llvm.localrecover(i8* bitcast (void ()* @stack_realign to i8*), i8* %frame_pointer, i32 0)
-  %o = bitcast i8* %0 to %struct.S*
-  store i8* %frame_pointer, i8** %frame_pointer.addr, align 8
-  store i8 %abnormal_termination, i8* %abnormal_termination.addr, align 1
-  %x = getelementptr inbounds %struct.S, %struct.S* %o, i32 0, i32 0
-  %1 = load i32, i32* %x, align 32
+  %0 = call ptr @llvm.localrecover(ptr @stack_realign, ptr %frame_pointer, i32 0)
+  store ptr %frame_pointer, ptr %frame_pointer.addr, align 8
+  store i8 %abnormal_termination, ptr %abnormal_termination.addr, align 1
+  %1 = load i32, ptr %0, align 32
   call void @foo(i32 %1)
   ret void
 }
 
 ; Test SEH when variable size objects are present on the stack. Note: Escaped vla's are currently not supported by SEH.
-define void @vla_present(i32 %n) #0 personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
+define void @vla_present(i32 %n) #0 personality ptr @__C_specific_handler {
 entry:
 ; CHECK-LABEL: vla_present
 ; CHECK: add     x29, sp, #32
@@ -153,35 +147,35 @@ entry:
 ; CHECK: bl      foo
 
   %n.addr = alloca i32, align 4
-  %saved_stack = alloca i8*, align 8
+  %saved_stack = alloca ptr, align 8
   %__vla_expr0 = alloca i64, align 8
-  call void (...) @llvm.localescape(i32* %n.addr)
-  store i32 %n, i32* %n.addr, align 4
-  %0 = load i32, i32* %n.addr, align 4
+  call void (...) @llvm.localescape(ptr %n.addr)
+  store i32 %n, ptr %n.addr, align 4
+  %0 = load i32, ptr %n.addr, align 4
   %1 = zext i32 %0 to i64
-  %2 = call i8* @llvm.stacksave()
-  store i8* %2, i8** %saved_stack, align 8
+  %2 = call ptr @llvm.stacksave()
+  store ptr %2, ptr %saved_stack, align 8
   %vla = alloca i32, i64 %1, align 4
-  store i64 %1, i64* %__vla_expr0, align 8
-  %3 = load i32, i32* %n.addr, align 4
+  store i64 %1, ptr %__vla_expr0, align 8
+  %3 = load i32, ptr %n.addr, align 4
   invoke void @foo(i32 %3) #5
           to label %invoke.cont unwind label %ehcleanup
 
 invoke.cont:                                      ; preds = %entry
-  %4 = call i8* @llvm.localaddress()
-  call void @fin_vla_present(i8 0, i8* %4)
-  %5 = load i8*, i8** %saved_stack, align 8
-  call void @llvm.stackrestore(i8* %5)
+  %4 = call ptr @llvm.localaddress()
+  call void @fin_vla_present(i8 0, ptr %4)
+  %5 = load ptr, ptr %saved_stack, align 8
+  call void @llvm.stackrestore(ptr %5)
   ret void
 
 ehcleanup:                                        ; preds = %entry
   %6 = cleanuppad within none []
-  %7 = call i8* @llvm.localaddress()
-  call void @fin_vla_present(i8 1, i8* %7) [ "funclet"(token %6) ]
+  %7 = call ptr @llvm.localaddress()
+  call void @fin_vla_present(i8 1, ptr %7) [ "funclet"(token %6) ]
   cleanupret from %6 unwind to caller
 }
 
-define void @fin_vla_present(i8 %abnormal_termination, i8* %frame_pointer) {
+define void @fin_vla_present(i8 %abnormal_termination, ptr %frame_pointer) {
 entry:
 ; CHECK-LABEL: fin_vla_present
 ; CHECK: movz    x8, #:abs_g1_s:.Lvla_present$frame_escape_0
@@ -190,19 +184,18 @@ entry:
 ; CHECK: ldr     w8, [x1, x8]
 ; CHECK: bl      foo
 
-  %frame_pointer.addr = alloca i8*, align 8
+  %frame_pointer.addr = alloca ptr, align 8
   %abnormal_termination.addr = alloca i8, align 1
-  %0 = call i8* @llvm.localrecover(i8* bitcast (void (i32)* @vla_present to i8*), i8* %frame_pointer, i32 0)
-  %n.addr = bitcast i8* %0 to i32*
-  store i8* %frame_pointer, i8** %frame_pointer.addr, align 8
-  store i8 %abnormal_termination, i8* %abnormal_termination.addr, align 1
-  %1 = load i32, i32* %n.addr, align 4
+  %0 = call ptr @llvm.localrecover(ptr @vla_present, ptr %frame_pointer, i32 0)
+  store ptr %frame_pointer, ptr %frame_pointer.addr, align 8
+  store i8 %abnormal_termination, ptr %abnormal_termination.addr, align 1
+  %1 = load i32, ptr %0, align 4
   call void @foo(i32 %1)
   ret void
 }
 
 ; Test when both vla's and highly aligned objects are present on stack.
-define void @vla_and_realign(i32 %n) #0 personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
+define void @vla_and_realign(i32 %n) #0 personality ptr @__C_specific_handler {
 entry:
 ; CHECK-LABEL: vla_and_realign
 ; CHECK: add     x29, sp, #8
@@ -222,36 +215,35 @@ entry:
 
   %n.addr = alloca i32, align 4
   %o = alloca %struct.S, align 32
-  %saved_stack = alloca i8*, align 8
+  %saved_stack = alloca ptr, align 8
   %__vla_expr0 = alloca i64, align 8
-  call void (...) @llvm.localescape(%struct.S* %o)
-  store i32 %n, i32* %n.addr, align 4
-  %0 = load i32, i32* %n.addr, align 4
+  call void (...) @llvm.localescape(ptr %o)
+  store i32 %n, ptr %n.addr, align 4
+  %0 = load i32, ptr %n.addr, align 4
   %1 = zext i32 %0 to i64
-  %2 = call i8* @llvm.stacksave()
-  store i8* %2, i8** %saved_stack, align 8
+  %2 = call ptr @llvm.stacksave()
+  store ptr %2, ptr %saved_stack, align 8
   %vla = alloca i32, i64 %1, align 4
-  store i64 %1, i64* %__vla_expr0, align 8
-  %x = getelementptr inbounds %struct.S, %struct.S* %o, i32 0, i32 0
-  %3 = load i32, i32* %x, align 32
+  store i64 %1, ptr %__vla_expr0, align 8
+  %3 = load i32, ptr %o, align 32
   invoke void @foo(i32 %3) #5
           to label %invoke.cont unwind label %ehcleanup
 
 invoke.cont:                                      ; preds = %entry
-  %4 = call i8* @llvm.localaddress()
-  call void @fin_vla_and_realign(i8 0, i8* %4)
-  %5 = load i8*, i8** %saved_stack, align 8
-  call void @llvm.stackrestore(i8* %5)
+  %4 = call ptr @llvm.localaddress()
+  call void @fin_vla_and_realign(i8 0, ptr %4)
+  %5 = load ptr, ptr %saved_stack, align 8
+  call void @llvm.stackrestore(ptr %5)
   ret void
 
 ehcleanup:                                        ; preds = %entry
   %6 = cleanuppad within none []
-  %7 = call i8* @llvm.localaddress()
-  call void @fin_vla_and_realign(i8 1, i8* %7) [ "funclet"(token %6) ]
+  %7 = call ptr @llvm.localaddress()
+  call void @fin_vla_and_realign(i8 1, ptr %7) [ "funclet"(token %6) ]
   cleanupret from %6 unwind to caller
 }
 
-define void @fin_vla_and_realign(i8 %abnormal_termination, i8* %frame_pointer) {
+define void @fin_vla_and_realign(i8 %abnormal_termination, ptr %frame_pointer) {
 entry:
 ; CHECK-LABEL: fin_vla_and_realign
 ; CHECK: movz    x8, #:abs_g1_s:.Lvla_and_realign$frame_escape_0
@@ -260,23 +252,21 @@ entry:
 ; CHECK: ldr     w8, [x1, x8]
 ; CHECK: bl      foo
 
-  %frame_pointer.addr = alloca i8*, align 8
+  %frame_pointer.addr = alloca ptr, align 8
   %abnormal_termination.addr = alloca i8, align 1
-  %0 = call i8* @llvm.localrecover(i8* bitcast (void (i32)* @vla_and_realign to i8*), i8* %frame_pointer, i32 0)
-  %o = bitcast i8* %0 to %struct.S*
-  store i8* %frame_pointer, i8** %frame_pointer.addr, align 8
-  store i8 %abnormal_termination, i8* %abnormal_termination.addr, align 1
-  %x = getelementptr inbounds %struct.S, %struct.S* %o, i32 0, i32 0
-  %1 = load i32, i32* %x, align 32
+  %0 = call ptr @llvm.localrecover(ptr @vla_and_realign, ptr %frame_pointer, i32 0)
+  store ptr %frame_pointer, ptr %frame_pointer.addr, align 8
+  store i8 %abnormal_termination, ptr %abnormal_termination.addr, align 1
+  %1 = load i32, ptr %0, align 32
   call void @foo(i32 %1)
   ret void
 }
 
 declare void @foo(i32)
-declare void @llvm.stackrestore(i8*)
-declare i8* @llvm.stacksave()
-declare i8* @llvm.localrecover(i8*, i8*, i32)
-declare i8* @llvm.localaddress()
+declare void @llvm.stackrestore(ptr)
+declare ptr @llvm.stacksave()
+declare ptr @llvm.localrecover(ptr, ptr, i32)
+declare ptr @llvm.localaddress()
 declare void @llvm.localescape(...)
 declare i32 @__C_specific_handler(...)
 

diff  --git a/llvm/test/CodeGen/AArch64/seh_funclet_x1.ll b/llvm/test/CodeGen/AArch64/seh_funclet_x1.ll
index 7f5a0324f9c00..abb924933b903 100644
--- a/llvm/test/CodeGen/AArch64/seh_funclet_x1.ll
+++ b/llvm/test/CodeGen/AArch64/seh_funclet_x1.ll
@@ -14,20 +14,20 @@ target datalayout = "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-unknown-windows-msvc19.15.26732"
 
 ; Function Attrs: noinline nounwind optnone uwtable
-define dso_local i32 @main() #0 personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
+define dso_local i32 @main() #0 personality ptr @__C_specific_handler {
 entry:
   %retval = alloca i32, align 4
   %Counter = alloca i32, align 4
   %__exception_code = alloca i32, align 4
-  call void (...) @llvm.localescape(i32* %Counter)
-  store i32 0, i32* %retval, align 4
-  store i32 0, i32* %Counter, align 4
-  %call = invoke i32 bitcast (i32 (...)* @RaiseStatus to i32 (i32)*)(i32 -1073741675) #3
+  call void (...) @llvm.localescape(ptr %Counter)
+  store i32 0, ptr %retval, align 4
+  store i32 0, ptr %Counter, align 4
+  %call = invoke i32 @RaiseStatus(i32 -1073741675) #3
           to label %invoke.cont unwind label %ehcleanup
 
 invoke.cont:                                      ; preds = %entry
-  %0 = call i8* @llvm.localaddress()
-  invoke void @"?fin$0 at 0@main@@"(i8 0, i8* %0) #3
+  %0 = call ptr @llvm.localaddress()
+  invoke void @"?fin$0 at 0@main@@"(i8 0, ptr %0) #3
           to label %invoke.cont1 unwind label %catch.dispatch
 
 invoke.cont1:                                     ; preds = %invoke.cont
@@ -35,8 +35,8 @@ invoke.cont1:                                     ; preds = %invoke.cont
 
 ehcleanup:                                        ; preds = %entry
   %1 = cleanuppad within none []
-  %2 = call i8* @llvm.localaddress()
-  invoke void @"?fin$0 at 0@main@@"(i8 1, i8* %2) #3 [ "funclet"(token %1) ]
+  %2 = call ptr @llvm.localaddress()
+  invoke void @"?fin$0 at 0@main@@"(i8 1, ptr %2) #3 [ "funclet"(token %1) ]
           to label %invoke.cont2 unwind label %catch.dispatch
 
 invoke.cont2:                                     ; preds = %ehcleanup
@@ -46,58 +46,56 @@ catch.dispatch:                                   ; preds = %invoke.cont2, %ehcl
   %3 = catchswitch within none [label %__except] unwind to caller
 
 __except:                                         ; preds = %catch.dispatch
-  %4 = catchpad within %3 [i8* null]
+  %4 = catchpad within %3 [ptr null]
   catchret from %4 to label %__except3
 
 __except3:                                        ; preds = %__except
   %5 = call i32 @llvm.eh.exceptioncode(token %4)
-  store i32 %5, i32* %__exception_code, align 4
-  %6 = load i32, i32* %Counter, align 4
+  store i32 %5, ptr %__exception_code, align 4
+  %6 = load i32, ptr %Counter, align 4
   %add = add nsw i32 %6, 5
-  store i32 %add, i32* %Counter, align 4
+  store i32 %add, ptr %Counter, align 4
   br label %__try.cont
 
 __try.cont:                                       ; preds = %__except3, %invoke.cont1
-  %7 = load i32, i32* %retval, align 4
+  %7 = load i32, ptr %retval, align 4
   ret i32 %7
 }
 
-define internal void @"?fin$0 at 0@main@@"(i8 %abnormal_termination, i8* %frame_pointer) {
+define internal void @"?fin$0 at 0@main@@"(i8 %abnormal_termination, ptr %frame_pointer) {
 entry:
-  %frame_pointer.addr = alloca i8*, align 8
+  %frame_pointer.addr = alloca ptr, align 8
   %abnormal_termination.addr = alloca i8, align 1
-  %0 = call i8* @llvm.localrecover(i8* bitcast (i32 ()* @main to i8*), i8* %frame_pointer, i32 0)
-  %Counter = bitcast i8* %0 to i32*
-  store i8* %frame_pointer, i8** %frame_pointer.addr, align 8
-  store i8 %abnormal_termination, i8* %abnormal_termination.addr, align 1
-  store i32 3, i32* %Counter, align 4
-  call void @"?fin$1 at 0@main@@"(i8 0, i8* %frame_pointer)
-  %1 = load i32, i32* %Counter, align 4
+  %0 = call ptr @llvm.localrecover(ptr @main, ptr %frame_pointer, i32 0)
+  store ptr %frame_pointer, ptr %frame_pointer.addr, align 8
+  store i8 %abnormal_termination, ptr %abnormal_termination.addr, align 1
+  store i32 3, ptr %0, align 4
+  call void @"?fin$1 at 0@main@@"(i8 0, ptr %frame_pointer)
+  %1 = load i32, ptr %0, align 4
   %add = add nsw i32 %1, 2
-  store i32 %add, i32* %Counter, align 4
-  %call = call i32 bitcast (i32 (...)* @RaiseStatus to i32 (i32)*)(i32 -1073741675)
+  store i32 %add, ptr %0, align 4
+  %call = call i32 @RaiseStatus(i32 -1073741675)
   ret void
 }
 
 ; Function Attrs: nounwind readnone
-declare i8* @llvm.localrecover(i8*, i8*, i32) #1
+declare ptr @llvm.localrecover(ptr, ptr, i32) #1
 
-define internal void @"?fin$1 at 0@main@@"(i8 %abnormal_termination, i8* %frame_pointer) {
+define internal void @"?fin$1 at 0@main@@"(i8 %abnormal_termination, ptr %frame_pointer) {
 entry:
-  %frame_pointer.addr = alloca i8*, align 8
+  %frame_pointer.addr = alloca ptr, align 8
   %abnormal_termination.addr = alloca i8, align 1
-  %0 = call i8* @llvm.localrecover(i8* bitcast (i32 ()* @main to i8*), i8* %frame_pointer, i32 0)
-  %Counter = bitcast i8* %0 to i32*
-  store i8* %frame_pointer, i8** %frame_pointer.addr, align 8
-  store i8 %abnormal_termination, i8* %abnormal_termination.addr, align 1
-  %1 = load i32, i32* %Counter, align 4
+  %0 = call ptr @llvm.localrecover(ptr @main, ptr %frame_pointer, i32 0)
+  store ptr %frame_pointer, ptr %frame_pointer.addr, align 8
+  store i8 %abnormal_termination, ptr %abnormal_termination.addr, align 1
+  %1 = load i32, ptr %0, align 4
   %cmp = icmp eq i32 %1, 3
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %2 = load i32, i32* %Counter, align 4
+  %2 = load i32, ptr %0, align 4
   %add = add nsw i32 %2, 1
-  store i32 %add, i32* %Counter, align 4
+  store i32 %add, ptr %0, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
@@ -109,7 +107,7 @@ declare dso_local i32 @RaiseStatus(...)
 declare dso_local i32 @__C_specific_handler(...)
 
 ; Function Attrs: nounwind readnone
-declare i8* @llvm.localaddress() #1
+declare ptr @llvm.localaddress() #1
 
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.eh.exceptioncode(token) #1

diff  --git a/llvm/test/CodeGen/AArch64/select_cc.ll b/llvm/test/CodeGen/AArch64/select_cc.ll
index e69df568d996b..92c8087518151 100644
--- a/llvm/test/CodeGen/AArch64/select_cc.ll
+++ b/llvm/test/CodeGen/AArch64/select_cc.ll
@@ -53,7 +53,7 @@ entry:
   ret i64 %sel
 }
 
-define <2 x double> @select_olt_load_cmp(<2 x double> %a, <2 x float>* %src) {
+define <2 x double> @select_olt_load_cmp(<2 x double> %a, ptr %src) {
 ; CHECK-LABEL: select_olt_load_cmp:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi d1, #0000000000000000
@@ -63,7 +63,7 @@ define <2 x double> @select_olt_load_cmp(<2 x double> %a, <2 x float>* %src) {
 ; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
 entry:
-  %l = load <2 x float>, <2 x float>* %src, align 4
+  %l = load <2 x float>, ptr %src, align 4
   %cmp = fcmp olt <2 x float> zeroinitializer, %l
   %sel = select <2 x i1> %cmp, <2 x double> %a, <2 x double> zeroinitializer
   ret <2 x double> %sel

diff  --git a/llvm/test/CodeGen/AArch64/semantic-interposition-asm.ll b/llvm/test/CodeGen/AArch64/semantic-interposition-asm.ll
index 7c14664dc7715..cab5674238b40 100644
--- a/llvm/test/CodeGen/AArch64/semantic-interposition-asm.ll
+++ b/llvm/test/CodeGen/AArch64/semantic-interposition-asm.ll
@@ -18,7 +18,7 @@ define i64 @test_var() nounwind {
 ; CHECK-NEXT:    //NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call i64 asm "adrp $0, $1\0Aldr ${0:w}, [$0, :lo12:$1]\0Aadrp x8, $2\0Aldr w8, [x8, :lo12:$2]\0Aadd $0,x8,$0", "=r,S,S,~{x8}"(i32* nonnull @gv0, i32* nonnull @gv1)
+  %0 = tail call i64 asm "adrp $0, $1\0Aldr ${0:w}, [$0, :lo12:$1]\0Aadrp x8, $2\0Aldr w8, [x8, :lo12:$2]\0Aadd $0,x8,$0", "=r,S,S,~{x8}"(ptr nonnull @gv0, ptr nonnull @gv1)
   ret i64 %0
 }
 
@@ -51,6 +51,6 @@ define i64 @test_fun() nounwind {
 ; CHECK-NEXT:    //NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call i64 asm "adrp $0, :got:$1\0Aldr $0, [$0, :got_lo12:$1]\0Aadrp x8, :got:$2\0Aldr x8, [x8, :got_lo12:$2]", "=r,S,S,~{x8}"(void ()* nonnull @fun0, void ()* nonnull @fun1)
+  %0 = tail call i64 asm "adrp $0, :got:$1\0Aldr $0, [$0, :got_lo12:$1]\0Aadrp x8, :got:$2\0Aldr x8, [x8, :got_lo12:$2]", "=r,S,S,~{x8}"(ptr nonnull @fun0, ptr nonnull @fun1)
   ret i64 %0
 }

diff  --git a/llvm/test/CodeGen/AArch64/setcc-type-mismatch.ll b/llvm/test/CodeGen/AArch64/setcc-type-mismatch.ll
index 86817fa4fa406..cf7b9b173905c 100644
--- a/llvm/test/CodeGen/AArch64/setcc-type-mismatch.ll
+++ b/llvm/test/CodeGen/AArch64/setcc-type-mismatch.ll
@@ -1,11 +1,11 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu %s -o - | FileCheck %s
 
-define void @test_mismatched_setcc(<4 x i22> %l, <4 x i22> %r, <4 x i1>* %addr) {
+define void @test_mismatched_setcc(<4 x i22> %l, <4 x i22> %r, ptr %addr) {
 ; CHECK-LABEL: test_mismatched_setcc:
 ; CHECK: cmeq [[CMP128:v[0-9]+]].4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
 ; CHECK: xtn {{v[0-9]+}}.4h, [[CMP128]].4s
 
   %tst = icmp eq <4 x i22> %l, %r
-  store <4 x i1> %tst, <4 x i1>* %addr
+  store <4 x i1> %tst, ptr %addr
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/setjmp-bti-no-enforcement.ll b/llvm/test/CodeGen/AArch64/setjmp-bti-no-enforcement.ll
index 720c0a44e3090..6124d31da62ca 100644
--- a/llvm/test/CodeGen/AArch64/setjmp-bti-no-enforcement.ll
+++ b/llvm/test/CodeGen/AArch64/setjmp-bti-no-enforcement.ll
@@ -13,12 +13,12 @@
 
 ; C source
 ; --------
-; extern int setjmp(void*);
+; extern int setjmp(ptr);
 ; extern void notsetjmp(void);
 ;
 ; void bbb(void) {
 ;   setjmp(0);
-;   int (*fnptr)(void*) = setjmp;
+;   int (*fnptr)(ptr) = setjmp;
 ;   fnptr(0);
 ;   notsetjmp();
 ; }
@@ -33,16 +33,16 @@ define void @bbb() {
 ; NOBTI-NOT: hint #36
 
 entry:
-  %fnptr = alloca i32 (i8*)*, align 8
-  %call = call i32 @setjmp(i8* noundef null) #0
-  store i32 (i8*)* @setjmp, i32 (i8*)** %fnptr, align 8
-  %0 = load i32 (i8*)*, i32 (i8*)** %fnptr, align 8
-  %call1 = call i32 %0(i8* noundef null) #0
+  %fnptr = alloca ptr, align 8
+  %call = call i32 @setjmp(ptr noundef null) #0
+  store ptr @setjmp, ptr %fnptr, align 8
+  %0 = load ptr, ptr %fnptr, align 8
+  %call1 = call i32 %0(ptr noundef null) #0
   call void @notsetjmp()
   ret void
 }
 
-declare i32 @setjmp(i8* noundef) #0
+declare i32 @setjmp(ptr noundef) #0
 declare void @notsetjmp()
 
 attributes #0 = { returns_twice }

diff  --git a/llvm/test/CodeGen/AArch64/setjmp-bti-outliner.ll b/llvm/test/CodeGen/AArch64/setjmp-bti-outliner.ll
index 17b89d8db06d0..36fc2f5b31c14 100644
--- a/llvm/test/CodeGen/AArch64/setjmp-bti-outliner.ll
+++ b/llvm/test/CodeGen/AArch64/setjmp-bti-outliner.ll
@@ -15,7 +15,7 @@
 
 ; C source
 ; --------
-; extern int setjmp(void*);
+; extern int setjmp(ptr);
 ;
 ; int f(int a, int b, int c, int d) {
 ;   setjmp(0);
@@ -39,7 +39,7 @@ define i32 @f(i32 noundef %a, i32 noundef %b, i32 noundef %c, i32 noundef %d) {
 ; NOBTI-NEXT:   bl      OUTLINED_FUNCTION_1
 
 entry:
-  %call = call i32 @setjmp(i8* noundef null) #0
+  %call = call i32 @setjmp(ptr noundef null) #0
   %add = add nsw i32 %b, %a
   %mul = mul nsw i32 %add, %a
   %add1 = add nsw i32 %d, %c
@@ -48,7 +48,7 @@ entry:
   ret i32 %add2
 }
 
-declare i32 @setjmp(i8* noundef) #0
+declare i32 @setjmp(ptr noundef) #0
 
 define i32 @g(i32 noundef %a, i32 noundef %b, i32 noundef %c, i32 noundef %d) {
 ; BTI-LABEL: g:
@@ -62,7 +62,7 @@ define i32 @g(i32 noundef %a, i32 noundef %b, i32 noundef %c, i32 noundef %d) {
 ; NOBTI-NEXT:   bl      OUTLINED_FUNCTION_1
 
 entry:
-  %call = call i32 @setjmp(i8* noundef null) #0
+  %call = call i32 @setjmp(ptr noundef null) #0
   %add = add nsw i32 %b, %a
   %mul = mul nsw i32 %add, %a
   %add1 = add nsw i32 %d, %c

diff  --git a/llvm/test/CodeGen/AArch64/setjmp-bti.ll b/llvm/test/CodeGen/AArch64/setjmp-bti.ll
index 2c279e5415cf6..06c4d4eb49f83 100644
--- a/llvm/test/CodeGen/AArch64/setjmp-bti.ll
+++ b/llvm/test/CodeGen/AArch64/setjmp-bti.ll
@@ -10,12 +10,12 @@
 
 ; C source
 ; --------
-; extern int setjmp(void*);
+; extern int setjmp(ptr);
 ; extern void notsetjmp(void);
 ;
 ; void bbb(void) {
 ;   setjmp(0);
-;   int (*fnptr)(void*) = setjmp;
+;   int (*fnptr)(ptr) = setjmp;
 ;   fnptr(0);
 ;   notsetjmp();
 ; }
@@ -37,16 +37,16 @@ define void @bbb() {
 ; NOBTI:     bl notsetjmp
 ; NOBTI-NOT: hint #36
 entry:
-  %fnptr = alloca i32 (i8*)*, align 8
-  %call = call i32 @setjmp(i8* noundef null) #0
-  store i32 (i8*)* @setjmp, i32 (i8*)** %fnptr, align 8
-  %0 = load i32 (i8*)*, i32 (i8*)** %fnptr, align 8
-  %call1 = call i32 %0(i8* noundef null) #0
+  %fnptr = alloca ptr, align 8
+  %call = call i32 @setjmp(ptr noundef null) #0
+  store ptr @setjmp, ptr %fnptr, align 8
+  %0 = load ptr, ptr %fnptr, align 8
+  %call1 = call i32 %0(ptr noundef null) #0
   call void @notsetjmp()
   ret void
 }
 
-declare i32 @setjmp(i8* noundef) #0
+declare i32 @setjmp(ptr noundef) #0
 declare void @notsetjmp()
 
 attributes #0 = { returns_twice }

diff  --git a/llvm/test/CodeGen/AArch64/settag-merge-order.ll b/llvm/test/CodeGen/AArch64/settag-merge-order.ll
index c4de3c9532e9f..e974a490a1717 100644
--- a/llvm/test/CodeGen/AArch64/settag-merge-order.ll
+++ b/llvm/test/CodeGen/AArch64/settag-merge-order.ll
@@ -1,8 +1,8 @@
 ; RUN: llc < %s -mtriple=aarch64 -mattr=+mte -aarch64-order-frame-objects=1 | FileCheck %s
 
-declare void @use(i8* %p)
-declare void @llvm.aarch64.settag(i8* %p, i64 %a)
-declare void @llvm.aarch64.settag.zero(i8* %p, i64 %a)
+declare void @use(ptr %p)
+declare void @llvm.aarch64.settag(ptr %p, i64 %a)
+declare void @llvm.aarch64.settag.zero(ptr %p, i64 %a)
 
 ; Two loops of size 256; the second loop updates SP.
 ; After frame reordering, two loops can be merged into one.
@@ -19,11 +19,11 @@ entry:
   %b = alloca i8, i32 32, align 16
   %c = alloca i8, i32 128, align 16
   %c2 = alloca i8, i32 128, align 16
-  call void @use(i8* %b)
-  call void @llvm.aarch64.settag(i8* %a, i64 128)
-  call void @llvm.aarch64.settag(i8* %a2, i64 128)
-  call void @llvm.aarch64.settag(i8* %c, i64 128)
-  call void @llvm.aarch64.settag(i8* %c2, i64 128)
+  call void @use(ptr %b)
+  call void @llvm.aarch64.settag(ptr %a, i64 128)
+  call void @llvm.aarch64.settag(ptr %a2, i64 128)
+  call void @llvm.aarch64.settag(ptr %c, i64 128)
+  call void @llvm.aarch64.settag(ptr %c2, i64 128)
   ret void
 }
 
@@ -35,7 +35,7 @@ entry:
   %b = alloca i8, i32 32, align 16
   %c = alloca i8, i32 128, align 16
   %c2 = alloca i8, i32 128, align 16
-  call void @use(i8* %b)
+  call void @use(ptr %b)
   br i1 %flag, label %if.then, label %if.else
 
 if.then:
@@ -43,8 +43,8 @@ if.then:
 ; CHECK: sub     x8, x8, #32
 ; CHECK: st2g    x9, [x9], #32
 ; CHECK: cbnz    x8,
-  call void @llvm.aarch64.settag(i8* %a, i64 160)
-  call void @llvm.aarch64.settag(i8* %a2, i64 160)
+  call void @llvm.aarch64.settag(ptr %a, i64 160)
+  call void @llvm.aarch64.settag(ptr %a2, i64 160)
   br label %if.end
 
 if.else:
@@ -52,8 +52,8 @@ if.else:
 ; CHECK: sub     x8, x8, #32
 ; CHECK: st2g    x9, [x9], #32
 ; CHECK: cbnz    x8,
-  call void @llvm.aarch64.settag(i8* %c, i64 128)
-  call void @llvm.aarch64.settag(i8* %c2, i64 128)
+  call void @llvm.aarch64.settag(ptr %c, i64 128)
+  call void @llvm.aarch64.settag(ptr %c2, i64 128)
   br label %if.end
 
 if.end:
@@ -61,10 +61,10 @@ if.end:
 ; CHECK: st2g    sp, [sp], #32
 ; CHECK: sub     x8, x8, #32
 ; CHECK: cbnz    x8,
-  call void @llvm.aarch64.settag(i8* %a, i64 160)
-  call void @llvm.aarch64.settag(i8* %a2, i64 160)
-  call void @llvm.aarch64.settag(i8* %c, i64 128)
-  call void @llvm.aarch64.settag(i8* %c2, i64 128)
+  call void @llvm.aarch64.settag(ptr %a, i64 160)
+  call void @llvm.aarch64.settag(ptr %a2, i64 160)
+  call void @llvm.aarch64.settag(ptr %c, i64 128)
+  call void @llvm.aarch64.settag(ptr %c2, i64 128)
 
 ; CHECK: ret
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/settag-merge.ll b/llvm/test/CodeGen/AArch64/settag-merge.ll
index 1bf5d074bf674..591c76f5728b9 100644
--- a/llvm/test/CodeGen/AArch64/settag-merge.ll
+++ b/llvm/test/CodeGen/AArch64/settag-merge.ll
@@ -1,8 +1,8 @@
 ; RUN: llc < %s -mtriple=aarch64 -mattr=+mte -aarch64-order-frame-objects=0 | FileCheck %s
 
-declare void @use(i8* %p)
-declare void @llvm.aarch64.settag(i8* %p, i64 %a)
-declare void @llvm.aarch64.settag.zero(i8* %p, i64 %a)
+declare void @use(ptr %p)
+declare void @llvm.aarch64.settag(ptr %p, i64 %a)
+declare void @llvm.aarch64.settag.zero(ptr %p, i64 %a)
 
 define void @stg16_16() {
 entry:
@@ -11,8 +11,8 @@ entry:
 ; CHECK: ret
   %a = alloca i8, i32 16, align 16
   %b = alloca i8, i32 16, align 16
-  call void @llvm.aarch64.settag(i8* %a, i64 16)
-  call void @llvm.aarch64.settag(i8* %b, i64 16)
+  call void @llvm.aarch64.settag(ptr %a, i64 16)
+  call void @llvm.aarch64.settag(ptr %b, i64 16)
   ret void
 }
 
@@ -27,10 +27,10 @@ entry:
   %b = alloca i8, i32 16, align 16
   %c = alloca i8, i32 16, align 16
   %d = alloca i8, i32 16, align 16
-  call void @llvm.aarch64.settag(i8* %a, i64 16)
-  call void @llvm.aarch64.settag(i8* %b, i64 16)
-  call void @llvm.aarch64.settag(i8* %c, i64 16)
-  call void @llvm.aarch64.settag(i8* %d, i64 16)
+  call void @llvm.aarch64.settag(ptr %a, i64 16)
+  call void @llvm.aarch64.settag(ptr %b, i64 16)
+  call void @llvm.aarch64.settag(ptr %c, i64 16)
+  call void @llvm.aarch64.settag(ptr %d, i64 16)
   ret i32 0
 }
 
@@ -44,10 +44,10 @@ entry:
   %b = alloca i8, i32 16, align 16
   %c = alloca i8, i32 16, align 16
   %d = alloca i8, i32 16, align 16
-  call void @llvm.aarch64.settag(i8* %a, i64 16)
-  call void @llvm.aarch64.settag(i8* %b, i64 16)
-  call void @llvm.aarch64.settag(i8* %c, i64 16)
-  call void @llvm.aarch64.settag(i8* %d, i64 16)
+  call void @llvm.aarch64.settag(ptr %a, i64 16)
+  call void @llvm.aarch64.settag(ptr %b, i64 16)
+  call void @llvm.aarch64.settag(ptr %c, i64 16)
+  call void @llvm.aarch64.settag(ptr %d, i64 16)
   ret void
 }
 
@@ -63,10 +63,10 @@ entry:
   %b = alloca i8, i32 128, align 16
   %c = alloca i8, i32 128, align 16
   %d = alloca i8, i32 128, align 16
-  call void @llvm.aarch64.settag(i8* %a, i64 128)
-  call void @llvm.aarch64.settag(i8* %b, i64 128)
-  call void @llvm.aarch64.settag(i8* %c, i64 128)
-  call void @llvm.aarch64.settag(i8* %d, i64 128)
+  call void @llvm.aarch64.settag(ptr %a, i64 128)
+  call void @llvm.aarch64.settag(ptr %b, i64 128)
+  call void @llvm.aarch64.settag(ptr %c, i64 128)
+  call void @llvm.aarch64.settag(ptr %d, i64 128)
   ret void
 }
 
@@ -81,9 +81,9 @@ entry:
   %a = alloca i8, i32 16, align 16
   %b = alloca i8, i32 512, align 16
   %c = alloca i8, i32 16, align 16
-  call void @llvm.aarch64.settag(i8* %a, i64 16)
-  call void @llvm.aarch64.settag(i8* %b, i64 512)
-  call void @llvm.aarch64.settag(i8* %c, i64 16)
+  call void @llvm.aarch64.settag(ptr %a, i64 16)
+  call void @llvm.aarch64.settag(ptr %b, i64 512)
+  call void @llvm.aarch64.settag(ptr %c, i64 16)
   ret void
 }
 
@@ -98,9 +98,9 @@ entry:
   %a = alloca i8, i32 512, align 16
   %b = alloca i8, i32 512, align 16
   %c = alloca i8, i32 512, align 16
-  call void @llvm.aarch64.settag(i8* %a, i64 512)
-  call void @llvm.aarch64.settag(i8* %b, i64 512)
-  call void @llvm.aarch64.settag(i8* %c, i64 512)
+  call void @llvm.aarch64.settag(ptr %a, i64 512)
+  call void @llvm.aarch64.settag(ptr %b, i64 512)
+  call void @llvm.aarch64.settag(ptr %c, i64 512)
   ret void
 }
 
@@ -121,12 +121,12 @@ entry:
   br i1 %flag, label %if.then, label %if.end
 
 if.then:
-  call void @llvm.aarch64.settag(i8* %a, i64 48)
-  call void @llvm.aarch64.settag(i8* %b, i64 48)
+  call void @llvm.aarch64.settag(ptr %a, i64 48)
+  call void @llvm.aarch64.settag(ptr %b, i64 48)
   br label %if.end
 
 if.end:
-  call void @llvm.aarch64.settag(i8* %c, i64 48)
+  call void @llvm.aarch64.settag(ptr %c, i64 48)
   ret void
 }
 
@@ -149,12 +149,12 @@ entry:
   br i1 %flag, label %if.then, label %if.end
 
 if.then:
-  call void @llvm.aarch64.settag(i8* %a, i64 128)
-  call void @llvm.aarch64.settag(i8* %b, i64 128)
+  call void @llvm.aarch64.settag(ptr %a, i64 128)
+  call void @llvm.aarch64.settag(ptr %b, i64 128)
   br label %if.end
 
 if.end:
-  call void @llvm.aarch64.settag(i8* %c, i64 48)
+  call void @llvm.aarch64.settag(ptr %c, i64 48)
   ret void
 }
 
@@ -177,12 +177,12 @@ entry:
   br i1 %flag, label %if.then, label %if.end
 
 if.then:
-  call void @llvm.aarch64.settag(i8* %a, i64 512)
-  call void @llvm.aarch64.settag(i8* %b, i64 512)
+  call void @llvm.aarch64.settag(ptr %a, i64 512)
+  call void @llvm.aarch64.settag(ptr %b, i64 512)
   br label %if.end
 
 if.end:
-  call void @llvm.aarch64.settag(i8* %c, i64 48)
+  call void @llvm.aarch64.settag(ptr %c, i64 48)
   ret void
 }
 
@@ -205,10 +205,10 @@ entry:
   %b = alloca i8, i32 32, align 16
   %c = alloca i8, i32 128, align 16
   %c2 = alloca i8, i32 128, align 16
-  call void @use(i8* %b)
-  call void @llvm.aarch64.settag(i8* %a, i64 128)
-  call void @llvm.aarch64.settag(i8* %a2, i64 128)
-  call void @llvm.aarch64.settag(i8* %c, i64 128)
-  call void @llvm.aarch64.settag(i8* %c2, i64 128)
+  call void @use(ptr %b)
+  call void @llvm.aarch64.settag(ptr %a, i64 128)
+  call void @llvm.aarch64.settag(ptr %a2, i64 128)
+  call void @llvm.aarch64.settag(ptr %c, i64 128)
+  call void @llvm.aarch64.settag(ptr %c2, i64 128)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/settag.ll b/llvm/test/CodeGen/AArch64/settag.ll
index 7f01b1fec7550..60712eac693ea 100644
--- a/llvm/test/CodeGen/AArch64/settag.ll
+++ b/llvm/test/CodeGen/AArch64/settag.ll
@@ -1,49 +1,49 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64 -mattr=+mte | FileCheck %s
 
-define void @stg1(i8* %p) {
+define void @stg1(ptr %p) {
 ; CHECK-LABEL: stg1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stg x0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.aarch64.settag(i8* %p, i64 16)
+  call void @llvm.aarch64.settag(ptr %p, i64 16)
   ret void
 }
 
-define void @stg2(i8* %p) {
+define void @stg2(ptr %p) {
 ; CHECK-LABEL: stg2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    st2g x0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.aarch64.settag(i8* %p, i64 32)
+  call void @llvm.aarch64.settag(ptr %p, i64 32)
   ret void
 }
 
-define void @stg3(i8* %p) {
+define void @stg3(ptr %p) {
 ; CHECK-LABEL: stg3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stg x0, [x0, #32]
 ; CHECK-NEXT:    st2g x0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.aarch64.settag(i8* %p, i64 48)
+  call void @llvm.aarch64.settag(ptr %p, i64 48)
   ret void
 }
 
-define void @stg4(i8* %p) {
+define void @stg4(ptr %p) {
 ; CHECK-LABEL: stg4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    st2g x0, [x0, #32]
 ; CHECK-NEXT:    st2g x0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.aarch64.settag(i8* %p, i64 64)
+  call void @llvm.aarch64.settag(ptr %p, i64 64)
   ret void
 }
 
-define void @stg5(i8* %p) {
+define void @stg5(ptr %p) {
 ; CHECK-LABEL: stg5:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stg x0, [x0, #64]
@@ -51,11 +51,11 @@ define void @stg5(i8* %p) {
 ; CHECK-NEXT:    st2g x0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.aarch64.settag(i8* %p, i64 80)
+  call void @llvm.aarch64.settag(ptr %p, i64 80)
   ret void
 }
 
-define void @stg16(i8* %p) {
+define void @stg16(ptr %p) {
 ; CHECK-LABEL: stg16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov x8, #256
@@ -67,11 +67,11 @@ define void @stg16(i8* %p) {
 ; CHECK-NEXT:  // %bb.2: // %entry
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.aarch64.settag(i8* %p, i64 256)
+  call void @llvm.aarch64.settag(ptr %p, i64 256)
   ret void
 }
 
-define void @stg17(i8* %p) {
+define void @stg17(ptr %p) {
 ; CHECK-LABEL: stg17:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov x8, #256
@@ -84,22 +84,22 @@ define void @stg17(i8* %p) {
 ; CHECK-NEXT:  // %bb.2: // %entry
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.aarch64.settag(i8* %p, i64 272)
+  call void @llvm.aarch64.settag(ptr %p, i64 272)
   ret void
 }
 
-define void @stzg3(i8* %p) {
+define void @stzg3(ptr %p) {
 ; CHECK-LABEL: stzg3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stzg x0, [x0, #32]
 ; CHECK-NEXT:    stz2g x0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.aarch64.settag.zero(i8* %p, i64 48)
+  call void @llvm.aarch64.settag.zero(ptr %p, i64 48)
   ret void
 }
 
-define void @stzg17(i8* %p) {
+define void @stzg17(ptr %p) {
 ; CHECK-LABEL: stzg17:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov x8, #256
@@ -112,7 +112,7 @@ define void @stzg17(i8* %p) {
 ; CHECK-NEXT:  // %bb.2: // %entry
 ; CHECK-NEXT:    ret
 entry:
-  call void @llvm.aarch64.settag.zero(i8* %p, i64 272)
+  call void @llvm.aarch64.settag.zero(ptr %p, i64 272)
   ret void
 }
 
@@ -126,7 +126,7 @@ define void @stg_alloca1() uwtable {
 ; CHECK-NEXT:    ret
 entry:
   %a = alloca i8, i32 16, align 16
-  call void @llvm.aarch64.settag(i8* %a, i64 16)
+  call void @llvm.aarch64.settag(ptr %a, i64 16)
   ret void
 }
 
@@ -142,7 +142,7 @@ define void @stg_alloca5() uwtable {
 ; CHECK-NEXT:    ret
 entry:
   %a = alloca i8, i32 80, align 16
-  call void @llvm.aarch64.settag(i8* %a, i64 80)
+  call void @llvm.aarch64.settag(ptr %a, i64 80)
   ret void
 }
 
@@ -163,7 +163,7 @@ define void @stg_alloca17() nounwind {
 ; CHECK-NEXT:    ret
 entry:
   %a = alloca i8, i32 272, align 16
-  call void @llvm.aarch64.settag(i8* %a, i64 272)
+  call void @llvm.aarch64.settag(ptr %a, i64 272)
   ret void
 }
 
@@ -191,9 +191,9 @@ define void @stg_alloca18() uwtable {
 ; CHECK-NEXT:    ret
 entry:
   %a = alloca i8, i32 272, align 16
-  call void @llvm.aarch64.settag(i8* %a, i64 272)
+  call void @llvm.aarch64.settag(ptr %a, i64 272)
   ret void
 }
 
-declare void @llvm.aarch64.settag(i8* %p, i64 %a)
-declare void @llvm.aarch64.settag.zero(i8* %p, i64 %a)
+declare void @llvm.aarch64.settag(ptr %p, i64 %a)
+declare void @llvm.aarch64.settag.zero(ptr %p, i64 %a)

diff  --git a/llvm/test/CodeGen/AArch64/shift-amount-mod.ll b/llvm/test/CodeGen/AArch64/shift-amount-mod.ll
index 977899aac1544..4fe609097f204 100644
--- a/llvm/test/CodeGen/AArch64/shift-amount-mod.ll
+++ b/llvm/test/CodeGen/AArch64/shift-amount-mod.ll
@@ -18,19 +18,19 @@ define i32 @reg32_shl_by_negated(i32 %val, i32 %shamt) nounwind {
   %shifted = shl i32 %val, %negshamt
   ret i32 %shifted
 }
-define i32 @load32_shl_by_negated(i32* %valptr, i32 %shamt) nounwind {
+define i32 @load32_shl_by_negated(ptr %valptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: load32_shl_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w8, w1
 ; CHECK-NEXT:    ldr w9, [x0]
 ; CHECK-NEXT:    lsl w0, w9, w8
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 32, %shamt
   %shifted = shl i32 %val, %negshamt
   ret i32 %shifted
 }
-define void @store32_shl_by_negated(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
+define void @store32_shl_by_negated(i32 %val, ptr %dstptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: store32_shl_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w8, w2
@@ -39,10 +39,10 @@ define void @store32_shl_by_negated(i32 %val, i32* %dstptr, i32 %shamt) nounwind
 ; CHECK-NEXT:    ret
   %negshamt = sub i32 32, %shamt
   %shifted = shl i32 %val, %negshamt
-  store i32 %shifted, i32* %dstptr
+  store i32 %shifted, ptr %dstptr
   ret void
 }
-define void @modify32_shl_by_negated(i32* %valptr, i32 %shamt) nounwind {
+define void @modify32_shl_by_negated(ptr %valptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: modify32_shl_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w8, w1
@@ -50,13 +50,13 @@ define void @modify32_shl_by_negated(i32* %valptr, i32 %shamt) nounwind {
 ; CHECK-NEXT:    lsl w8, w9, w8
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 32, %shamt
   %shifted = shl i32 %val, %negshamt
-  store i32 %shifted, i32* %valptr
+  store i32 %shifted, ptr %valptr
   ret void
 }
-define void @modify32_shl_by_negated_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
+define void @modify32_shl_by_negated_multi_use(ptr %valptr, i32 %shamt, ptr %shamtptr) nounwind {
 ; CHECK-LABEL: modify32_shl_by_negated_multi_use:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w8, w1
@@ -67,11 +67,11 @@ define void @modify32_shl_by_negated_multi_use(i32* %valptr, i32 %shamt, i32* %s
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    str w9, [x2]
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 32, %shamt
   %shifted = shl i32 %val, %negshamt
-  store i32 %shifted, i32* %valptr
-  store i32 %negshamt, i32* %shamtptr
+  store i32 %shifted, ptr %valptr
+  store i32 %negshamt, ptr %shamtptr
   ret void
 }
 
@@ -85,19 +85,19 @@ define i64 @reg64_shl_by_negated(i64 %val, i64 %shamt) nounwind {
   %shifted = shl i64 %val, %negshamt
   ret i64 %shifted
 }
-define i64 @load64_shl_by_negated(i64* %valptr, i64 %shamt) nounwind {
+define i64 @load64_shl_by_negated(ptr %valptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: load64_shl_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg x8, x1
 ; CHECK-NEXT:    ldr x9, [x0]
 ; CHECK-NEXT:    lsl x0, x9, x8
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 64, %shamt
   %shifted = shl i64 %val, %negshamt
   ret i64 %shifted
 }
-define void @store64_shl_by_negated(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
+define void @store64_shl_by_negated(i64 %val, ptr %dstptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: store64_shl_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg x8, x2
@@ -106,10 +106,10 @@ define void @store64_shl_by_negated(i64 %val, i64* %dstptr, i64 %shamt) nounwind
 ; CHECK-NEXT:    ret
   %negshamt = sub i64 64, %shamt
   %shifted = shl i64 %val, %negshamt
-  store i64 %shifted, i64* %dstptr
+  store i64 %shifted, ptr %dstptr
   ret void
 }
-define void @modify64_shl_by_negated(i64* %valptr, i64 %shamt) nounwind {
+define void @modify64_shl_by_negated(ptr %valptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: modify64_shl_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg x8, x1
@@ -117,13 +117,13 @@ define void @modify64_shl_by_negated(i64* %valptr, i64 %shamt) nounwind {
 ; CHECK-NEXT:    lsl x8, x9, x8
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 64, %shamt
   %shifted = shl i64 %val, %negshamt
-  store i64 %shifted, i64* %valptr
+  store i64 %shifted, ptr %valptr
   ret void
 }
-define void @modify64_shl_by_negated_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
+define void @modify64_shl_by_negated_multi_use(ptr %valptr, i64 %shamt, ptr %shamtptr) nounwind {
 ; CHECK-LABEL: modify64_shl_by_negated_multi_use:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg x8, x1
@@ -134,11 +134,11 @@ define void @modify64_shl_by_negated_multi_use(i64* %valptr, i64 %shamt, i64* %s
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    str x9, [x2]
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 64, %shamt
   %shifted = shl i64 %val, %negshamt
-  store i64 %shifted, i64* %valptr
-  store i64 %negshamt, i64* %shamtptr
+  store i64 %shifted, ptr %valptr
+  store i64 %negshamt, ptr %shamtptr
   ret void
 }
 
@@ -155,19 +155,19 @@ define i32 @reg32_lshr_by_negated(i32 %val, i32 %shamt) nounwind {
   %shifted = lshr i32 %val, %negshamt
   ret i32 %shifted
 }
-define i32 @load32_lshr_by_negated(i32* %valptr, i32 %shamt) nounwind {
+define i32 @load32_lshr_by_negated(ptr %valptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: load32_lshr_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w8, w1
 ; CHECK-NEXT:    ldr w9, [x0]
 ; CHECK-NEXT:    lsr w0, w9, w8
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 32, %shamt
   %shifted = lshr i32 %val, %negshamt
   ret i32 %shifted
 }
-define void @store32_lshr_by_negated(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
+define void @store32_lshr_by_negated(i32 %val, ptr %dstptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: store32_lshr_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w8, w2
@@ -176,10 +176,10 @@ define void @store32_lshr_by_negated(i32 %val, i32* %dstptr, i32 %shamt) nounwin
 ; CHECK-NEXT:    ret
   %negshamt = sub i32 32, %shamt
   %shifted = lshr i32 %val, %negshamt
-  store i32 %shifted, i32* %dstptr
+  store i32 %shifted, ptr %dstptr
   ret void
 }
-define void @modify32_lshr_by_negated(i32* %valptr, i32 %shamt) nounwind {
+define void @modify32_lshr_by_negated(ptr %valptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: modify32_lshr_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w8, w1
@@ -187,13 +187,13 @@ define void @modify32_lshr_by_negated(i32* %valptr, i32 %shamt) nounwind {
 ; CHECK-NEXT:    lsr w8, w9, w8
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 32, %shamt
   %shifted = lshr i32 %val, %negshamt
-  store i32 %shifted, i32* %valptr
+  store i32 %shifted, ptr %valptr
   ret void
 }
-define void @modify32_lshr_by_negated_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
+define void @modify32_lshr_by_negated_multi_use(ptr %valptr, i32 %shamt, ptr %shamtptr) nounwind {
 ; CHECK-LABEL: modify32_lshr_by_negated_multi_use:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w8, w1
@@ -204,11 +204,11 @@ define void @modify32_lshr_by_negated_multi_use(i32* %valptr, i32 %shamt, i32* %
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    str w9, [x2]
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 32, %shamt
   %shifted = lshr i32 %val, %negshamt
-  store i32 %shifted, i32* %valptr
-  store i32 %negshamt, i32* %shamtptr
+  store i32 %shifted, ptr %valptr
+  store i32 %negshamt, ptr %shamtptr
   ret void
 }
 
@@ -222,19 +222,19 @@ define i64 @reg64_lshr_by_negated(i64 %val, i64 %shamt) nounwind {
   %shifted = lshr i64 %val, %negshamt
   ret i64 %shifted
 }
-define i64 @load64_lshr_by_negated(i64* %valptr, i64 %shamt) nounwind {
+define i64 @load64_lshr_by_negated(ptr %valptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: load64_lshr_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg x8, x1
 ; CHECK-NEXT:    ldr x9, [x0]
 ; CHECK-NEXT:    lsr x0, x9, x8
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 64, %shamt
   %shifted = lshr i64 %val, %negshamt
   ret i64 %shifted
 }
-define void @store64_lshr_by_negated(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
+define void @store64_lshr_by_negated(i64 %val, ptr %dstptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: store64_lshr_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg x8, x2
@@ -243,10 +243,10 @@ define void @store64_lshr_by_negated(i64 %val, i64* %dstptr, i64 %shamt) nounwin
 ; CHECK-NEXT:    ret
   %negshamt = sub i64 64, %shamt
   %shifted = lshr i64 %val, %negshamt
-  store i64 %shifted, i64* %dstptr
+  store i64 %shifted, ptr %dstptr
   ret void
 }
-define void @modify64_lshr_by_negated(i64* %valptr, i64 %shamt) nounwind {
+define void @modify64_lshr_by_negated(ptr %valptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: modify64_lshr_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg x8, x1
@@ -254,13 +254,13 @@ define void @modify64_lshr_by_negated(i64* %valptr, i64 %shamt) nounwind {
 ; CHECK-NEXT:    lsr x8, x9, x8
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 64, %shamt
   %shifted = lshr i64 %val, %negshamt
-  store i64 %shifted, i64* %valptr
+  store i64 %shifted, ptr %valptr
   ret void
 }
-define void @modify64_lshr_by_negated_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
+define void @modify64_lshr_by_negated_multi_use(ptr %valptr, i64 %shamt, ptr %shamtptr) nounwind {
 ; CHECK-LABEL: modify64_lshr_by_negated_multi_use:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg x8, x1
@@ -271,11 +271,11 @@ define void @modify64_lshr_by_negated_multi_use(i64* %valptr, i64 %shamt, i64* %
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    str x9, [x2]
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 64, %shamt
   %shifted = lshr i64 %val, %negshamt
-  store i64 %shifted, i64* %valptr
-  store i64 %negshamt, i64* %shamtptr
+  store i64 %shifted, ptr %valptr
+  store i64 %negshamt, ptr %shamtptr
   ret void
 }
 
@@ -292,19 +292,19 @@ define i32 @reg32_ashr_by_negated(i32 %val, i32 %shamt) nounwind {
   %shifted = ashr i32 %val, %negshamt
   ret i32 %shifted
 }
-define i32 @load32_ashr_by_negated(i32* %valptr, i32 %shamt) nounwind {
+define i32 @load32_ashr_by_negated(ptr %valptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: load32_ashr_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w8, w1
 ; CHECK-NEXT:    ldr w9, [x0]
 ; CHECK-NEXT:    asr w0, w9, w8
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 32, %shamt
   %shifted = ashr i32 %val, %negshamt
   ret i32 %shifted
 }
-define void @store32_ashr_by_negated(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
+define void @store32_ashr_by_negated(i32 %val, ptr %dstptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: store32_ashr_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w8, w2
@@ -313,10 +313,10 @@ define void @store32_ashr_by_negated(i32 %val, i32* %dstptr, i32 %shamt) nounwin
 ; CHECK-NEXT:    ret
   %negshamt = sub i32 32, %shamt
   %shifted = ashr i32 %val, %negshamt
-  store i32 %shifted, i32* %dstptr
+  store i32 %shifted, ptr %dstptr
   ret void
 }
-define void @modify32_ashr_by_negated(i32* %valptr, i32 %shamt) nounwind {
+define void @modify32_ashr_by_negated(ptr %valptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: modify32_ashr_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w8, w1
@@ -324,13 +324,13 @@ define void @modify32_ashr_by_negated(i32* %valptr, i32 %shamt) nounwind {
 ; CHECK-NEXT:    asr w8, w9, w8
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 32, %shamt
   %shifted = ashr i32 %val, %negshamt
-  store i32 %shifted, i32* %valptr
+  store i32 %shifted, ptr %valptr
   ret void
 }
-define void @modify32_ashr_by_negated_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
+define void @modify32_ashr_by_negated_multi_use(ptr %valptr, i32 %shamt, ptr %shamtptr) nounwind {
 ; CHECK-LABEL: modify32_ashr_by_negated_multi_use:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w8, w1
@@ -341,11 +341,11 @@ define void @modify32_ashr_by_negated_multi_use(i32* %valptr, i32 %shamt, i32* %
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    str w9, [x2]
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 32, %shamt
   %shifted = ashr i32 %val, %negshamt
-  store i32 %shifted, i32* %valptr
-  store i32 %negshamt, i32* %shamtptr
+  store i32 %shifted, ptr %valptr
+  store i32 %negshamt, ptr %shamtptr
   ret void
 }
 
@@ -359,19 +359,19 @@ define i64 @reg64_ashr_by_negated(i64 %val, i64 %shamt) nounwind {
   %shifted = ashr i64 %val, %negshamt
   ret i64 %shifted
 }
-define i64 @load64_ashr_by_negated(i64* %valptr, i64 %shamt) nounwind {
+define i64 @load64_ashr_by_negated(ptr %valptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: load64_ashr_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg x8, x1
 ; CHECK-NEXT:    ldr x9, [x0]
 ; CHECK-NEXT:    asr x0, x9, x8
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 64, %shamt
   %shifted = ashr i64 %val, %negshamt
   ret i64 %shifted
 }
-define void @store64_ashr_by_negated(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
+define void @store64_ashr_by_negated(i64 %val, ptr %dstptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: store64_ashr_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg x8, x2
@@ -380,10 +380,10 @@ define void @store64_ashr_by_negated(i64 %val, i64* %dstptr, i64 %shamt) nounwin
 ; CHECK-NEXT:    ret
   %negshamt = sub i64 64, %shamt
   %shifted = ashr i64 %val, %negshamt
-  store i64 %shifted, i64* %dstptr
+  store i64 %shifted, ptr %dstptr
   ret void
 }
-define void @modify64_ashr_by_negated(i64* %valptr, i64 %shamt) nounwind {
+define void @modify64_ashr_by_negated(ptr %valptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: modify64_ashr_by_negated:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg x8, x1
@@ -391,13 +391,13 @@ define void @modify64_ashr_by_negated(i64* %valptr, i64 %shamt) nounwind {
 ; CHECK-NEXT:    asr x8, x9, x8
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 64, %shamt
   %shifted = ashr i64 %val, %negshamt
-  store i64 %shifted, i64* %valptr
+  store i64 %shifted, ptr %valptr
   ret void
 }
-define void @modify64_ashr_by_negated_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
+define void @modify64_ashr_by_negated_multi_use(ptr %valptr, i64 %shamt, ptr %shamtptr) nounwind {
 ; CHECK-LABEL: modify64_ashr_by_negated_multi_use:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg x8, x1
@@ -408,11 +408,11 @@ define void @modify64_ashr_by_negated_multi_use(i64* %valptr, i64 %shamt, i64* %
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    str x9, [x2]
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 64, %shamt
   %shifted = ashr i64 %val, %negshamt
-  store i64 %shifted, i64* %valptr
-  store i64 %negshamt, i64* %shamtptr
+  store i64 %shifted, ptr %valptr
+  store i64 %negshamt, ptr %shamtptr
   ret void
 }
 
@@ -433,19 +433,19 @@ define i32 @reg32_shl_by_complemented(i32 %val, i32 %shamt) nounwind {
   %shifted = shl i32 %val, %negshamt
   ret i32 %shifted
 }
-define i32 @load32_shl_by_complemented(i32* %valptr, i32 %shamt) nounwind {
+define i32 @load32_shl_by_complemented(ptr %valptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: load32_shl_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn w8, w1
 ; CHECK-NEXT:    ldr w9, [x0]
 ; CHECK-NEXT:    lsl w0, w9, w8
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 31, %shamt
   %shifted = shl i32 %val, %negshamt
   ret i32 %shifted
 }
-define void @store32_shl_by_complemented(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
+define void @store32_shl_by_complemented(i32 %val, ptr %dstptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: store32_shl_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn w8, w2
@@ -454,10 +454,10 @@ define void @store32_shl_by_complemented(i32 %val, i32* %dstptr, i32 %shamt) nou
 ; CHECK-NEXT:    ret
   %negshamt = sub i32 31, %shamt
   %shifted = shl i32 %val, %negshamt
-  store i32 %shifted, i32* %dstptr
+  store i32 %shifted, ptr %dstptr
   ret void
 }
-define void @modify32_shl_by_complemented(i32* %valptr, i32 %shamt) nounwind {
+define void @modify32_shl_by_complemented(ptr %valptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: modify32_shl_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn w8, w1
@@ -465,13 +465,13 @@ define void @modify32_shl_by_complemented(i32* %valptr, i32 %shamt) nounwind {
 ; CHECK-NEXT:    lsl w8, w9, w8
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 31, %shamt
   %shifted = shl i32 %val, %negshamt
-  store i32 %shifted, i32* %valptr
+  store i32 %shifted, ptr %valptr
   ret void
 }
-define void @modify32_shl_by_complemented_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
+define void @modify32_shl_by_complemented_multi_use(ptr %valptr, i32 %shamt, ptr %shamtptr) nounwind {
 ; CHECK-LABEL: modify32_shl_by_complemented_multi_use:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn w8, w1
@@ -482,11 +482,11 @@ define void @modify32_shl_by_complemented_multi_use(i32* %valptr, i32 %shamt, i3
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    str w9, [x2]
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 31, %shamt
   %shifted = shl i32 %val, %negshamt
-  store i32 %shifted, i32* %valptr
-  store i32 %negshamt, i32* %shamtptr
+  store i32 %shifted, ptr %valptr
+  store i32 %negshamt, ptr %shamtptr
   ret void
 }
 
@@ -500,19 +500,19 @@ define i64 @reg64_shl_by_complemented(i64 %val, i64 %shamt) nounwind {
   %shifted = shl i64 %val, %negshamt
   ret i64 %shifted
 }
-define i64 @load64_shl_by_complemented(i64* %valptr, i64 %shamt) nounwind {
+define i64 @load64_shl_by_complemented(ptr %valptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: load64_shl_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn x8, x1
 ; CHECK-NEXT:    ldr x9, [x0]
 ; CHECK-NEXT:    lsl x0, x9, x8
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 63, %shamt
   %shifted = shl i64 %val, %negshamt
   ret i64 %shifted
 }
-define void @store64_shl_by_complemented(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
+define void @store64_shl_by_complemented(i64 %val, ptr %dstptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: store64_shl_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn x8, x2
@@ -521,10 +521,10 @@ define void @store64_shl_by_complemented(i64 %val, i64* %dstptr, i64 %shamt) nou
 ; CHECK-NEXT:    ret
   %negshamt = sub i64 63, %shamt
   %shifted = shl i64 %val, %negshamt
-  store i64 %shifted, i64* %dstptr
+  store i64 %shifted, ptr %dstptr
   ret void
 }
-define void @modify64_shl_by_complemented(i64* %valptr, i64 %shamt) nounwind {
+define void @modify64_shl_by_complemented(ptr %valptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: modify64_shl_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn x8, x1
@@ -532,13 +532,13 @@ define void @modify64_shl_by_complemented(i64* %valptr, i64 %shamt) nounwind {
 ; CHECK-NEXT:    lsl x8, x9, x8
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 63, %shamt
   %shifted = shl i64 %val, %negshamt
-  store i64 %shifted, i64* %valptr
+  store i64 %shifted, ptr %valptr
   ret void
 }
-define void @modify64_shl_by_complemented_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
+define void @modify64_shl_by_complemented_multi_use(ptr %valptr, i64 %shamt, ptr %shamtptr) nounwind {
 ; CHECK-LABEL: modify64_shl_by_complemented_multi_use:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn x8, x1
@@ -549,11 +549,11 @@ define void @modify64_shl_by_complemented_multi_use(i64* %valptr, i64 %shamt, i6
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    str x9, [x2]
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 63, %shamt
   %shifted = shl i64 %val, %negshamt
-  store i64 %shifted, i64* %valptr
-  store i64 %negshamt, i64* %shamtptr
+  store i64 %shifted, ptr %valptr
+  store i64 %negshamt, ptr %shamtptr
   ret void
 }
 
@@ -570,19 +570,19 @@ define i32 @reg32_lshr_by_complemented(i32 %val, i32 %shamt) nounwind {
   %shifted = lshr i32 %val, %negshamt
   ret i32 %shifted
 }
-define i32 @load32_lshr_by_complemented(i32* %valptr, i32 %shamt) nounwind {
+define i32 @load32_lshr_by_complemented(ptr %valptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: load32_lshr_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn w8, w1
 ; CHECK-NEXT:    ldr w9, [x0]
 ; CHECK-NEXT:    lsr w0, w9, w8
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 31, %shamt
   %shifted = lshr i32 %val, %negshamt
   ret i32 %shifted
 }
-define void @store32_lshr_by_complemented(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
+define void @store32_lshr_by_complemented(i32 %val, ptr %dstptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: store32_lshr_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn w8, w2
@@ -591,10 +591,10 @@ define void @store32_lshr_by_complemented(i32 %val, i32* %dstptr, i32 %shamt) no
 ; CHECK-NEXT:    ret
   %negshamt = sub i32 31, %shamt
   %shifted = lshr i32 %val, %negshamt
-  store i32 %shifted, i32* %dstptr
+  store i32 %shifted, ptr %dstptr
   ret void
 }
-define void @modify32_lshr_by_complemented(i32* %valptr, i32 %shamt) nounwind {
+define void @modify32_lshr_by_complemented(ptr %valptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: modify32_lshr_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn w8, w1
@@ -602,13 +602,13 @@ define void @modify32_lshr_by_complemented(i32* %valptr, i32 %shamt) nounwind {
 ; CHECK-NEXT:    lsr w8, w9, w8
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 31, %shamt
   %shifted = lshr i32 %val, %negshamt
-  store i32 %shifted, i32* %valptr
+  store i32 %shifted, ptr %valptr
   ret void
 }
-define void @modify32_lshr_by_complemented_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
+define void @modify32_lshr_by_complemented_multi_use(ptr %valptr, i32 %shamt, ptr %shamtptr) nounwind {
 ; CHECK-LABEL: modify32_lshr_by_complemented_multi_use:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn w8, w1
@@ -619,11 +619,11 @@ define void @modify32_lshr_by_complemented_multi_use(i32* %valptr, i32 %shamt, i
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    str w9, [x2]
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 31, %shamt
   %shifted = lshr i32 %val, %negshamt
-  store i32 %shifted, i32* %valptr
-  store i32 %negshamt, i32* %shamtptr
+  store i32 %shifted, ptr %valptr
+  store i32 %negshamt, ptr %shamtptr
   ret void
 }
 
@@ -637,19 +637,19 @@ define i64 @reg64_lshr_by_complemented(i64 %val, i64 %shamt) nounwind {
   %shifted = lshr i64 %val, %negshamt
   ret i64 %shifted
 }
-define i64 @load64_lshr_by_complemented(i64* %valptr, i64 %shamt) nounwind {
+define i64 @load64_lshr_by_complemented(ptr %valptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: load64_lshr_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn x8, x1
 ; CHECK-NEXT:    ldr x9, [x0]
 ; CHECK-NEXT:    lsr x0, x9, x8
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 63, %shamt
   %shifted = lshr i64 %val, %negshamt
   ret i64 %shifted
 }
-define void @store64_lshr_by_complemented(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
+define void @store64_lshr_by_complemented(i64 %val, ptr %dstptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: store64_lshr_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn x8, x2
@@ -658,10 +658,10 @@ define void @store64_lshr_by_complemented(i64 %val, i64* %dstptr, i64 %shamt) no
 ; CHECK-NEXT:    ret
   %negshamt = sub i64 63, %shamt
   %shifted = lshr i64 %val, %negshamt
-  store i64 %shifted, i64* %dstptr
+  store i64 %shifted, ptr %dstptr
   ret void
 }
-define void @modify64_lshr_by_complemented(i64* %valptr, i64 %shamt) nounwind {
+define void @modify64_lshr_by_complemented(ptr %valptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: modify64_lshr_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn x8, x1
@@ -669,13 +669,13 @@ define void @modify64_lshr_by_complemented(i64* %valptr, i64 %shamt) nounwind {
 ; CHECK-NEXT:    lsr x8, x9, x8
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 63, %shamt
   %shifted = lshr i64 %val, %negshamt
-  store i64 %shifted, i64* %valptr
+  store i64 %shifted, ptr %valptr
   ret void
 }
-define void @modify64_lshr_by_complemented_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
+define void @modify64_lshr_by_complemented_multi_use(ptr %valptr, i64 %shamt, ptr %shamtptr) nounwind {
 ; CHECK-LABEL: modify64_lshr_by_complemented_multi_use:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn x8, x1
@@ -686,11 +686,11 @@ define void @modify64_lshr_by_complemented_multi_use(i64* %valptr, i64 %shamt, i
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    str x9, [x2]
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 63, %shamt
   %shifted = lshr i64 %val, %negshamt
-  store i64 %shifted, i64* %valptr
-  store i64 %negshamt, i64* %shamtptr
+  store i64 %shifted, ptr %valptr
+  store i64 %negshamt, ptr %shamtptr
   ret void
 }
 
@@ -707,19 +707,19 @@ define i32 @reg32_ashr_by_complemented(i32 %val, i32 %shamt) nounwind {
   %shifted = ashr i32 %val, %negshamt
   ret i32 %shifted
 }
-define i32 @load32_ashr_by_complemented(i32* %valptr, i32 %shamt) nounwind {
+define i32 @load32_ashr_by_complemented(ptr %valptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: load32_ashr_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn w8, w1
 ; CHECK-NEXT:    ldr w9, [x0]
 ; CHECK-NEXT:    asr w0, w9, w8
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 31, %shamt
   %shifted = ashr i32 %val, %negshamt
   ret i32 %shifted
 }
-define void @store32_ashr_by_complemented(i32 %val, i32* %dstptr, i32 %shamt) nounwind {
+define void @store32_ashr_by_complemented(i32 %val, ptr %dstptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: store32_ashr_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn w8, w2
@@ -728,10 +728,10 @@ define void @store32_ashr_by_complemented(i32 %val, i32* %dstptr, i32 %shamt) no
 ; CHECK-NEXT:    ret
   %negshamt = sub i32 31, %shamt
   %shifted = ashr i32 %val, %negshamt
-  store i32 %shifted, i32* %dstptr
+  store i32 %shifted, ptr %dstptr
   ret void
 }
-define void @modify32_ashr_by_complemented(i32* %valptr, i32 %shamt) nounwind {
+define void @modify32_ashr_by_complemented(ptr %valptr, i32 %shamt) nounwind {
 ; CHECK-LABEL: modify32_ashr_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn w8, w1
@@ -739,13 +739,13 @@ define void @modify32_ashr_by_complemented(i32* %valptr, i32 %shamt) nounwind {
 ; CHECK-NEXT:    asr w8, w9, w8
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 31, %shamt
   %shifted = ashr i32 %val, %negshamt
-  store i32 %shifted, i32* %valptr
+  store i32 %shifted, ptr %valptr
   ret void
 }
-define void @modify32_ashr_by_complemented_multi_use(i32* %valptr, i32 %shamt, i32* %shamtptr) nounwind {
+define void @modify32_ashr_by_complemented_multi_use(ptr %valptr, i32 %shamt, ptr %shamtptr) nounwind {
 ; CHECK-LABEL: modify32_ashr_by_complemented_multi_use:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn w8, w1
@@ -756,11 +756,11 @@ define void @modify32_ashr_by_complemented_multi_use(i32* %valptr, i32 %shamt, i
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    str w9, [x2]
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valptr
+  %val = load i32, ptr %valptr
   %negshamt = sub i32 31, %shamt
   %shifted = ashr i32 %val, %negshamt
-  store i32 %shifted, i32* %valptr
-  store i32 %negshamt, i32* %shamtptr
+  store i32 %shifted, ptr %valptr
+  store i32 %negshamt, ptr %shamtptr
   ret void
 }
 
@@ -774,19 +774,19 @@ define i64 @reg64_ashr_by_complemented(i64 %val, i64 %shamt) nounwind {
   %shifted = ashr i64 %val, %negshamt
   ret i64 %shifted
 }
-define i64 @load64_ashr_by_complemented(i64* %valptr, i64 %shamt) nounwind {
+define i64 @load64_ashr_by_complemented(ptr %valptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: load64_ashr_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn x8, x1
 ; CHECK-NEXT:    ldr x9, [x0]
 ; CHECK-NEXT:    asr x0, x9, x8
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 63, %shamt
   %shifted = ashr i64 %val, %negshamt
   ret i64 %shifted
 }
-define void @store64_ashr_by_complemented(i64 %val, i64* %dstptr, i64 %shamt) nounwind {
+define void @store64_ashr_by_complemented(i64 %val, ptr %dstptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: store64_ashr_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn x8, x2
@@ -795,10 +795,10 @@ define void @store64_ashr_by_complemented(i64 %val, i64* %dstptr, i64 %shamt) no
 ; CHECK-NEXT:    ret
   %negshamt = sub i64 63, %shamt
   %shifted = ashr i64 %val, %negshamt
-  store i64 %shifted, i64* %dstptr
+  store i64 %shifted, ptr %dstptr
   ret void
 }
-define void @modify64_ashr_by_complemented(i64* %valptr, i64 %shamt) nounwind {
+define void @modify64_ashr_by_complemented(ptr %valptr, i64 %shamt) nounwind {
 ; CHECK-LABEL: modify64_ashr_by_complemented:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn x8, x1
@@ -806,13 +806,13 @@ define void @modify64_ashr_by_complemented(i64* %valptr, i64 %shamt) nounwind {
 ; CHECK-NEXT:    asr x8, x9, x8
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 63, %shamt
   %shifted = ashr i64 %val, %negshamt
-  store i64 %shifted, i64* %valptr
+  store i64 %shifted, ptr %valptr
   ret void
 }
-define void @modify64_ashr_by_complemented_multi_use(i64* %valptr, i64 %shamt, i64* %shamtptr) nounwind {
+define void @modify64_ashr_by_complemented_multi_use(ptr %valptr, i64 %shamt, ptr %shamtptr) nounwind {
 ; CHECK-LABEL: modify64_ashr_by_complemented_multi_use:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn x8, x1
@@ -823,11 +823,11 @@ define void @modify64_ashr_by_complemented_multi_use(i64* %valptr, i64 %shamt, i
 ; CHECK-NEXT:    str x8, [x0]
 ; CHECK-NEXT:    str x9, [x2]
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valptr
+  %val = load i64, ptr %valptr
   %negshamt = sub i64 63, %shamt
   %shifted = ashr i64 %val, %negshamt
-  store i64 %shifted, i64* %valptr
-  store i64 %negshamt, i64* %shamtptr
+  store i64 %shifted, ptr %valptr
+  store i64 %negshamt, ptr %shamtptr
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/shift-by-signext.ll b/llvm/test/CodeGen/AArch64/shift-by-signext.ll
index 3acd842e64044..47bdc02d29dfd 100644
--- a/llvm/test/CodeGen/AArch64/shift-by-signext.ll
+++ b/llvm/test/CodeGen/AArch64/shift-by-signext.ll
@@ -106,7 +106,7 @@ define i32 @n7_fshr(i32 %x, i32 %y, i8 %shamt) nounwind {
   ret i32 %r
 }
 
-define i32 @n8_extrause(i32 %x, i8 %shamt, i32* %shamt_wide_store) nounwind {
+define i32 @n8_extrause(i32 %x, i8 %shamt, ptr %shamt_wide_store) nounwind {
 ; CHECK-LABEL: n8_extrause:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sxtb w8, w1
@@ -114,7 +114,7 @@ define i32 @n8_extrause(i32 %x, i8 %shamt, i32* %shamt_wide_store) nounwind {
 ; CHECK-NEXT:    str w8, [x2]
 ; CHECK-NEXT:    ret
   %shamt_wide = sext i8 %shamt to i32
-  store i32 %shamt_wide, i32* %shamt_wide_store, align 4
+  store i32 %shamt_wide, ptr %shamt_wide_store, align 4
   %r = shl i32 %x, %shamt_wide
   ret i32 %r
 }

diff --git a/llvm/test/CodeGen/AArch64/shift-logic.ll b/llvm/test/CodeGen/AArch64/shift-logic.ll
index be1ddccf901b8..6826ede7c707c 100644
--- a/llvm/test/CodeGen/AArch64/shift-logic.ll
+++ b/llvm/test/CodeGen/AArch64/shift-logic.ll
@@ -138,7 +138,7 @@ define i32 @ashr_overshift_xor(i32 %x, i32 %y) nounwind {
   ret i32 %sh1
 }
 
-define i32 @lshr_or_extra_use(i32 %x, i32 %y, i32* %p) nounwind {
+define i32 @lshr_or_extra_use(i32 %x, i32 %y, ptr %p) nounwind {
 ; CHECK-LABEL: lshr_or_extra_use:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    orr w8, w1, w0, lsr #5
@@ -147,7 +147,7 @@ define i32 @lshr_or_extra_use(i32 %x, i32 %y, i32* %p) nounwind {
 ; CHECK-NEXT:    ret
   %sh0 = lshr i32 %x, 5
   %r = or i32 %sh0, %y
-  store i32 %r, i32* %p
+  store i32 %r, ptr %p
   %sh1 = lshr i32 %r, 7
   ret i32 %sh1
 }
@@ -163,7 +163,7 @@ define i64 @desirable_to_commute1(i64 %x) {
   ret i64 %s2
 }
 
-define i64 @desirable_to_commute2(i64* %p, i64 %i) {
+define i64 @desirable_to_commute2(ptr %p, i64 %i) {
 ; CHECK-LABEL: desirable_to_commute2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and x8, x1, #0x1ff8
@@ -171,15 +171,15 @@ define i64 @desirable_to_commute2(i64* %p, i64 %i) {
 ; CHECK-NEXT:    ret
   %lshr = lshr i64 %i, 3
   %and = and i64 %lshr, 1023
-  %pidx = getelementptr i64, i64* %p, i64 %and
-  %r = load i64, i64* %pidx
+  %pidx = getelementptr i64, ptr %p, i64 %and
+  %r = load i64, ptr %pidx
   ret i64 %r
 }
 
 ; Shrink demanded op will shrink the shl to i32,
 ; Lshr and shl will have different shift amount type.
 ; Compare apint will cause crash when type is different.
-define void @apint_type_mismatch(i16 %a, i32* %p) {
+define void @apint_type_mismatch(i16 %a, ptr %p) {
 ; CHECK-LABEL: apint_type_mismatch:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    and w8, w0, #0x7f8
@@ -191,6 +191,6 @@ entry:
   %zext = zext i16 %and to i64
   %shl = shl i64 %zext, 3
   %trunc = trunc i64 %shl to i32
-  store i32 %trunc, i32* %p
+  store i32 %trunc, ptr %p
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/shrink-wrap.ll b/llvm/test/CodeGen/AArch64/shrink-wrap.ll
index ea101a8da15d5..518a0126e23d6 100644
--- a/llvm/test/CodeGen/AArch64/shrink-wrap.ll
+++ b/llvm/test/CodeGen/AArch64/shrink-wrap.ll
@@ -17,7 +17,7 @@
 @g10 = external unnamed_addr constant [144 x i32], align 4
 @g11 = external unnamed_addr global i32, align 4
 @g12 = external unnamed_addr global [144 x [144 x i8]], align 1
-@g13 = external unnamed_addr global %type1*, align 8
+@g13 = external unnamed_addr global ptr, align 8
 @g14 = external unnamed_addr global [144 x [144 x i8]], align 1
 @g15 = external unnamed_addr global [144 x [144 x i8]], align 1
 @g16 = external unnamed_addr global [144 x [144 x i8]], align 1
@@ -50,31 +50,31 @@ if.then.6:
   unreachable
 
 if.end.9:
-  %tmp = load i32, i32* @g1, align 4
+  %tmp = load i32, ptr @g1, align 4
   %rem.i = urem i32 %tmp, 1000000
   %idxprom.1.i = zext i32 %rem.i to i64
-  %tmp1 = load %type1*, %type1** @g13, align 8
-  %v4 = getelementptr inbounds %type1, %type1* %tmp1, i64 %idxprom.1.i, i32 0
-  %.b = load i1, i1* @g2, align 1
+  %tmp1 = load ptr, ptr @g13, align 8
+  %v4 = getelementptr inbounds %type1, ptr %tmp1, i64 %idxprom.1.i, i32 0
+  %.b = load i1, ptr @g2, align 1
   %v5 = select i1 %.b, i32 2, i32 0
-  %tmp2 = load i32, i32* @g18, align 4
-  %tmp3 = load i32, i32* @g11, align 4
+  %tmp2 = load i32, ptr @g18, align 4
+  %tmp3 = load i32, ptr @g11, align 4
   %idxprom58 = sext i32 %tmp3 to i64
-  %tmp4 = load i32, i32* @g21, align 4
+  %tmp4 = load i32, ptr @g21, align 4
   %idxprom69 = sext i32 %tmp4 to i64
   br label %for.body
 
 for.body:
   %v6 = phi i32 [ 0, %if.end.9 ], [ %v7, %for.inc ]
   %a.0983 = phi i32 [ 1, %if.end.9 ], [ %a.1, %for.inc ]
-  %arrayidx = getelementptr inbounds [62 x i32], [62 x i32]* @g17, i64 0, i64 undef
-  %tmp5 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds [62 x i32], ptr @g17, i64 0, i64 undef
+  %tmp5 = load i32, ptr %arrayidx, align 4
   br i1 undef, label %for.inc, label %if.else.51
 
 if.else.51:
   %idxprom53 = sext i32 %tmp5 to i64
-  %arrayidx54 = getelementptr inbounds [144 x i32], [144 x i32]* @g3, i64 0, i64 %idxprom53
-  %tmp6 = load i32, i32* %arrayidx54, align 4
+  %arrayidx54 = getelementptr inbounds [144 x i32], ptr @g3, i64 0, i64 %idxprom53
+  %tmp6 = load i32, ptr %arrayidx54, align 4
   switch i32 %tmp6, label %for.inc [
     i32 1, label %block.bb
     i32 10, label %block.bb.159
@@ -87,80 +87,80 @@ if.else.51:
   ]
 
 block.bb:
-  %arrayidx56 = getelementptr inbounds [144 x i32], [144 x i32]* @g6, i64 0, i64 %idxprom53
-  %tmp7 = load i32, i32* %arrayidx56, align 4
+  %arrayidx56 = getelementptr inbounds [144 x i32], ptr @g6, i64 0, i64 %idxprom53
+  %tmp7 = load i32, ptr %arrayidx56, align 4
   %shr = ashr i32 %tmp7, %v5
   %add57 = add nsw i32 %shr, 0
-  %arrayidx61 = getelementptr inbounds [144 x [144 x i8]], [144 x [144 x i8]]* @g14, i64 0, i64 %idxprom53, i64 %idxprom58
-  %tmp8 = load i8, i8* %arrayidx61, align 1
+  %arrayidx61 = getelementptr inbounds [144 x [144 x i8]], ptr @g14, i64 0, i64 %idxprom53, i64 %idxprom58
+  %tmp8 = load i8, ptr %arrayidx61, align 1
   %conv = zext i8 %tmp8 to i32
   %add62 = add nsw i32 %conv, %add57
   br label %for.inc
 
 block.bb.75:
-  %arrayidx78 = getelementptr inbounds [144 x i32], [144 x i32]* @g10, i64 0, i64 %idxprom53
-  %tmp9 = load i32, i32* %arrayidx78, align 4
+  %arrayidx78 = getelementptr inbounds [144 x i32], ptr @g10, i64 0, i64 %idxprom53
+  %tmp9 = load i32, ptr %arrayidx78, align 4
   %shr79 = ashr i32 %tmp9, %v5
   %add80 = add nsw i32 %shr79, 0
   %add86 = add nsw i32 0, %add80
   br label %for.inc
 
 block.bb.87:
-  %arrayidx90 = getelementptr inbounds [144 x i32], [144 x i32]* @g9, i64 0, i64 %idxprom53
-  %tmp10 = load i32, i32* %arrayidx90, align 4
+  %arrayidx90 = getelementptr inbounds [144 x i32], ptr @g9, i64 0, i64 %idxprom53
+  %tmp10 = load i32, ptr %arrayidx90, align 4
   %shr91 = ashr i32 %tmp10, 0
   %sub92 = sub nsw i32 0, %shr91
-  %arrayidx96 = getelementptr inbounds [144 x [144 x i8]], [144 x [144 x i8]]* @g15, i64 0, i64 %idxprom53, i64 %idxprom69
-  %tmp11 = load i8, i8* %arrayidx96, align 1
+  %arrayidx96 = getelementptr inbounds [144 x [144 x i8]], ptr @g15, i64 0, i64 %idxprom53, i64 %idxprom69
+  %tmp11 = load i8, ptr %arrayidx96, align 1
   %conv97 = zext i8 %tmp11 to i32
   %sub98 = sub nsw i32 %sub92, %conv97
   br label %for.inc
 
 block.bb.111:
-  %arrayidx114 = getelementptr inbounds [144 x i32], [144 x i32]* @g19, i64 0, i64 %idxprom53
-  %tmp12 = load i32, i32* %arrayidx114, align 4
+  %arrayidx114 = getelementptr inbounds [144 x i32], ptr @g19, i64 0, i64 %idxprom53
+  %tmp12 = load i32, ptr %arrayidx114, align 4
   %shr115 = ashr i32 %tmp12, 0
   %sub116 = sub nsw i32 0, %shr115
-  %arrayidx120 = getelementptr inbounds [144 x [144 x i8]], [144 x [144 x i8]]* @g12, i64 0, i64 %idxprom53, i64 %idxprom69
-  %tmp13 = load i8, i8* %arrayidx120, align 1
+  %arrayidx120 = getelementptr inbounds [144 x [144 x i8]], ptr @g12, i64 0, i64 %idxprom53, i64 %idxprom69
+  %tmp13 = load i8, ptr %arrayidx120, align 1
   %conv121 = zext i8 %tmp13 to i32
   %sub122 = sub nsw i32 %sub116, %conv121
   br label %for.inc
 
 block.bb.123:
-  %arrayidx126 = getelementptr inbounds [144 x i32], [144 x i32]* @g5, i64 0, i64 %idxprom53
-  %tmp14 = load i32, i32* %arrayidx126, align 4
+  %arrayidx126 = getelementptr inbounds [144 x i32], ptr @g5, i64 0, i64 %idxprom53
+  %tmp14 = load i32, ptr %arrayidx126, align 4
   %shr127 = ashr i32 %tmp14, %v5
   %add128 = add nsw i32 %shr127, 0
   %add134 = add nsw i32 0, %add128
   br label %for.inc
 
 block.bb.135:
-  %arrayidx138 = getelementptr inbounds [144 x i32], [144 x i32]* @g4, i64 0, i64 %idxprom53
-  %tmp15 = load i32, i32* %arrayidx138, align 4
+  %arrayidx138 = getelementptr inbounds [144 x i32], ptr @g4, i64 0, i64 %idxprom53
+  %tmp15 = load i32, ptr %arrayidx138, align 4
   %shr139 = ashr i32 %tmp15, 0
   %sub140 = sub nsw i32 0, %shr139
-  %arrayidx144 = getelementptr inbounds [144 x [144 x i8]], [144 x [144 x i8]]* @g20, i64 0, i64 %idxprom53, i64 %idxprom69
-  %tmp16 = load i8, i8* %arrayidx144, align 1
+  %arrayidx144 = getelementptr inbounds [144 x [144 x i8]], ptr @g20, i64 0, i64 %idxprom53, i64 %idxprom69
+  %tmp16 = load i8, ptr %arrayidx144, align 1
   %conv145 = zext i8 %tmp16 to i32
   %sub146 = sub nsw i32 %sub140, %conv145
   br label %for.inc
 
 block.bb.147:
-  %arrayidx150 = getelementptr inbounds [144 x i32], [144 x i32]* @g8, i64 0, i64 %idxprom53
-  %tmp17 = load i32, i32* %arrayidx150, align 4
+  %arrayidx150 = getelementptr inbounds [144 x i32], ptr @g8, i64 0, i64 %idxprom53
+  %tmp17 = load i32, ptr %arrayidx150, align 4
   %shr151 = ashr i32 %tmp17, %v5
   %add152 = add nsw i32 %shr151, 0
-  %arrayidx156 = getelementptr inbounds [144 x [144 x i8]], [144 x [144 x i8]]* @g16, i64 0, i64 %idxprom53, i64 %idxprom58
-  %tmp18 = load i8, i8* %arrayidx156, align 1
+  %arrayidx156 = getelementptr inbounds [144 x [144 x i8]], ptr @g16, i64 0, i64 %idxprom53, i64 %idxprom58
+  %tmp18 = load i8, ptr %arrayidx156, align 1
   %conv157 = zext i8 %tmp18 to i32
   %add158 = add nsw i32 %conv157, %add152
   br label %for.inc
 
 block.bb.159:
   %sub160 = add nsw i32 %v6, -450
-  %arrayidx162 = getelementptr inbounds [144 x i32], [144 x i32]* @g7, i64 0, i64 %idxprom53
-  %tmp19 = load i32, i32* %arrayidx162, align 4
+  %arrayidx162 = getelementptr inbounds [144 x i32], ptr @g7, i64 0, i64 %idxprom53
+  %tmp19 = load i32, ptr %arrayidx162, align 4
   %shr163 = ashr i32 %tmp19, 0
   %sub164 = sub nsw i32 %sub160, %shr163
   %sub170 = sub nsw i32 %sub164, 0
@@ -173,9 +173,9 @@ for.inc:
   br i1 %cmp48, label %for.end, label %for.body
 
 for.end:
-  store i32 %tmp, i32* %v4, align 4
-  %hold_hash.i.7 = getelementptr inbounds %type1, %type1* %tmp1, i64 %idxprom.1.i, i32 1
-  store i32 0, i32* %hold_hash.i.7, align 4
+  store i32 %tmp, ptr %v4, align 4
+  %hold_hash.i.7 = getelementptr inbounds %type1, ptr %tmp1, i64 %idxprom.1.i, i32 1
+  store i32 0, ptr %hold_hash.i.7, align 4
   br label %cleanup
 
 cleanup:

diff --git a/llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll b/llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll
index 6dbfcf8a39ff6..1410daa0b9182 100644
--- a/llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll
+++ b/llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll
@@ -15,14 +15,14 @@
 ;
 ; RUN: llc -mtriple aarch64-linux %s -o - | FileCheck %s
 
-define dso_local void @f(i32 %n, i32* nocapture %x) uwtable {
+define dso_local void @f(i32 %n, ptr nocapture %x) uwtable {
 entry:
   %cmp = icmp slt i32 %n, 0
   br i1 %cmp, label %return, label %if.end
 
 if.end:                                           ; preds = %entry
   %0 = zext i32 %n to i64
-  %1 = tail call i8* @llvm.stacksave()
+  %1 = tail call ptr @llvm.stacksave()
   %vla = alloca i32, i64 %0, align 16
   %cmp132 = icmp eq i32 %n, 0
   br i1 %cmp132, label %for.cond.cleanup8, label %for.body.lr.ph
@@ -40,25 +40,25 @@ for.body:                                         ; preds = %for.body, %for.body
   %2 = trunc i64 %indvars.iv34 to i32
   %sub2 = sub i32 %sub, %2
   %idxprom = sext i32 %sub2 to i64
-  %arrayidx = getelementptr inbounds i32, i32* %x, i64 %idxprom
-  %3 = load i32, i32* %arrayidx, align 4
-  %arrayidx4 = getelementptr inbounds i32, i32* %vla, i64 %indvars.iv34
-  store i32 %3, i32* %arrayidx4, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %x, i64 %idxprom
+  %3 = load i32, ptr %arrayidx, align 4
+  %arrayidx4 = getelementptr inbounds i32, ptr %vla, i64 %indvars.iv34
+  store i32 %3, ptr %arrayidx4, align 4
   %indvars.iv.next35 = add nuw nsw i64 %indvars.iv34, 1
   %exitcond37 = icmp eq i64 %indvars.iv.next35, %0
   br i1 %exitcond37, label %for.cond6.preheader, label %for.body
 
 for.cond.cleanup8:                                ; preds = %for.body9, %if.end, %for.cond6.preheader
-  tail call void @llvm.stackrestore(i8* %1)
+  tail call void @llvm.stackrestore(ptr %1)
   br label %return
 
 for.body9:                                        ; preds = %for.cond6.preheader, %for.body9
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body9 ], [ 0, %for.cond6.preheader ]
-  %arrayidx11 = getelementptr inbounds i32, i32* %vla, i64 %indvars.iv
-  %4 = load i32, i32* %arrayidx11, align 4
+  %arrayidx11 = getelementptr inbounds i32, ptr %vla, i64 %indvars.iv
+  %4 = load i32, ptr %arrayidx11, align 4
   %add = add nsw i32 %4, 1
-  %arrayidx13 = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
-  store i32 %add, i32* %arrayidx13, align 4
+  %arrayidx13 = getelementptr inbounds i32, ptr %x, i64 %indvars.iv
+  store i32 %add, ptr %arrayidx13, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond = icmp eq i64 %indvars.iv.next, %0
   br i1 %exitcond, label %for.cond.cleanup8, label %for.body9
@@ -68,10 +68,10 @@ return:                                           ; preds = %entry, %for.cond.cl
 }
 
 ; Function Attrs: nounwind
-declare i8* @llvm.stacksave()
+declare ptr @llvm.stacksave()
 
 ; Function Attrs: nounwind
-declare void @llvm.stackrestore(i8*)
+declare void @llvm.stackrestore(ptr)
 
 ; Check that llvm.stackrestore() happens before CSRs are popped off the stack
 

diff --git a/llvm/test/CodeGen/AArch64/sibling-call.ll b/llvm/test/CodeGen/AArch64/sibling-call.ll
index 7fcee6c3d1ade..c7a48e3075671 100644
--- a/llvm/test/CodeGen/AArch64/sibling-call.ll
+++ b/llvm/test/CodeGen/AArch64/sibling-call.ll
@@ -100,7 +100,7 @@ define dso_local void @caller_to16_from16([8 x i64], i64 %a, i64 %b) {
 
 }
 
-@func = dso_local global void(i32)* null
+@func = dso_local global ptr null
 
 define dso_local void @indirect_tail() {
 ; CHECK-LABEL: indirect_tail:
@@ -110,7 +110,7 @@ define dso_local void @indirect_tail() {
 ; CHECK-NEXT:    ldr x1, [x8, :lo12:func]
 ; CHECK-NEXT:    br x1
 
-  %fptr = load void(i32)*, void(i32)** @func
+  %fptr = load ptr, ptr @func
   tail call void %fptr(i32 42)
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/sign-return-address-cfi-negate-ra-state.ll b/llvm/test/CodeGen/AArch64/sign-return-address-cfi-negate-ra-state.ll
index 72ebf7161f785..117e3a926e7ec 100644
--- a/llvm/test/CodeGen/AArch64/sign-return-address-cfi-negate-ra-state.ll
+++ b/llvm/test/CodeGen/AArch64/sign-return-address-cfi-negate-ra-state.ll
@@ -3,7 +3,7 @@
 ; RUN: llc -mtriple=aarch64-none-eabi -filetype=obj -o - <%s | llvm-dwarfdump -v - | FileCheck --check-prefix=CHECK-DUMP %s
 
 @.str = private unnamed_addr constant [15 x i8] c"some exception\00", align 1
-@_ZTIPKc = external dso_local constant i8*
+@_ZTIPKc = external dso_local constant ptr
 
 ; CHECK: @_Z3fooi
 ; CHECK-V8A: hint #25
@@ -14,21 +14,20 @@ define dso_local i32 @_Z3fooi(i32 %x) #0 {
 entry:
   %retval = alloca i32, align 4
   %x.addr = alloca i32, align 4
-  store i32 %x, i32* %x.addr, align 4
-  %exception = call i8* @__cxa_allocate_exception(i64 8) #1
-  %0 = bitcast i8* %exception to i8**
-  store i8* getelementptr inbounds ([15 x i8], [15 x i8]* @.str, i32 0, i32 0), i8** %0, align 16
-  call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIPKc to i8*), i8* null) #2
+  store i32 %x, ptr %x.addr, align 4
+  %exception = call ptr @__cxa_allocate_exception(i64 8) #1
+  store ptr @.str, ptr %exception, align 16
+  call void @__cxa_throw(ptr %exception, ptr @_ZTIPKc, ptr null) #2
   unreachable
 
 return:                                           ; No predecessors!
-  %1 = load i32, i32* %retval, align 4
-  ret i32 %1
+  %0 = load i32, ptr %retval, align 4
+  ret i32 %0
 }
 
-declare dso_local i8* @__cxa_allocate_exception(i64)
+declare dso_local ptr @__cxa_allocate_exception(i64)
 
-declare dso_local void @__cxa_throw(i8*, i8*, i8*)
+declare dso_local void @__cxa_throw(ptr, ptr, ptr)
 
 attributes #0 = { "sign-return-address"="all" }
 

diff --git a/llvm/test/CodeGen/AArch64/speculation-hardening-loads.ll b/llvm/test/CodeGen/AArch64/speculation-hardening-loads.ll
index 447c6d165985a..e56b9b4820da4 100644
--- a/llvm/test/CodeGen/AArch64/speculation-hardening-loads.ll
+++ b/llvm/test/CodeGen/AArch64/speculation-hardening-loads.ll
@@ -1,8 +1,8 @@
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu | FileCheck %s
 
-define i128 @ldp_single_csdb(i128* %p) speculative_load_hardening {
+define i128 @ldp_single_csdb(ptr %p) speculative_load_hardening {
 entry:
-  %0 = load i128, i128* %p, align 16
+  %0 = load i128, ptr %p, align 16
   ret i128 %0
 ; CHECK-LABEL: ldp_single_csdb
 ; CHECK:      ldp   x8, x1, [x0]
@@ -18,9 +18,9 @@ entry:
 ; CHECK-NEXT: ret
 }
 
-define double @ld_double(double* %p) speculative_load_hardening {
+define double @ld_double(ptr %p) speculative_load_hardening {
 entry:
-  %0 = load double, double* %p, align 8
+  %0 = load double, ptr %p, align 8
   ret double %0
 ; Checking that the address loaded from is masked for a floating point load.
 ; CHECK-LABEL: ld_double
@@ -35,9 +35,9 @@ entry:
 ; CHECK-NEXT: ret
 }
 
-define i32 @csdb_emitted_for_subreg_use(i64* %p, i32 %b) speculative_load_hardening {
+define i32 @csdb_emitted_for_subreg_use(ptr %p, i32 %b) speculative_load_hardening {
 entry:
-  %X = load i64, i64* %p, align 8
+  %X = load i64, ptr %p, align 8
   %X_trunc = trunc i64 %X to i32
   %add = add i32 %b, %X_trunc
   %iszero = icmp eq i64 %X, 0
@@ -60,9 +60,9 @@ entry:
 ; CHECK-NEXT: ret
 }
 
-define i64 @csdb_emitted_for_superreg_use(i32* %p, i64 %b) speculative_load_hardening {
+define i64 @csdb_emitted_for_superreg_use(ptr %p, i64 %b) speculative_load_hardening {
 entry:
-  %X = load i32, i32* %p, align 4
+  %X = load i32, ptr %p, align 4
   %X_ext = zext i32 %X to i64
   %add = add i64 %b, %X_ext
   %iszero = icmp eq i32 %X, 0
@@ -85,13 +85,13 @@ entry:
 ; CHECK-NEXT: ret
 }
 
-define i64 @no_masking_with_full_control_flow_barriers(i64 %a, i64 %b, i64* %p) speculative_load_hardening {
+define i64 @no_masking_with_full_control_flow_barriers(i64 %a, i64 %b, ptr %p) speculative_load_hardening {
 ; CHECK-LABEL: no_masking_with_full_control_flow_barriers
 ; CHECK: dsb sy
 ; CHECK: isb
 entry:
   %0 = tail call i64 asm "hint #12", "={x17},{x16},0"(i64 %b, i64 %a)
-  %X = load i64, i64* %p, align 8
+  %X = load i64, ptr %p, align 8
   %ret = add i64 %X, %0
 ; CHECK-NOT: csdb
 ; CHECK-NOT: and
@@ -99,12 +99,12 @@ entry:
   ret i64 %ret
 }
 
-define void @f_implicitdef_vector_load(<4 x i32>* %dst, <2 x i32>* %src) speculative_load_hardening
+define void @f_implicitdef_vector_load(ptr %dst, ptr %src) speculative_load_hardening
 {
 entry:
-  %0 = load <2 x i32>, <2 x i32>* %src, align 8
+  %0 = load <2 x i32>, ptr %src, align 8
   %shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
-  store <4 x i32> %shuffle, <4 x i32>* %dst, align 4
+  store <4 x i32> %shuffle, ptr %dst, align 4
   ret void
 ; CHECK-LABEL: f_implicitdef_vector_load
 ; CHECK:       cmp     sp, #0
@@ -120,7 +120,7 @@ entry:
 ; CHECK-NEXT:  ret
 }
 
-define <2 x double> @f_usedefvectorload(double* %a, double* %b) speculative_load_hardening {
+define <2 x double> @f_usedefvectorload(ptr %a, ptr %b) speculative_load_hardening {
 entry:
 ; CHECK-LABEL: f_usedefvectorload
 ; CHECK:       cmp     sp, #0
@@ -133,7 +133,7 @@ entry:
 ; CHECK-NEXT:  and     [[TMPREG]], [[TMPREG]], x16
 ; CHECK-NEXT:  mov     sp, [[TMPREG]]
 ; CHECK-NEXT:  ret
-  %0 = load double, double* %b, align 16
+  %0 = load double, ptr %b, align 16
   %vld1_lane = insertelement <2 x double> <double undef, double 0.000000e+00>, double %0, i32 0
   ret <2 x double> %vld1_lane
 }
@@ -153,6 +153,6 @@ entry:
 ; CHECK-NEXT:  mov     sp, [[TMPREG]]
 ; CHECK-NEXT:  ret
   %a = alloca i32, align 4
-  %val = load volatile i32, i32* %a, align 4
+  %val = load volatile i32, ptr %a, align 4
   ret i32 undef
 }

diff --git a/llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll b/llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll
index d1c7c78c3eb5c..f380b2d05d863 100644
--- a/llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll
+++ b/llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll
@@ -31,16 +31,16 @@ if.else:                                          ; preds = %entry
 ; CHECK-NEXT: .Lfunc_end
 }
 
-@__const.indirect_branch.ptr = private unnamed_addr constant [2 x i8*] [i8* blockaddress(@indirect_branch, %return), i8* blockaddress(@indirect_branch, %l2)], align 8
+@__const.indirect_branch.ptr = private unnamed_addr constant [2 x ptr] [ptr blockaddress(@indirect_branch, %return), ptr blockaddress(@indirect_branch, %l2)], align 8
 
 ; Function Attrs: norecurse nounwind readnone
 define dso_local i32 @indirect_branch(i32 %a, i32 %b, i32 %i) {
 ; CHECK-LABEL: indirect_branch:
 entry:
   %idxprom = sext i32 %i to i64
-  %arrayidx = getelementptr inbounds [2 x i8*], [2 x i8*]* @__const.indirect_branch.ptr, i64 0, i64 %idxprom
-  %0 = load i8*, i8** %arrayidx, align 8
-  indirectbr i8* %0, [label %return, label %l2]
+  %arrayidx = getelementptr inbounds [2 x ptr], ptr @__const.indirect_branch.ptr, i64 0, i64 %idxprom
+  %0 = load ptr, ptr %arrayidx, align 8
+  indirectbr ptr %0, [label %return, label %l2]
 ; CHECK:       br x
 ; ISBDSB-NEXT: dsb sy
 ; ISBDSB-NEXT: isb
@@ -116,14 +116,12 @@ d:                             ; preds = %asm.fallthrough, %entry
 }
 
 define dso_local i32 @indirect_call(
-i32 (...)* nocapture %f1, i32 (...)* nocapture %f2) {
+ptr nocapture %f1, ptr nocapture %f2) {
 entry:
 ; CHECK-LABEL: indirect_call:
-  %callee.knr.cast = bitcast i32 (...)* %f1 to i32 ()*
-  %call = tail call i32 %callee.knr.cast()
+  %call = tail call i32 %f1()
 ; HARDEN: bl {{__llvm_slsblr_thunk_x[0-9]+$}}
-  %callee.knr.cast1 = bitcast i32 (...)* %f2 to i32 ()*
-  %call2 = tail call i32 %callee.knr.cast1()
+  %call2 = tail call i32 %f2()
 ; HARDEN: bl {{__llvm_slsblr_thunk_x[0-9]+$}}
   %add = add nsw i32 %call2, %call
   ret i32 %add
@@ -131,15 +129,15 @@ entry:
 }
 
 ; verify calling through a function pointer.
-@a = dso_local local_unnamed_addr global i32 (...)* null, align 8
+@a = dso_local local_unnamed_addr global ptr null, align 8
 @b = dso_local local_unnamed_addr global i32 0, align 4
 define dso_local void @indirect_call_global() local_unnamed_addr {
 ; CHECK-LABEL: indirect_call_global:
 entry:
-  %0 = load i32 ()*, i32 ()** bitcast (i32 (...)** @a to i32 ()**), align 8
+  %0 = load ptr, ptr @a, align 8
   %call = tail call i32 %0()  nounwind
 ; HARDEN: bl {{__llvm_slsblr_thunk_x[0-9]+$}}
-  store i32 %call, i32* @b, align 4
+  store i32 %call, ptr @b, align 4
   ret void
 ; CHECK: .Lfunc_end
 }
@@ -148,11 +146,11 @@ entry:
 ; as a linker is allowed to clobber x16 or x17 on calls, which would break the
 ; correct execution of the code sequence produced by the mitigation. The below
 ; test attempts to force *%f into x16 using inline assembly.
-define i64 @check_x16(i64 ()** nocapture readonly %fp, i64 ()** nocapture readonly %fp2) "target-features"="+neon,+reserve-x10,+reserve-x11,+reserve-x12,+reserve-x13,+reserve-x14,+reserve-x15,+reserve-x18,+reserve-x20,+reserve-x21,+reserve-x22,+reserve-x23,+reserve-x24,+reserve-x25,+reserve-x26,+reserve-x27,+reserve-x28,+reserve-x30,+reserve-x9" {
+define i64 @check_x16(ptr nocapture readonly %fp, ptr nocapture readonly %fp2) "target-features"="+neon,+reserve-x10,+reserve-x11,+reserve-x12,+reserve-x13,+reserve-x14,+reserve-x15,+reserve-x18,+reserve-x20,+reserve-x21,+reserve-x22,+reserve-x23,+reserve-x24,+reserve-x25,+reserve-x26,+reserve-x27,+reserve-x28,+reserve-x30,+reserve-x9" {
 entry:
 ; CHECK-LABEL: check_x16:
-  %f = load i64 ()*, i64 ()** %fp, align 8
-  %x16_f = tail call i64 ()* asm "add $0, $1, #0", "={x16},{x16}"(i64 ()* %f) nounwind
+  %f = load ptr, ptr %fp, align 8
+  %x16_f = tail call ptr asm "add $0, $1, #0", "={x16},{x16}"(ptr %f) nounwind
   %call1 = call i64 %x16_f()
 ; NOHARDEN:   blr x16
 ; ISBDSB-NOT: bl __llvm_slsblr_thunk_x16
@@ -167,30 +165,27 @@ entry:
 ; Since this is sensitive to register allocation choices, only check this with
 ; DAGIsel to avoid too much accidental breaking of this test that is a bit
 ; brittle.
-define i64 @check_x29(i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** nocapture readonly %fp,
-                      i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** nocapture readonly %fp2,
-                      i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** nocapture readonly %fp3)
+define i64 @check_x29(ptr nocapture readonly %fp,
+                      ptr nocapture readonly %fp2,
+                      ptr nocapture readonly %fp3)
 "target-features"="+neon,+reserve-x10,+reserve-x11,+reserve-x12,+reserve-x13,+reserve-x14,+reserve-x15,+reserve-x18,+reserve-x20,+reserve-x21,+reserve-x22,+reserve-x23,+reserve-x24,+reserve-x25,+reserve-x26,+reserve-x27,+reserve-x28,+reserve-x9"
 "frame-pointer"="none"
 {
 entry:
 ; CHECK-LABEL: check_x29:
-  %0 = load i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)*, i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp, align 8
-  %1 = bitcast i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp2 to i8**
-  %2 = load i8*, i8** %1, align 8
-  %3 = load i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)*, i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp2, align 8
-  %4 = bitcast i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp3 to i8**
-  %5 = load i8*, i8** %4, align 8
-  %6 = load i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)*, i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp3, align 8
-  %7 = bitcast i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp to i8**
-  %8 = load i8*, i8** %7, align 8
-  %call = call i64 %0(i8* %2, i8* %5, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
-  %call1 = call i64 %3(i8* %2, i8* %5, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
+  %0 = load ptr, ptr %fp, align 8
+  %1 = load ptr, ptr %fp2, align 8
+  %2 = load ptr, ptr %fp2, align 8
+  %3 = load ptr, ptr %fp3, align 8
+  %4 = load ptr, ptr %fp3, align 8
+  %5 = load ptr, ptr %fp, align 8
+  %call = call i64 %0(ptr %1, ptr %3, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
+  %call1 = call i64 %2(ptr %1, ptr %3, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
 ; NOHARDEN:      blr x29
 ; ISBDSBDAGISEL: bl __llvm_slsblr_thunk_x29
 ; SBDAGISEL:     bl __llvm_slsblr_thunk_x29
 ; CHECK
-  %call2 = call i64 %6(i8* %2, i8* %8, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
+  %call2 = call i64 %4(ptr %1, ptr %5, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
   %add = add nsw i64 %call1, %call
   %add1 = add nsw i64 %call2, %add
   ret i64 %add1

diff --git a/llvm/test/CodeGen/AArch64/speculation-hardening.ll b/llvm/test/CodeGen/AArch64/speculation-hardening.ll
index 4efe7f851c706..cb445b47f907f 100644
--- a/llvm/test/CodeGen/AArch64/speculation-hardening.ll
+++ b/llvm/test/CodeGen/AArch64/speculation-hardening.ll
@@ -5,7 +5,7 @@
 ; RUN: sed -e 's/SLHATTR/speculative_load_hardening/' %s | llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -fast-isel | FileCheck %s --check-prefixes=CHECK,SLH
 ; RUN: sed -e 's/SLHATTR//' %s | llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -fast-isel | FileCheck %s --check-prefixes=CHECK,NOSLH
 
-define i32 @f(i8* nocapture readonly %p, i32 %i, i32 %N) local_unnamed_addr SLHATTR {
+define i32 @f(ptr nocapture readonly %p, i32 %i, i32 %N) local_unnamed_addr SLHATTR {
 ; CHECK-LABEL: f
 entry:
 ; SLH:  cmp sp, #0
@@ -32,8 +32,8 @@ if.then:                                          ; preds = %entry
 ; NOSLH-NOT: csel x16, x16, xzr, {{(lt)|(ge)|(eq)|(ne)}}
 ; SLH-DAG: csel x16, x16, xzr, {{(lt)|(ge)|(eq)|(ne)}}
   %idxprom = sext i32 %i to i64
-  %arrayidx = getelementptr inbounds i8, i8* %p, i64 %idxprom
-  %0 = load i8, i8* %arrayidx, align 1
+  %arrayidx = getelementptr inbounds i8, ptr %p, i64 %idxprom
+  %0 = load i8, ptr %arrayidx, align 1
 ; CHECK-DAG:      ldrb [[LOADED:w[0-9]+]],
   %conv = zext i8 %0 to i32
   br label %return
@@ -99,7 +99,7 @@ else:
   ret i32 %6
 }
 
-define i32 @landingpad(i32 %l0, i32 %l1) SLHATTR personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @landingpad(i32 %l0, i32 %l1) SLHATTR personality ptr @__gxx_personality_v0 {
 ; CHECK-LABEL: landingpad
 entry:
 ; SLH:  cmp sp, #0
@@ -113,14 +113,14 @@ entry:
 ; SLH:  csetm x16, ne
 
 lpad:
-  %l4 = landingpad { i8*, i32 }
-          catch i8* null
+  %l4 = landingpad { ptr, i32 }
+          catch ptr null
 ; SLH:  cmp sp, #0
 ; SLH:  csetm x16, ne
 ; NOSLH-NOT:  cmp sp, #0
 ; NOSLH-NOT:  csetm x16, ne
-  %l5 = extractvalue { i8*, i32 } %l4, 0
-  %l6 = tail call i8* @__cxa_begin_catch(i8* %l5)
+  %l5 = extractvalue { ptr, i32 } %l4, 0
+  %l6 = tail call ptr @__cxa_begin_catch(ptr %l5)
   %l7 = icmp sgt i32 %l0, %l1
   br i1 %l7, label %then, label %else
 ; GlobalISel lowers the branch to a b.ne sometimes instead of b.ge as expected..
@@ -148,5 +148,5 @@ exit:
 
 declare i32 @__gxx_personality_v0(...)
 declare void @_Z10throwing_fv() local_unnamed_addr
-declare i8* @__cxa_begin_catch(i8*) local_unnamed_addr
+declare ptr @__cxa_begin_catch(ptr) local_unnamed_addr
 declare void @__cxa_end_catch() local_unnamed_addr

diff  --git a/llvm/test/CodeGen/AArch64/sponentry.ll b/llvm/test/CodeGen/AArch64/sponentry.ll
index 1b9bc64ef15c1..fa960563c1e20 100644
--- a/llvm/test/CodeGen/AArch64/sponentry.ll
+++ b/llvm/test/CodeGen/AArch64/sponentry.ll
@@ -3,14 +3,12 @@
 ; RUN: llc -mtriple=aarch64-windows-msvc %s -o - | FileCheck %s --check-prefix=NOFP
 ; RUN: llc -mtriple=aarch64-windows-msvc -fast-isel %s -o - | FileCheck %s --check-prefix=NOFP
 
-@env2 = common dso_local global [24 x i64]* null, align 8
+@env2 = common dso_local global ptr null, align 8
 
 define dso_local void @bar() {
-  %1 = call i8* @llvm.sponentry()
-  %2 = load [24 x i64]*, [24 x i64]** @env2, align 8
-  %3 = getelementptr inbounds [24 x i64], [24 x i64]* %2, i32 0, i32 0
-  %4 = bitcast i64* %3 to i8*
-  %5 = call i32 @_setjmpex(i8* %4, i8* %1) #2
+  %1 = call ptr @llvm.sponentry()
+  %2 = load ptr, ptr @env2, align 8
+  %3 = call i32 @_setjmpex(ptr %2, ptr %1) #2
   ret void
 }
 
@@ -22,17 +20,15 @@ define dso_local void @bar() {
 ; NOFP: str     x30, [sp, #-16]!
 ; NOFP: add     x1, sp, #16
 
-define dso_local void @foo([24 x i64]*) {
-  %2 = alloca [24 x i64]*, align 8
+define dso_local void @foo(ptr) {
+  %2 = alloca ptr, align 8
   %3 = alloca i32, align 4
   %4 = alloca [100 x i32], align 4
-  store [24 x i64]* %0, [24 x i64]** %2, align 8
-  %5 = call i8* @llvm.sponentry()
-  %6 = load [24 x i64]*, [24 x i64]** %2, align 8
-  %7 = getelementptr inbounds [24 x i64], [24 x i64]* %6, i32 0, i32 0
-  %8 = bitcast i64* %7 to i8*
-  %9 = call i32 @_setjmpex(i8* %8, i8* %5)
-  store i32 %9, i32* %3, align 4
+  store ptr %0, ptr %2, align 8
+  %5 = call ptr @llvm.sponentry()
+  %6 = load ptr, ptr %2, align 8
+  %7 = call i32 @_setjmpex(ptr %6, ptr %5)
+  store i32 %7, ptr %3, align 4
   ret void
 }
 
@@ -45,24 +41,19 @@ define dso_local void @foo([24 x i64]*) {
 ; NOFP: sub     sp, sp, #432
 ; NOFP: add     x1, sp, #432
 
-define dso_local void @var_args(i8*, ...) {
-  %2 = alloca i8*, align 8
-  %3 = alloca i8*, align 8
-  store i8* %0, i8** %2, align 8
-  %4 = bitcast i8** %3 to i8*
-  call void @llvm.va_start(i8* %4)
-  %5 = load i8*, i8** %3, align 8
-  %6 = getelementptr inbounds i8, i8* %5, i64 8
-  store i8* %6, i8** %3, align 8
-  %7 = bitcast i8* %5 to i32*
-  %8 = load i32, i32* %7, align 8
-  %9 = bitcast i8** %3 to i8*
-  call void @llvm.va_end(i8* %9)
-  %10 = call i8* @llvm.sponentry()
-  %11 = load [24 x i64]*, [24 x i64]** @env2, align 8
-  %12 = getelementptr inbounds [24 x i64], [24 x i64]* %11, i32 0, i32 0
-  %13 = bitcast i64* %12 to i8*
-  %14 = call i32 @_setjmpex(i8* %13, i8* %10) #3
+define dso_local void @var_args(ptr, ...) {
+  %2 = alloca ptr, align 8
+  %3 = alloca ptr, align 8
+  store ptr %0, ptr %2, align 8
+  call void @llvm.va_start(ptr %3)
+  %4 = load ptr, ptr %3, align 8
+  %5 = getelementptr inbounds i8, ptr %4, i64 8
+  store ptr %5, ptr %3, align 8
+  %6 = load i32, ptr %4, align 8
+  call void @llvm.va_end(ptr %3)
+  %7 = call ptr @llvm.sponentry()
+  %8 = load ptr, ptr @env2, align 8
+  %9 = call i32 @_setjmpex(ptr %8, ptr %7) #3
   ret void
 }
 
@@ -76,11 +67,9 @@ define dso_local void @var_args(i8*, ...) {
 ; NOFP: add     x1, sp, #96
 
 define dso_local void @manyargs(i64 %x1, i64 %x2, i64 %x3, i64 %x4, i64 %x5, i64 %x6, i64 %x7, i64 %x8, i64 %x9, i64 %x10) {
-  %1 = call i8* @llvm.sponentry()
-  %2 = load [24 x i64]*, [24 x i64]** @env2, align 8
-  %3 = getelementptr inbounds [24 x i64], [24 x i64]* %2, i32 0, i32 0
-  %4 = bitcast i64* %3 to i8*
-  %5 = call i32 @_setjmpex(i8* %4, i8* %1) #2
+  %1 = call ptr @llvm.sponentry()
+  %2 = load ptr, ptr @env2, align 8
+  %3 = call i32 @_setjmpex(ptr %2, ptr %1) #2
   ret void
 }
 
@@ -92,13 +81,13 @@ define dso_local void @manyargs(i64 %x1, i64 %x2, i64 %x3, i64 %x4, i64 %x5, i64
 ; NOFP: add     x1, sp, #16
 
 ; Function Attrs: nounwind readnone
-declare i8* @llvm.sponentry()
+declare ptr @llvm.sponentry()
 
 ; Function Attrs: returns_twice
-declare dso_local i32 @_setjmpex(i8*, i8*)
+declare dso_local i32 @_setjmpex(ptr, ptr)
 
 ; Function Attrs: nounwind
-declare void @llvm.va_start(i8*) #1
+declare void @llvm.va_start(ptr) #1
 
 ; Function Attrs: nounwind
-declare void @llvm.va_end(i8*) #1
+declare void @llvm.va_end(ptr) #1

diff  --git a/llvm/test/CodeGen/AArch64/sqrt-fastmath.ll b/llvm/test/CodeGen/AArch64/sqrt-fastmath.ll
index 5eb1841e8f17d..3f52f1d35ed65 100644
--- a/llvm/test/CodeGen/AArch64/sqrt-fastmath.ll
+++ b/llvm/test/CodeGen/AArch64/sqrt-fastmath.ll
@@ -495,7 +495,7 @@ define <2 x double> @sqrt_fdiv_common_operand_vec(<2 x double> %x) nounwind {
   ret <2 x double> %r
 }
 
-define double @sqrt_fdiv_common_operand_extra_use(double %x, double* %p) nounwind {
+define double @sqrt_fdiv_common_operand_extra_use(double %x, ptr %p) nounwind {
 ; FAULT-LABEL: sqrt_fdiv_common_operand_extra_use:
 ; FAULT:       // %bb.0:
 ; FAULT-NEXT:    fsqrt d0, d0
@@ -521,12 +521,12 @@ define double @sqrt_fdiv_common_operand_extra_use(double %x, double* %p) nounwin
 ; CHECK-NEXT:    str d2, [x0]
 ; CHECK-NEXT:    ret
   %sqrt = call fast double @llvm.sqrt.f64(double %x)
-  store double %sqrt, double* %p
+  store double %sqrt, ptr %p
   %r = fdiv fast double %x, %sqrt
   ret double %r
 }
 
-define double @sqrt_simplify_before_recip_3_uses(double %x, double* %p1, double* %p2) nounwind {
+define double @sqrt_simplify_before_recip_3_uses(double %x, ptr %p1, ptr %p2) nounwind {
 ; FAULT-LABEL: sqrt_simplify_before_recip_3_uses:
 ; FAULT:       // %bb.0:
 ; FAULT-NEXT:    fsqrt d0, d0
@@ -562,12 +562,12 @@ define double @sqrt_simplify_before_recip_3_uses(double %x, double* %p1, double*
   %rsqrt = fdiv fast double 1.0, %sqrt
   %r = fdiv fast double 42.0, %sqrt
   %sqrt_fast = fdiv fast double %x, %sqrt
-  store double %rsqrt, double* %p1, align 8
-  store double %r, double* %p2, align 8
+  store double %rsqrt, ptr %p1, align 8
+  store double %r, ptr %p2, align 8
   ret double %sqrt_fast
 }
 
-define double @sqrt_simplify_before_recip_3_uses_order(double %x, double* %p1, double* %p2) nounwind {
+define double @sqrt_simplify_before_recip_3_uses_order(double %x, ptr %p1, ptr %p2) nounwind {
 ; FAULT-LABEL: sqrt_simplify_before_recip_3_uses_order:
 ; FAULT:       // %bb.0:
 ; FAULT-NEXT:    fsqrt d0, d0
@@ -609,13 +609,13 @@ define double @sqrt_simplify_before_recip_3_uses_order(double %x, double* %p1, d
   %sqrt_fast = fdiv fast double %x, %sqrt
   %r1 = fdiv fast double 42.0, %sqrt
   %r2 = fdiv fast double 43.0, %sqrt
-  store double %r1, double* %p1, align 8
-  store double %r2, double* %p2, align 8
+  store double %r1, ptr %p1, align 8
+  store double %r2, ptr %p2, align 8
   ret double %sqrt_fast
 }
 
 
-define double @sqrt_simplify_before_recip_4_uses(double %x, double* %p1, double* %p2, double* %p3) nounwind {
+define double @sqrt_simplify_before_recip_4_uses(double %x, ptr %p1, ptr %p2, ptr %p3) nounwind {
 ; FAULT-LABEL: sqrt_simplify_before_recip_4_uses:
 ; FAULT:       // %bb.0:
 ; FAULT-NEXT:    fsqrt d0, d0
@@ -665,9 +665,9 @@ define double @sqrt_simplify_before_recip_4_uses(double %x, double* %p1, double*
   %r1 = fdiv fast double 42.0, %sqrt
   %r2 = fdiv fast double 43.0, %sqrt
   %sqrt_fast = fdiv fast double %x, %sqrt
-  store double %rsqrt, double* %p1, align 8
-  store double %r1, double* %p2, align 8
-  store double %r2, double* %p3, align 8
+  store double %rsqrt, ptr %p1, align 8
+  store double %r1, ptr %p2, align 8
+  store double %r2, ptr %p3, align 8
   ret double %sqrt_fast
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
index 0d6a93cc63c95..7feeac3315111 100644
--- a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
@@ -95,7 +95,7 @@ define <32 x i16> @v32i16(<32 x i16> %x, <32 x i16> %y) nounwind {
   ret <32 x i16> %z
 }
 
-define void @v8i8(<8 x i8>* %px, <8 x i8>* %py, <8 x i8>* %pz) nounwind {
+define void @v8i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x1]
@@ -103,14 +103,14 @@ define void @v8i8(<8 x i8>* %px, <8 x i8>* %py, <8 x i8>* %pz) nounwind {
 ; CHECK-NEXT:    sqsub v0.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    str d0, [x2]
 ; CHECK-NEXT:    ret
-  %x = load <8 x i8>, <8 x i8>* %px
-  %y = load <8 x i8>, <8 x i8>* %py
+  %x = load <8 x i8>, ptr %px
+  %y = load <8 x i8>, ptr %py
   %z = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %x, <8 x i8> %y)
-  store <8 x i8> %z, <8 x i8>* %pz
+  store <8 x i8> %z, ptr %pz
   ret void
 }
 
-define void @v4i8(<4 x i8>* %px, <4 x i8>* %py, <4 x i8>* %pz) nounwind {
+define void @v4i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
@@ -124,14 +124,14 @@ define void @v4i8(<4 x i8>* %px, <4 x i8>* %py, <4 x i8>* %pz) nounwind {
 ; CHECK-NEXT:    xtn v0.8b, v0.8h
 ; CHECK-NEXT:    str s0, [x2]
 ; CHECK-NEXT:    ret
-  %x = load <4 x i8>, <4 x i8>* %px
-  %y = load <4 x i8>, <4 x i8>* %py
+  %x = load <4 x i8>, ptr %px
+  %y = load <4 x i8>, ptr %py
   %z = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %x, <4 x i8> %y)
-  store <4 x i8> %z, <4 x i8>* %pz
+  store <4 x i8> %z, ptr %pz
   ret void
 }
 
-define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
+define void @v2i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1 { v0.b }[0], [x1]
@@ -149,14 +149,14 @@ define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
 ; CHECK-NEXT:    strb w9, [x2]
 ; CHECK-NEXT:    strb w8, [x2, #1]
 ; CHECK-NEXT:    ret
-  %x = load <2 x i8>, <2 x i8>* %px
-  %y = load <2 x i8>, <2 x i8>* %py
+  %x = load <2 x i8>, ptr %px
+  %y = load <2 x i8>, ptr %py
   %z = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %x, <2 x i8> %y)
-  store <2 x i8> %z, <2 x i8>* %pz
+  store <2 x i8> %z, ptr %pz
   ret void
 }
 
-define void @v4i16(<4 x i16>* %px, <4 x i16>* %py, <4 x i16>* %pz) nounwind {
+define void @v4i16(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x1]
@@ -164,14 +164,14 @@ define void @v4i16(<4 x i16>* %px, <4 x i16>* %py, <4 x i16>* %pz) nounwind {
 ; CHECK-NEXT:    sqsub v0.4h, v1.4h, v0.4h
 ; CHECK-NEXT:    str d0, [x2]
 ; CHECK-NEXT:    ret
-  %x = load <4 x i16>, <4 x i16>* %px
-  %y = load <4 x i16>, <4 x i16>* %py
+  %x = load <4 x i16>, ptr %px
+  %y = load <4 x i16>, ptr %py
   %z = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %x, <4 x i16> %y)
-  store <4 x i16> %z, <4 x i16>* %pz
+  store <4 x i16> %z, ptr %pz
   ret void
 }
 
-define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
+define void @v2i16(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1 { v0.h }[0], [x1]
@@ -189,10 +189,10 @@ define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
 ; CHECK-NEXT:    strh w9, [x2]
 ; CHECK-NEXT:    strh w8, [x2, #2]
 ; CHECK-NEXT:    ret
-  %x = load <2 x i16>, <2 x i16>* %px
-  %y = load <2 x i16>, <2 x i16>* %py
+  %x = load <2 x i16>, ptr %px
+  %y = load <2 x i16>, ptr %py
   %z = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %x, <2 x i16> %y)
-  store <2 x i16> %z, <2 x i16>* %pz
+  store <2 x i16> %z, ptr %pz
   ret void
 }
 
@@ -205,7 +205,7 @@ define <12 x i8> @v12i8(<12 x i8> %x, <12 x i8> %y) nounwind {
   ret <12 x i8> %z
 }
 
-define void @v12i16(<12 x i16>* %px, <12 x i16>* %py, <12 x i16>* %pz) nounwind {
+define void @v12i16(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v12i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q3, [x1]
@@ -215,14 +215,14 @@ define void @v12i16(<12 x i16>* %px, <12 x i16>* %py, <12 x i16>* %pz) nounwind
 ; CHECK-NEXT:    str q0, [x2]
 ; CHECK-NEXT:    str d1, [x2, #16]
 ; CHECK-NEXT:    ret
-  %x = load <12 x i16>, <12 x i16>* %px
-  %y = load <12 x i16>, <12 x i16>* %py
+  %x = load <12 x i16>, ptr %px
+  %y = load <12 x i16>, ptr %py
   %z = call <12 x i16> @llvm.ssub.sat.v12i16(<12 x i16> %x, <12 x i16> %y)
-  store <12 x i16> %z, <12 x i16>* %pz
+  store <12 x i16> %z, ptr %pz
   ret void
 }
 
-define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) nounwind {
+define void @v1i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v1i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr b0, [x1]
@@ -230,14 +230,14 @@ define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) nounwind {
 ; CHECK-NEXT:    sqsub v0.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    st1 { v0.b }[0], [x2]
 ; CHECK-NEXT:    ret
-  %x = load <1 x i8>, <1 x i8>* %px
-  %y = load <1 x i8>, <1 x i8>* %py
+  %x = load <1 x i8>, ptr %px
+  %y = load <1 x i8>, ptr %py
   %z = call <1 x i8> @llvm.ssub.sat.v1i8(<1 x i8> %x, <1 x i8> %y)
-  store <1 x i8> %z, <1 x i8>* %pz
+  store <1 x i8> %z, ptr %pz
   ret void
 }
 
-define void @v1i16(<1 x i16>* %px, <1 x i16>* %py, <1 x i16>* %pz) nounwind {
+define void @v1i16(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v1i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr h0, [x1]
@@ -245,10 +245,10 @@ define void @v1i16(<1 x i16>* %px, <1 x i16>* %py, <1 x i16>* %pz) nounwind {
 ; CHECK-NEXT:    sqsub v0.4h, v1.4h, v0.4h
 ; CHECK-NEXT:    str h0, [x2]
 ; CHECK-NEXT:    ret
-  %x = load <1 x i16>, <1 x i16>* %px
-  %y = load <1 x i16>, <1 x i16>* %py
+  %x = load <1 x i16>, ptr %px
+  %y = load <1 x i16>, ptr %py
   %z = call <1 x i16> @llvm.ssub.sat.v1i16(<1 x i16> %x, <1 x i16> %y)
-  store <1 x i16> %z, <1 x i16>* %pz
+  store <1 x i16> %z, ptr %pz
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll b/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
index b71e9e2de7c96..4982f56c5484a 100644
--- a/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-apple-ios -relocation-model=pic -frame-pointer=all | FileCheck %s
 
-@__stack_chk_guard = external global i64*
+@__stack_chk_guard = external global ptr
 
 ; PR20558
 
@@ -43,13 +43,13 @@ define i32 @test_stack_guard_remat2() ssp {
 ; CHECK-NEXT:    .loh AdrpLdrGotLdr Lloh1, Lloh3, Lloh5
 ; CHECK-NEXT:    .loh AdrpLdrGotLdr Lloh0, Lloh2, Lloh4
 entry:
-  %StackGuardSlot = alloca i8*
-  %StackGuard = load i8*, i8** bitcast (i64** @__stack_chk_guard to i8**)
-  call void @llvm.stackprotector(i8* %StackGuard, i8** %StackGuardSlot)
+  %StackGuardSlot = alloca ptr
+  %StackGuard = load ptr, ptr @__stack_chk_guard
+  call void @llvm.stackprotector(ptr %StackGuard, ptr %StackGuardSlot)
   %container = alloca [32 x i8], align 1
-  call void @llvm.stackprotectorcheck(i8** bitcast (i64** @__stack_chk_guard to i8**))
+  call void @llvm.stackprotectorcheck(ptr @__stack_chk_guard)
   ret i32 -1
 }
 
-declare void @llvm.stackprotector(i8*, i8**) ssp
-declare void @llvm.stackprotectorcheck(i8**) ssp
+declare void @llvm.stackprotector(ptr, ptr) ssp
+declare void @llvm.stackprotectorcheck(ptr) ssp

diff  --git a/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll b/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll
index 3f8b01fb7cbd5..7b308f306cfc5 100644
--- a/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll
+++ b/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll
@@ -94,11 +94,11 @@ define dso_local void @foo(i64 %t) local_unnamed_addr #0 {
 ; CHECK-NOT: __stack_chk_guard
 entry:
   %vla = alloca i32, i64 %t, align 4
-  call void @baz(i32* nonnull %vla)
+  call void @baz(ptr nonnull %vla)
   ret void
 }
 
-declare void @baz(i32*)
+declare void @baz(ptr)
 
 ; CHECK-BAD-OFFSET: LLVM ERROR: Unable to encode Stack Protector Guard Offset
 

diff  --git a/llvm/test/CodeGen/AArch64/stack-guard-vaarg.ll b/llvm/test/CodeGen/AArch64/stack-guard-vaarg.ll
index bc039b8573c0b..79974536364e5 100644
--- a/llvm/test/CodeGen/AArch64/stack-guard-vaarg.ll
+++ b/llvm/test/CodeGen/AArch64/stack-guard-vaarg.ll
@@ -3,39 +3,36 @@
 ; PR25610: -fstack-protector places the canary in the wrong place on arm64 with
 ;          va_args
 
-%struct.__va_list = type { i8*, i8*, i8*, i32, i32 }
+%struct.__va_list = type { ptr, ptr, ptr, i32, i32 }
 
 ; CHECK-LABEL: test
 ; CHECK: ldr [[GUARD:x[0-9]+]]{{.*}}:lo12:__stack_chk_guard]
 ; Make sure the canary is placed relative to the frame pointer, not
 ; the stack pointer.
 ; CHECK: stur [[GUARD]], [x29, #-8]
-define void @test(i8* %i, ...) #0 {
+define void @test(ptr %i, ...) #0 {
 entry:
   %buf = alloca [10 x i8], align 1
   %ap = alloca %struct.__va_list, align 8
   %tmp = alloca %struct.__va_list, align 8
-  %0 = getelementptr inbounds [10 x i8], [10 x i8]* %buf, i64 0, i64 0
-  call void @llvm.lifetime.start(i64 10, i8* %0)
-  %1 = bitcast %struct.__va_list* %ap to i8*
-  call void @llvm.lifetime.start(i64 32, i8* %1)
-  call void @llvm.va_start(i8* %1)
-  %2 = bitcast %struct.__va_list* %tmp to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %1, i64 32, i32 8, i1 false)
-  call void @baz(i8* %i, %struct.__va_list* nonnull %tmp)
-  call void @bar(i8* %0)
-  call void @llvm.va_end(i8* %1)
-  call void @llvm.lifetime.end(i64 32, i8* %1)
-  call void @llvm.lifetime.end(i64 10, i8* %0)
+  call void @llvm.lifetime.start(i64 10, ptr %buf)
+  call void @llvm.lifetime.start(i64 32, ptr %ap)
+  call void @llvm.va_start(ptr %ap)
+  call void @llvm.memcpy.p0.p0.i64(ptr %tmp, ptr %ap, i64 32, i32 8, i1 false)
+  call void @baz(ptr %i, ptr nonnull %tmp)
+  call void @bar(ptr %buf)
+  call void @llvm.va_end(ptr %ap)
+  call void @llvm.lifetime.end(i64 32, ptr %ap)
+  call void @llvm.lifetime.end(i64 10, ptr %buf)
   ret void
 }
 
-declare void @llvm.lifetime.start(i64, i8* nocapture)
-declare void @llvm.va_start(i8*)
-declare void @baz(i8*, %struct.__va_list*)
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1)
-declare void @bar(i8*)
-declare void @llvm.va_end(i8*)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.start(i64, ptr nocapture)
+declare void @llvm.va_start(ptr)
+declare void @baz(ptr, ptr)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i32, i1)
+declare void @bar(ptr)
+declare void @llvm.va_end(ptr)
+declare void @llvm.lifetime.end(i64, ptr nocapture)
 
 attributes #0 = { noinline nounwind optnone ssp }

diff  --git a/llvm/test/CodeGen/AArch64/stack-protector-musttail.ll b/llvm/test/CodeGen/AArch64/stack-protector-musttail.ll
index 72fa9fe246801..844860a290eed 100644
--- a/llvm/test/CodeGen/AArch64/stack-protector-musttail.ll
+++ b/llvm/test/CodeGen/AArch64/stack-protector-musttail.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=arm64-apple-macosx -fast-isel %s -o - -start-before=stack-protector -stop-after=stack-protector  | FileCheck %s
 
-@var = global [2 x i64]* null
+@var = global ptr null
 
 declare void @callee()
 
@@ -9,15 +9,15 @@ define void @caller1() ssp {
 ; Prologue:
 ; CHECK: @llvm.stackguard
 
-; CHECK: [[GUARD:%.*]] = call i8* @llvm.stackguard()
-; CHECK: [[TOKEN:%.*]] = load volatile i8*, i8** {{%.*}}
-; CHECK: [[TST:%.*]] = icmp eq i8* [[GUARD]], [[TOKEN]]
+; CHECK: [[GUARD:%.*]] = call ptr @llvm.stackguard()
+; CHECK: [[TOKEN:%.*]] = load volatile ptr, ptr {{%.*}}
+; CHECK: [[TST:%.*]] = icmp eq ptr [[GUARD]], [[TOKEN]]
 ; CHECK: br i1 [[TST]]
 
 ; CHECK: musttail call void @callee()
 ; CHECK-NEXT: ret void
   %var = alloca [2 x i64]
-  store [2 x i64]* %var, [2 x i64]** @var
+  store ptr %var, ptr @var
   musttail call void @callee()
   ret void
 }
@@ -27,14 +27,14 @@ define void @justret() ssp {
 ; Prologue:
 ; CHECK: @llvm.stackguard
 
-; CHECK: [[GUARD:%.*]] = call i8* @llvm.stackguard()
-; CHECK: [[TOKEN:%.*]] = load volatile i8*, i8** {{%.*}}
-; CHECK: [[TST:%.*]] = icmp eq i8* [[GUARD]], [[TOKEN]]
+; CHECK: [[GUARD:%.*]] = call ptr @llvm.stackguard()
+; CHECK: [[TOKEN:%.*]] = load volatile ptr, ptr {{%.*}}
+; CHECK: [[TST:%.*]] = icmp eq ptr [[GUARD]], [[TOKEN]]
 ; CHECK: br i1 [[TST]]
 
 ; CHECK: ret void
   %var = alloca [2 x i64]
-  store [2 x i64]* %var, [2 x i64]** @var
+  store ptr %var, ptr @var
   br label %retblock
 
 retblock:
@@ -42,27 +42,25 @@ retblock:
 }
 
 
-declare i64* @callee2()
+declare ptr @callee2()
 
-define i8* @caller2() ssp {
-; CHECK-LABEL: define i8* @caller2()
+define ptr @caller2() ssp {
+; CHECK-LABEL: define ptr @caller2()
 ; Prologue:
 ; CHECK: @llvm.stackguard
 
-; CHECK: [[GUARD:%.*]] = call i8* @llvm.stackguard()
-; CHECK: [[TOKEN:%.*]] = load volatile i8*, i8** {{%.*}}
-; CHECK: [[TST:%.*]] = icmp eq i8* [[GUARD]], [[TOKEN]]
+; CHECK: [[GUARD:%.*]] = call ptr @llvm.stackguard()
+; CHECK: [[TOKEN:%.*]] = load volatile ptr, ptr {{%.*}}
+; CHECK: [[TST:%.*]] = icmp eq ptr [[GUARD]], [[TOKEN]]
 ; CHECK: br i1 [[TST]]
 
-; CHECK: [[TMP:%.*]] = musttail call i64* @callee2()
-; CHECK-NEXT: [[RES:%.*]] = bitcast i64* [[TMP]] to i8*
-; CHECK-NEXT: ret i8* [[RES]]
+; CHECK: [[TMP:%.*]] = musttail call ptr @callee2()
+; CHECK-NEXT: ret ptr [[TMP]]
 
   %var = alloca [2 x i64]
-  store [2 x i64]* %var, [2 x i64]** @var
-  %tmp = musttail call i64* @callee2()
-  %res = bitcast i64* %tmp to i8*
-  ret i8* %res
+  store ptr %var, ptr @var
+  %tmp = musttail call ptr @callee2()
+  ret ptr %tmp
 }
 
 define void @caller3() ssp {
@@ -70,36 +68,34 @@ define void @caller3() ssp {
 ; Prologue:
 ; CHECK: @llvm.stackguard
 
-; CHECK: [[GUARD:%.*]] = call i8* @llvm.stackguard()
-; CHECK: [[TOKEN:%.*]] = load volatile i8*, i8** {{%.*}}
-; CHECK: [[TST:%.*]] = icmp eq i8* [[GUARD]], [[TOKEN]]
+; CHECK: [[GUARD:%.*]] = call ptr @llvm.stackguard()
+; CHECK: [[TOKEN:%.*]] = load volatile ptr, ptr {{%.*}}
+; CHECK: [[TST:%.*]] = icmp eq ptr [[GUARD]], [[TOKEN]]
 ; CHECK: br i1 [[TST]]
 
 ; CHECK: tail call void @callee()
 ; CHECK-NEXT: ret void
   %var = alloca [2 x i64]
-  store [2 x i64]* %var, [2 x i64]** @var
+  store ptr %var, ptr @var
   tail call void @callee()
   ret void
 }
 
-define i8* @caller4() ssp {
-; CHECK-LABEL: define i8* @caller4()
+define ptr @caller4() ssp {
+; CHECK-LABEL: define ptr @caller4()
 ; Prologue:
 ; CHECK: @llvm.stackguard
 
-; CHECK: [[GUARD:%.*]] = call i8* @llvm.stackguard()
-; CHECK: [[TOKEN:%.*]] = load volatile i8*, i8** {{%.*}}
-; CHECK: [[TST:%.*]] = icmp eq i8* [[GUARD]], [[TOKEN]]
+; CHECK: [[GUARD:%.*]] = call ptr @llvm.stackguard()
+; CHECK: [[TOKEN:%.*]] = load volatile ptr, ptr {{%.*}}
+; CHECK: [[TST:%.*]] = icmp eq ptr [[GUARD]], [[TOKEN]]
 ; CHECK: br i1 [[TST]]
 
-; CHECK: [[TMP:%.*]] = tail call i64* @callee2()
-; CHECK-NEXT: [[RES:%.*]] = bitcast i64* [[TMP]] to i8*
-; CHECK-NEXT: ret i8* [[RES]]
+; CHECK: [[TMP:%.*]] = tail call ptr @callee2()
+; CHECK-NEXT: ret ptr [[TMP]]
 
   %var = alloca [2 x i64]
-  store [2 x i64]* %var, [2 x i64]** @var
-  %tmp = tail call i64* @callee2()
-  %res = bitcast i64* %tmp to i8*
-  ret i8* %res
+  store ptr %var, ptr @var
+  %tmp = tail call ptr @callee2()
+  ret ptr %tmp
 }

diff  --git a/llvm/test/CodeGen/AArch64/stack-protector-target.ll b/llvm/test/CodeGen/AArch64/stack-protector-target.ll
index dc0a288d89186..3bbb7567e10a2 100644
--- a/llvm/test/CodeGen/AArch64/stack-protector-target.ll
+++ b/llvm/test/CodeGen/AArch64/stack-protector-target.ll
@@ -8,12 +8,11 @@
 define void @_Z1fv() sspreq {
 entry:
   %x = alloca i32, align 4
-  %0 = bitcast i32* %x to i8*
-  call void @_Z7CapturePi(i32* nonnull %x)
+  call void @_Z7CapturePi(ptr nonnull %x)
   ret void
 }
 
-declare void @_Z7CapturePi(i32*)
+declare void @_Z7CapturePi(ptr)
 
 ; ANDROID-AARCH64: mrs [[A:.*]], TPIDR_EL0
 ; ANDROID-AARCH64: ldr [[B:.*]], [[[A]], #40]

diff  --git a/llvm/test/CodeGen/AArch64/stack-tagging-dbg.ll b/llvm/test/CodeGen/AArch64/stack-tagging-dbg.ll
index 9de0456549922..8b6c0c86bb4e2 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-dbg.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-dbg.ll
@@ -3,7 +3,7 @@
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--linux-android"
 
-declare void @use32(i32*)
+declare void @use32(ptr)
 declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone speculatable
 declare void @llvm.dbg.value(metadata, metadata, metadata) nounwind readnone speculatable
 
@@ -11,17 +11,17 @@ declare void @llvm.dbg.value(metadata, metadata, metadata) nounwind readnone spe
 define void @DbgIntrinsics() sanitize_memtag {
 entry:
   %x = alloca i32, align 4
-  call void @llvm.dbg.declare(metadata i32* %x, metadata !6, metadata !DIExpression()), !dbg !10
-  call void @llvm.dbg.value(metadata !DIArgList(i32* %x, i32* %x), metadata !6, metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_plus)), !dbg !10
-  store i32 42, i32* %x, align 4
-  call void @use32(i32* %x)
+  call void @llvm.dbg.declare(metadata ptr %x, metadata !6, metadata !DIExpression()), !dbg !10
+  call void @llvm.dbg.value(metadata !DIArgList(ptr %x, ptr %x), metadata !6, metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_plus)), !dbg !10
+  store i32 42, ptr %x, align 4
+  call void @use32(ptr %x)
   ret void
 }
 
 ; CHECK-LABEL: define void @DbgIntrinsics(
 ; CHECK:  [[X:%.*]] = alloca { i32, [12 x i8] }, align 16
-; CHECK:  call void @llvm.dbg.declare(metadata { i32, [12 x i8] }* [[X]],
-; CHECK:  call void @llvm.dbg.value(metadata !DIArgList({ i32, [12 x i8] }* [[X]], { i32, [12 x i8] }* [[X]])
+; CHECK:  call void @llvm.dbg.declare(metadata ptr [[X]],
+; CHECK:  call void @llvm.dbg.value(metadata !DIArgList(ptr [[X]], ptr [[X]])
 
 
 !llvm.dbg.cu = !{!0}

diff  --git a/llvm/test/CodeGen/AArch64/stack-tagging-ex-1.ll b/llvm/test/CodeGen/AArch64/stack-tagging-ex-1.ll
index 2099e722fe523..32f01b704350f 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-ex-1.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-ex-1.ll
@@ -3,30 +3,30 @@
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-arm-unknown-eabi"
 
-define  void @f() local_unnamed_addr #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define  void @f() local_unnamed_addr #0 personality ptr @__gxx_personality_v0 {
 start:
 ; CHECK-LABEL: start:
   %a = alloca i8, i32 48, align 8
-  call void @llvm.lifetime.start.p0i8(i64 48, i8* nonnull %a) #2
-; CHECK: call void @llvm.aarch64.settag(i8* %a.tag, i64 48)
+  call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %a) #2
+; CHECK: call void @llvm.aarch64.settag(ptr %a.tag, i64 48)
   %b = alloca i8, i32 48, align 8
-  call void @llvm.lifetime.start.p0i8(i64 48, i8* nonnull %b) #2
-; CHECK: call void @llvm.aarch64.settag(i8* %b.tag, i64 48)
-  invoke void @g (i8 * nonnull %a, i8 * nonnull %b) to label %next0 unwind label %lpad0
+  call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %b) #2
+; CHECK: call void @llvm.aarch64.settag(ptr %b.tag, i64 48)
+  invoke void @g (ptr nonnull %a, ptr nonnull %b) to label %next0 unwind label %lpad0
 ; CHECK-NOT: settag
 
 next0:
 ; CHECK-LABEL: next0:
-  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
-  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %b)
+  call void @llvm.lifetime.end.p0(i64 40, ptr nonnull %a)
+  call void @llvm.lifetime.end.p0(i64 40, ptr nonnull %b)
   br label %exit
 ; CHECK-NOT: settag
 
 lpad0:
 ; CHECK-LABEL: lpad0:
-  %pad0v = landingpad { i8*, i32 } catch i8* null
-  %v = extractvalue { i8*, i32 } %pad0v, 0
-  %x = call i8* @__cxa_begin_catch(i8* %v) #2
+  %pad0v = landingpad { ptr, i32 } catch ptr null
+  %v = extractvalue { ptr, i32 } %pad0v, 0
+  %x = call ptr @__cxa_begin_catch(ptr %v) #2
   invoke void @__cxa_end_catch() to label %next1 unwind label %lpad1
 ; CHECK-NOT: settag
 
@@ -37,32 +37,32 @@ next1:
 
 lpad1:
 ; CHECK-LABEL: lpad1:
-; CHECK-DAG: call void @llvm.aarch64.settag(i8* %a, i64 48)
-; CHECK-DAG: call void @llvm.aarch64.settag(i8* %b, i64 48)
-  %pad1v = landingpad { i8*, i32 } cleanup
-  resume { i8*, i32 } %pad1v
+; CHECK-DAG: call void @llvm.aarch64.settag(ptr %a, i64 48)
+; CHECK-DAG: call void @llvm.aarch64.settag(ptr %b, i64 48)
+  %pad1v = landingpad { ptr, i32 } cleanup
+  resume { ptr, i32 } %pad1v
 
 exit:
 ; CHECK-LABEL: exit:
-; CHECK-DAG: call void @llvm.aarch64.settag(i8* %a, i64 48)
-; CHECK-DAG: call void @llvm.aarch64.settag(i8* %b, i64 48)
+; CHECK-DAG: call void @llvm.aarch64.settag(ptr %a, i64 48)
+; CHECK-DAG: call void @llvm.aarch64.settag(ptr %b, i64 48)
   ret void
 ; CHECK: ret void
 }
 
-declare void @g(i8 *, i8 *) #0
+declare void @g(ptr, ptr) #0
 
 declare dso_local i32 @__gxx_personality_v0(...)
 
-declare dso_local i8* @__cxa_begin_catch(i8*) local_unnamed_addr
+declare dso_local ptr @__cxa_begin_catch(ptr) local_unnamed_addr
 
 declare dso_local void @__cxa_end_catch() local_unnamed_addr
 
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #1
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
 
 ; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
 
 attributes #0 = { sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { argmemonly nounwind willreturn }

diff  --git a/llvm/test/CodeGen/AArch64/stack-tagging-musttail.ll b/llvm/test/CodeGen/AArch64/stack-tagging-musttail.ll
index 2fa2d967be0eb..8d3be9ad07265 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-musttail.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-musttail.ll
@@ -8,18 +8,18 @@ target triple = "aarch64-arm-unknown-eabi"
 
 define dso_local noundef i32 @_Z3bari(i32 noundef %0) sanitize_memtag {
   %2 = alloca i32, align 4
-  store i32 %0, i32* %2, align 4
-  %3 = load i32, i32* %2, align 4
+  store i32 %0, ptr %2, align 4
+  %3 = load i32, ptr %2, align 4
   ret i32 %3
 }
 
 define dso_local noundef i32 @_Z3fooi(i32 noundef %0) sanitize_memtag {
   %2 = alloca i32, align 4
   %3 = alloca i32, align 4
-  store i32 %0, i32* %2, align 4
-  store volatile i32 5, i32* %3, align 4
-  %4 = load i32, i32* %2, align 4
-  %5 = load volatile i32, i32* %3, align 4
+  store i32 %0, ptr %2, align 4
+  store volatile i32 5, ptr %3, align 4
+  %4 = load i32, ptr %2, align 4
+  %5 = load volatile i32, ptr %3, align 4
   %6 = add nsw i32 %4, %5
   ; CHECK: call void @llvm.aarch64.settag
   ; CHECK: musttail call

diff  --git a/llvm/test/CodeGen/AArch64/stack-tagging-setjmp.ll b/llvm/test/CodeGen/AArch64/stack-tagging-setjmp.ll
index e5a6e8fc4fbae..4be63913a5048 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-setjmp.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-setjmp.ll
@@ -2,7 +2,7 @@
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-unknown-linux-android29"
 
-@stackbuf = dso_local local_unnamed_addr global i8* null, align 8
+@stackbuf = dso_local local_unnamed_addr global ptr null, align 8
 @jbuf = dso_local global [32 x i64] zeroinitializer, align 8
 
 declare void @may_jump()
@@ -10,7 +10,7 @@ declare void @may_jump()
 define dso_local noundef i1 @_Z6targetv() sanitize_memtag {
 entry:
   %buf = alloca [4096 x i8], align 1
-  %call = call i32 @setjmp(i64* noundef getelementptr inbounds ([32 x i64], [32 x i64]* @jbuf, i64 0, i64 0))
+  %call = call i32 @setjmp(ptr noundef @jbuf)
   switch i32 %call, label %while.body [
     i32 1, label %return
     i32 2, label %sw.bb1
@@ -20,14 +20,13 @@ sw.bb1:                                           ; preds = %entry
   br label %return
 
 while.body:                                       ; preds = %entry
-  %0 = getelementptr inbounds [4096 x i8], [4096 x i8]* %buf, i64 0, i64 0
-  call void @llvm.lifetime.start.p0i8(i64 4096, i8* nonnull %0) #10
-  store i8* %0, i8** @stackbuf, align 8
+  call void @llvm.lifetime.start.p0(i64 4096, ptr nonnull %buf) #10
+  store ptr %buf, ptr @stackbuf, align 8
   ; may_jump may call longjmp, going back to the switch (and then the return),
   ; bypassing the lifetime.end. This is why we need to untag on the return,
   ; rather than the lifetime.end.
   call void @may_jump()
-  call void @llvm.lifetime.end.p0i8(i64 4096, i8* nonnull %0) #10
+  call void @llvm.lifetime.end.p0(i64 4096, ptr nonnull %buf) #10
   br label %return
 
 ; CHECK-LABEL: return:
@@ -37,8 +36,8 @@ return:                                           ; preds = %entry, %while.body,
   ret i1 %retval.0
 }
 
-declare i32 @setjmp(i64* noundef) returns_twice
+declare i32 @setjmp(ptr noundef) returns_twice
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 

diff  --git a/llvm/test/CodeGen/AArch64/stack-tagging-split-lifetime.ll b/llvm/test/CodeGen/AArch64/stack-tagging-split-lifetime.ll
index f6a67baf00dba..5f3d6ed88dcd5 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-split-lifetime.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-split-lifetime.ll
@@ -3,21 +3,21 @@
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-arm-unknown-eabi"
 
-declare void @use8(i8*)
+declare void @use8(ptr)
 
 define  void @f(i1 %cond) local_unnamed_addr sanitize_memtag {
 start:
 ; CHECK-LABEL: start:
   %a = alloca i8, i32 48, align 8
-  call void @llvm.lifetime.start.p0i8(i64 48, i8* nonnull %a)
-  call void @use8(i8* %a)
-; CHECK: call void @llvm.aarch64.settag(i8* %a.tag, i64 48)
+  call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %a)
+  call void @use8(ptr %a)
+; CHECK: call void @llvm.aarch64.settag(ptr %a.tag, i64 48)
   br i1 %cond, label %next0, label %next1
 
 next0:
 ; CHECK-LABEL: next0:
 ; CHECK: call void @llvm.aarch64.settag
-  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  call void @llvm.lifetime.end.p0(i64 40, ptr nonnull %a)
   br label %exit0
 
 exit0:
@@ -28,7 +28,7 @@ exit0:
 next1:
 ; CHECK-LABEL: next1:
 ; CHECK: call void @llvm.aarch64.settag
-  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  call void @llvm.lifetime.end.p0(i64 40, ptr nonnull %a)
   br label %exit1
 
 exit1:
@@ -41,21 +41,21 @@ define  void @diamond(i1 %cond) local_unnamed_addr sanitize_memtag {
 start:
 ; CHECK-LABEL: start:
   %a = alloca i8, i32 48, align 8
-  call void @llvm.lifetime.start.p0i8(i64 48, i8* nonnull %a)
-  call void @use8(i8* %a)
-; CHECK: call void @llvm.aarch64.settag(i8* %a.tag, i64 48)
+  call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %a)
+  call void @use8(ptr %a)
+; CHECK: call void @llvm.aarch64.settag(ptr %a.tag, i64 48)
   br i1 %cond, label %next0, label %next1
 
 next0:
 ; CHECK-LABEL: next0:
 ; CHECK: call void @llvm.aarch64.settag
-  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  call void @llvm.lifetime.end.p0(i64 40, ptr nonnull %a)
   br label %exit1
 
 next1:
 ; CHECK-LABEL: next1:
 ; CHECK: call void @llvm.aarch64.settag
-  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  call void @llvm.lifetime.end.p0(i64 40, ptr nonnull %a)
   br label %exit1
 
 exit1:
@@ -68,15 +68,15 @@ define  void @diamond_nocover(i1 %cond) local_unnamed_addr sanitize_memtag {
 start:
 ; CHECK-LABEL: start:
   %a = alloca i8, i32 48, align 8
-  call void @llvm.lifetime.start.p0i8(i64 48, i8* nonnull %a)
-  call void @use8(i8* %a)
-; CHECK: call void @llvm.aarch64.settag(i8* %a.tag, i64 48)
+  call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %a)
+  call void @use8(ptr %a)
+; CHECK: call void @llvm.aarch64.settag(ptr %a.tag, i64 48)
   br i1 %cond, label %next0, label %next1
 
 next0:
 ; CHECK-LABEL: next0:
 ; CHECK-NOT: llvm.lifetime.end
-  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  call void @llvm.lifetime.end.p0(i64 40, ptr nonnull %a)
   br label %exit1
 
 next1:
@@ -94,9 +94,9 @@ define  void @diamond3(i1 %cond, i1 %cond1) local_unnamed_addr sanitize_memtag {
 start:
 ; CHECK-LABEL: start:
   %a = alloca i8, i32 48, align 8
-  call void @llvm.lifetime.start.p0i8(i64 48, i8* nonnull %a)
-  call void @use8(i8* %a)
-; CHECK: call void @llvm.aarch64.settag(i8* %a.tag, i64 48)
+  call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %a)
+  call void @use8(ptr %a)
+; CHECK: call void @llvm.aarch64.settag(ptr %a.tag, i64 48)
   br i1 %cond, label %next0, label %start1
 
 start1:
@@ -105,19 +105,19 @@ start1:
 next0:
 ; CHECK-LABEL: next0:
 ; CHECK: call void @llvm.aarch64.settag
-  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  call void @llvm.lifetime.end.p0(i64 40, ptr nonnull %a)
   br label %exit1
 
 next1:
 ; CHECK-LABEL: next1:
 ; CHECK: call void @llvm.aarch64.settag
-  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  call void @llvm.lifetime.end.p0(i64 40, ptr nonnull %a)
   br label %exit1
 
 next2:
 ; CHECK-LABEL: next2:
 ; CHECK: call void @llvm.aarch64.settag
-  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  call void @llvm.lifetime.end.p0(i64 40, ptr nonnull %a)
   br label %exit1
 
 exit1:
@@ -130,9 +130,9 @@ define  void @diamond3_nocover(i1 %cond, i1 %cond1) local_unnamed_addr sanitize_
 start:
 ; CHECK-LABEL: start:
   %a = alloca i8, i32 48, align 8
-  call void @llvm.lifetime.start.p0i8(i64 48, i8* nonnull %a)
-  call void @use8(i8* %a)
-; CHECK: call void @llvm.aarch64.settag(i8* %a.tag, i64 48)
+  call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %a)
+  call void @use8(ptr %a)
+; CHECK: call void @llvm.aarch64.settag(ptr %a.tag, i64 48)
   br i1 %cond, label %next0, label %start1
 
 start1:
@@ -141,13 +141,13 @@ start1:
 next0:
 ; CHECK-LABEL: next0:
 ; CHECK-NOT: call void @llvm.aarch64.settag
-  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  call void @llvm.lifetime.end.p0(i64 40, ptr nonnull %a)
   br label %exit1
 
 next1:
 ; CHECK-LABEL: next1:
 ; CHECK-NOT: call void @llvm.aarch64.settag
-  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %a)
+  call void @llvm.lifetime.end.p0(i64 40, ptr nonnull %a)
   br label %exit1
 
 next2:
@@ -161,5 +161,5 @@ exit1:
   ret void
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)

diff  --git a/llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll b/llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll
index aa38bb39bd347..6eb72013fb0ed 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-stack-coloring.ll
@@ -16,26 +16,24 @@ target triple = "aarch64-unknown-linux-android29"
 
 define i32 @myCall_w2(i32 %in) sanitize_hwaddress {
 entry:
-  %a = alloca [17 x i8*], align 8
-  %a2 = alloca [16 x i8*], align 8
-  %b = bitcast [17 x i8*]* %a to i8*
-  %b2 = bitcast [16 x i8*]* %a2 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 136, i8* %b)
-  %t1 = call i32 @foo(i32 %in, i8* %b)
-  %t2 = call i32 @foo(i32 %in, i8* %b)
-  call void @llvm.lifetime.end.p0i8(i64 136, i8* %b)
-  call void @llvm.lifetime.start.p0i8(i64 128, i8* %b2)
-  %t3 = call i32 @foo(i32 %in, i8* %b2)
-  %t4 = call i32 @foo(i32 %in, i8* %b2)
-  call void @llvm.lifetime.end.p0i8(i64 128, i8* %b2)
+  %a = alloca [17 x ptr], align 8
+  %a2 = alloca [16 x ptr], align 8
+  call void @llvm.lifetime.start.p0(i64 136, ptr %a)
+  %t1 = call i32 @foo(i32 %in, ptr %a)
+  %t2 = call i32 @foo(i32 %in, ptr %a)
+  call void @llvm.lifetime.end.p0(i64 136, ptr %a)
+  call void @llvm.lifetime.start.p0(i64 128, ptr %a2)
+  %t3 = call i32 @foo(i32 %in, ptr %a2)
+  %t4 = call i32 @foo(i32 %in, ptr %a2)
+  call void @llvm.lifetime.end.p0(i64 128, ptr %a2)
   %t5 = add i32 %t1, %t2
   %t6 = add i32 %t3, %t4
   %t7 = add i32 %t5, %t6
   ret i32 %t7
 }
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
 
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
 
-declare i32 @foo(i32, i8*)
+declare i32 @foo(i32, ptr)

diff  --git a/llvm/test/CodeGen/AArch64/stack-tagging-unchecked-ld-st.ll b/llvm/test/CodeGen/AArch64/stack-tagging-unchecked-ld-st.ll
index d791ba3431197..88c00a3304259 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-unchecked-ld-st.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-unchecked-ld-st.ll
@@ -2,19 +2,19 @@
 ; RUN: llc < %s -mtriple=aarch64 -mattr=+mte -stack-tagging-unchecked-ld-st=never | FileCheck %s --check-prefixes=NEVER,COMMON
 ; RUN: llc < %s -mtriple=aarch64 -mattr=+mte -stack-tagging-unchecked-ld-st=always | FileCheck %s --check-prefixes=ALWAYS,COMMON
 
-declare void @use8(i8*)
-declare void @use16(i16*)
-declare void @use32(i32*)
-declare void @use64(i64*)
-declare void @use2x64([2 x i64]*)
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @use8(ptr)
+declare void @use16(ptr)
+declare void @use32(ptr)
+declare void @use64(ptr)
+declare void @use2x64(ptr)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
 
 define i64 @CallLd64() sanitize_memtag {
 entry:
   %x = alloca i64, align 4
-  call void @use64(i64* %x)
-  %a = load i64, i64* %x
+  call void @use64(ptr %x)
+  %a = load i64, ptr %x
   ret i64 %a
 }
 
@@ -31,8 +31,8 @@ entry:
 define i32 @CallLd32() sanitize_memtag {
 entry:
   %x = alloca i32, align 4
-  call void @use32(i32* %x)
-  %a = load i32, i32* %x
+  call void @use32(ptr %x)
+  %a = load i32, ptr %x
   ret i32 %a
 }
 
@@ -49,8 +49,8 @@ entry:
 define i16 @CallLd16() sanitize_memtag {
 entry:
   %x = alloca i16, align 4
-  call void @use16(i16* %x)
-  %a = load i16, i16* %x
+  call void @use16(ptr %x)
+  %a = load i16, ptr %x
   ret i16 %a
 }
 
@@ -67,8 +67,8 @@ entry:
 define i8 @CallLd8() sanitize_memtag {
 entry:
   %x = alloca i8, align 4
-  call void @use8(i8* %x)
-  %a = load i8, i8* %x
+  call void @use8(ptr %x)
+  %a = load i8, ptr %x
   ret i8 %a
 }
 
@@ -85,9 +85,9 @@ entry:
 define void @CallSt64Call() sanitize_memtag {
 entry:
   %x = alloca i64, align 4
-  call void @use64(i64* %x)
-  store i64 42, i64* %x
-  call void @use64(i64* %x)
+  call void @use64(ptr %x)
+  store i64 42, ptr %x
+  call void @use64(ptr %x)
   ret void
 }
 
@@ -105,9 +105,9 @@ entry:
 define void @CallSt32Call() sanitize_memtag {
 entry:
   %x = alloca i32, align 4
-  call void @use32(i32* %x)
-  store i32 42, i32* %x
-  call void @use32(i32* %x)
+  call void @use32(ptr %x)
+  store i32 42, ptr %x
+  call void @use32(ptr %x)
   ret void
 }
 
@@ -125,9 +125,9 @@ entry:
 define void @CallSt16Call() sanitize_memtag {
 entry:
   %x = alloca i16, align 4
-  call void @use16(i16* %x)
-  store i16 42, i16* %x
-  call void @use16(i16* %x)
+  call void @use16(ptr %x)
+  store i16 42, ptr %x
+  call void @use16(ptr %x)
   ret void
 }
 
@@ -146,9 +146,9 @@ entry:
 define void @CallSt8Call() sanitize_memtag {
 entry:
   %x = alloca i8, align 4
-  call void @use8(i8* %x)
-  store i8 42, i8* %x
-  call void @use8(i8* %x)
+  call void @use8(ptr %x)
+  store i8 42, ptr %x
+  call void @use8(ptr %x)
   ret void
 }
 
@@ -166,12 +166,11 @@ entry:
 define void @CallStPair(i64 %z) sanitize_memtag {
 entry:
   %x = alloca [2 x i64], align 8
-  call void @use2x64([2 x i64]* %x)
-  %x0 = getelementptr inbounds [2 x i64], [2 x i64]* %x, i64 0, i64 0
-  store i64 %z, i64* %x0, align 8
-  %x1 = getelementptr inbounds [2 x i64], [2 x i64]* %x, i64 0, i64 1
-  store i64 %z, i64* %x1, align 8
-  call void @use2x64([2 x i64]* %x)
+  call void @use2x64(ptr %x)
+  store i64 %z, ptr %x, align 8
+  %x1 = getelementptr inbounds [2 x i64], ptr %x, i64 0, i64 1
+  store i64 %z, ptr %x1, align 8
+  call void @use2x64(ptr %x)
   ret void
 }
 
@@ -190,13 +189,11 @@ define dso_local i8 @LargeFrame() sanitize_memtag {
 entry:
   %x = alloca [4096 x i8], align 4
   %y = alloca [4096 x i8], align 4
-  %0 = getelementptr inbounds [4096 x i8], [4096 x i8]* %x, i64 0, i64 0
-  %1 = getelementptr inbounds [4096 x i8], [4096 x i8]* %y, i64 0, i64 0
-  call void @use8(i8* %0)
-  call void @use8(i8* %1)
-  %2 = load i8, i8* %0, align 4
-  %3 = load i8, i8* %1, align 4
-  %add = add i8 %3, %2
+  call void @use8(ptr %x)
+  call void @use8(ptr %y)
+  %0 = load i8, ptr %x, align 4
+  %1 = load i8, ptr %y, align 4
+  %add = add i8 %1, %0
   ret i8 %add
 }
 
@@ -224,15 +221,12 @@ define i8 @FPOffset() "frame-pointer"="all" sanitize_memtag {
   %x = alloca [200 x i8], align 4
   %y = alloca [200 x i8], align 4
   %z = alloca [200 x i8], align 4
-  %x0 = getelementptr inbounds [200 x i8], [200 x i8]* %x, i64 0, i64 0
-  %y0 = getelementptr inbounds [200 x i8], [200 x i8]* %y, i64 0, i64 0
-  %z0 = getelementptr inbounds [200 x i8], [200 x i8]* %z, i64 0, i64 0
-  call void @use8(i8* %x0)
-  call void @use8(i8* %y0)
-  call void @use8(i8* %z0)
-  %x1 = load i8, i8* %x0, align 4
-  %y1 = load i8, i8* %y0, align 4
-  %z1 = load i8, i8* %z0, align 4
+  call void @use8(ptr %x)
+  call void @use8(ptr %y)
+  call void @use8(ptr %z)
+  %x1 = load i8, ptr %x, align 4
+  %y1 = load i8, ptr %y, align 4
+  %z1 = load i8, ptr %z, align 4
   %a = add i8 %x1, %y1
   %b = add i8 %a, %z1
   ret i8 %b

diff  --git a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
index a73c79d6cc985..75d4419c45e72 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
@@ -5,11 +5,11 @@ target triple = "aarch64-arm-unknown-eabi"
 define void @f() local_unnamed_addr #0  {
 S0:
 ; CHECK-LABEL: S0:
-; CHECK: %basetag = call i8* @llvm.aarch64.irg.sp(i64 0)
+; CHECK: %basetag = call ptr @llvm.aarch64.irg.sp(i64 0)
   %v = alloca i8, i32 48, align 8
-; CHECK: %v.tag = call i8* @llvm.aarch64.tagp.p0i8(i8* %v, i8* %basetag, i64 0)
+; CHECK: %v.tag = call ptr @llvm.aarch64.tagp.p0(ptr %v, ptr %basetag, i64 0)
   %w = alloca i8, i32 48, align 16
-; CHECK: %w.tag = call i8* @llvm.aarch64.tagp.p0i8(i8* %w, i8* %basetag, i64 1)
+; CHECK: %w.tag = call ptr @llvm.aarch64.tagp.p0(ptr %w, ptr %basetag, i64 1)
 
   %t0 = call i32 @g0() #1
   %b0 = icmp eq i32 %t0, 0
@@ -17,17 +17,17 @@ S0:
 
 S1:
 ; CHECK-LABEL: S1:
-  call void @llvm.lifetime.start.p0i8(i64 48, i8 * nonnull %v) #1
-; CHECK: call void @llvm.aarch64.settag(i8* %v.tag, i64 48)
-  call void @llvm.lifetime.start.p0i8(i64 48, i8 * nonnull %w) #1
-; CHECK: call void @llvm.aarch64.settag(i8* %w.tag, i64 48)
-  %t1 = call i32 @g1(i8 * nonnull %v, i8 * nonnull %w) #1
+  call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %v) #1
+; CHECK: call void @llvm.aarch64.settag(ptr %v.tag, i64 48)
+  call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %w) #1
+; CHECK: call void @llvm.aarch64.settag(ptr %w.tag, i64 48)
+  %t1 = call i32 @g1(ptr nonnull %v, ptr nonnull %w) #1
 ; CHECK: call i32 @g1
 ; CHECK-NOT: settag{{.*}}%v
-; CHECK: call void @llvm.aarch64.settag(i8* %w, i64 48)
+; CHECK: call void @llvm.aarch64.settag(ptr %w, i64 48)
 ; CHECK-NOT: settag{{.*}}%v
-  call void @llvm.lifetime.end.p0i8(i64 48, i8 * nonnull %w) #1
-; CHECK: call void @llvm.lifetime.end.p0i8(i64 48, i8* nonnull %w.tag)
+  call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %w) #1
+; CHECK: call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %w.tag)
   %b1 = icmp eq i32 %t1, 0
   br i1 %b1, label %S2, label %S3
 ; CHECK-NOT: settag
@@ -40,19 +40,19 @@ S2:
 
 S3:
 ; CHECK-LABEL: S3:
-  call void @llvm.lifetime.end.p0i8(i64 48, i8 * nonnull %v) #1
+  call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %v) #1
   tail call void @z1() #1
   br label %exit2
 ; CHECK-NOT: settag
 
 exit1:
 ; CHECK-LABEL: exit1:
-; CHECK: call void @llvm.aarch64.settag(i8* %v, i64 48)
+; CHECK: call void @llvm.aarch64.settag(ptr %v, i64 48)
   ret void
 
 exit2:
 ; CHECK-LABEL: exit2:
-; CHECK: call void @llvm.aarch64.settag(i8* %v, i64 48)
+; CHECK: call void @llvm.aarch64.settag(ptr %v, i64 48)
   ret void
 
 exit3:
@@ -65,7 +65,7 @@ exit3:
 
 declare i32 @g0() #0
 
-declare i32 @g1(i8 *, i8 *) #0
+declare i32 @g1(ptr, ptr) #0
 
 declare void @z0() #0
 
@@ -73,9 +73,9 @@ declare void @z1() #0
 
 declare void @z2() #0
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8 * nocapture) #1
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
 
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8 * nocapture) #1
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
 
 attributes #0 = { sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/AArch64/stack-tagging.ll b/llvm/test/CodeGen/AArch64/stack-tagging.ll
index d91b6a6704d3e..8759fb12bea77 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging.ll
@@ -4,12 +4,12 @@
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--linux-android"
 
-declare void @use8(i8*)
-declare void @use32(i32*)
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @use8(ptr)
+declare void @use32(ptr)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
 
-define dso_local void @noUse32(i32*) sanitize_memtag {
+define dso_local void @noUse32(ptr) sanitize_memtag {
 entry:
   ret void
 }
@@ -17,20 +17,17 @@ entry:
 define void @OneVar() sanitize_memtag {
 entry:
   %x = alloca i32, align 4
-  call void @use32(i32* %x)
+  call void @use32(ptr %x)
   ret void
 }
 
 ; CHECK-LABEL: define void @OneVar(
-; CHECK:  [[BASE:%.*]] = call i8* @llvm.aarch64.irg.sp(i64 0)
+; CHECK:  [[BASE:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
 ; CHECK:  [[X:%.*]] = alloca { i32, [12 x i8] }, align 16
-; CHECK:  [[TX:%.*]] = call { i32, [12 x i8] }* @llvm.aarch64.tagp.{{.*}}({ i32, [12 x i8] }* [[X]], i8* [[BASE]], i64 0)
-; CHECK:  [[TX8:%.*]] = bitcast { i32, [12 x i8] }* [[TX]] to i8*
-; CHECK:  call void @llvm.aarch64.settag(i8* [[TX8]], i64 16)
-; CHECK:  [[GEP32:%.*]] = bitcast { i32, [12 x i8] }* [[TX]] to i32*
-; CHECK:  call void @use32(i32* [[GEP32]])
-; CHECK:  [[GEP8:%.*]] = bitcast { i32, [12 x i8] }* [[X]] to i8*
-; CHECK:  call void @llvm.aarch64.settag(i8* [[GEP8]], i64 16)
+; CHECK:  [[TX:%.*]] = call ptr @llvm.aarch64.tagp.{{.*}}(ptr [[X]], ptr [[BASE]], i64 0)
+; CHECK:  call void @llvm.aarch64.settag(ptr [[TX]], i64 16)
+; CHECK:  call void @use32(ptr [[TX]])
+; CHECK:  call void @llvm.aarch64.settag(ptr [[X]], i64 16)
 ; CHECK:  ret void
 
 
@@ -40,22 +37,22 @@ entry:
   %x2 = alloca i8, align 4
   %x3 = alloca i32, i32 11, align 4
   %x4 = alloca i32, align 4
-  call void @use32(i32* %x1)
-  call void @use8(i8* %x2)
-  call void @use32(i32* %x3)
+  call void @use32(ptr %x1)
+  call void @use8(ptr %x2)
+  call void @use32(ptr %x3)
   ret void
 }
 
 ; CHECK-LABEL: define void @ManyVars(
 ; CHECK:  alloca { i32, [12 x i8] }, align 16
-; CHECK:  call { i32, [12 x i8] }* @llvm.aarch64.tagp.{{.*}}({ i32, [12 x i8] }* {{.*}}, i64 0)
-; CHECK:  call void @llvm.aarch64.settag(i8* {{.*}}, i64 16)
+; CHECK:  call ptr @llvm.aarch64.tagp.{{.*}}(ptr {{.*}}, i64 0)
+; CHECK:  call void @llvm.aarch64.settag(ptr {{.*}}, i64 16)
 ; CHECK:  alloca { i8, [15 x i8] }, align 16
-; CHECK:  call { i8, [15 x i8] }* @llvm.aarch64.tagp.{{.*}}({ i8, [15 x i8] }* {{.*}}, i64 1)
-; CHECK:  call void @llvm.aarch64.settag(i8* {{.*}}, i64 16)
+; CHECK:  call ptr @llvm.aarch64.tagp.{{.*}}(ptr {{.*}}, i64 1)
+; CHECK:  call void @llvm.aarch64.settag(ptr {{.*}}, i64 16)
 ; CHECK:  alloca { [11 x i32], [4 x i8] }, align 16
-; CHECK:  call { [11 x i32], [4 x i8] }* @llvm.aarch64.tagp.{{.*}}({ [11 x i32], [4 x i8] }* {{.*}}, i64 2)
-; CHECK:  call void @llvm.aarch64.settag(i8* {{.*}}, i64 48)
+; CHECK:  call ptr @llvm.aarch64.tagp.{{.*}}(ptr {{.*}}, i64 2)
+; CHECK:  call void @llvm.aarch64.settag(ptr {{.*}}, i64 48)
 ; CHECK:  alloca i32, align 4
 ; SSI-NOT: @llvm.aarch64.tagp
 ; SSI-NOT: @llvm.aarch64.settag
@@ -64,9 +61,9 @@ entry:
 ; CHECK:  call void @use8(
 ; CHECK:  call void @use32(
 
-; CHECK:  call void @llvm.aarch64.settag(i8* {{.*}}, i64 16)
-; CHECK:  call void @llvm.aarch64.settag(i8* {{.*}}, i64 16)
-; CHECK:  call void @llvm.aarch64.settag(i8* {{.*}}, i64 48)
+; CHECK:  call void @llvm.aarch64.settag(ptr {{.*}}, i64 16)
+; CHECK:  call void @llvm.aarch64.settag(ptr {{.*}}, i64 16)
+; CHECK:  call void @llvm.aarch64.settag(ptr {{.*}}, i64 48)
 ; CHECK-NEXT:  ret void
 
 
@@ -77,10 +74,9 @@ entry:
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:
-  %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
-  call void @use8(i8* %0) #3
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+  call void @use8(ptr %x) #3
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x)
   br label %if.end
 
 if.end:
@@ -89,11 +85,11 @@ if.end:
 
 ; CHECK-LABEL: define void @Scope(
 ; CHECK:  br i1
-; CHECK:  call void @llvm.lifetime.start.p0i8(
+; CHECK:  call void @llvm.lifetime.start.p0(
 ; CHECK:  call void @llvm.aarch64.settag(
 ; CHECK:  call void @use8(
 ; CHECK:  call void @llvm.aarch64.settag(
-; CHECK:  call void @llvm.lifetime.end.p0i8(
+; CHECK:  call void @llvm.lifetime.end.p0(
 ; CHECK:  br label
 ; CHECK:  ret void
 
@@ -106,14 +102,13 @@ entry:
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:
-  %0 = bitcast i32* %x to i8*
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
-  call void @use8(i8* %0) #3
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
-
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
-  call void @use8(i8* %0) #3
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+  call void @use8(ptr %x) #3
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x)
+
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+  call void @use8(ptr %x) #3
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x)
   br label %if.end
 
 if.end:
@@ -121,12 +116,12 @@ if.end:
 }
 
 ; CHECK-LABEL: define void @BadScope(
-; CHECK:       call void @llvm.aarch64.settag(i8* {{.*}}, i64 16)
+; CHECK:       call void @llvm.aarch64.settag(ptr {{.*}}, i64 16)
 ; CHECK:       br i1
-; CHECK:       call void @use8(i8*
-; CHECK-NEXT:  call void @use8(i8*
+; CHECK:       call void @use8(ptr
+; CHECK-NEXT:  call void @use8(ptr
 ; CHECK:       br label
-; CHECK:       call void @llvm.aarch64.settag(i8* {{.*}}, i64 16)
+; CHECK:       call void @llvm.aarch64.settag(ptr {{.*}}, i64 16)
 ; CHECK-NEXT:  ret void
 
 define void @DynamicAllocas(i32 %cnt) sanitize_memtag {
@@ -135,8 +130,8 @@ entry:
   br label %l
 l:
   %y = alloca i32, align 4
-  call void @use32(i32* %x)
-  call void @use32(i32* %y)
+  call void @use32(ptr %x)
+  call void @use32(ptr %y)
   ret void
 }
 
@@ -156,46 +151,43 @@ entry:
   %x = alloca i32, align 4
   %y = alloca i32, align 4
   %z = alloca i32, align 4
-  %cx = bitcast i32* %x to i8*
-  %cy = bitcast i32* %y to i8*
-  %cz = bitcast i32* %z to i8*
   %tobool = icmp eq i8 %v, 0
-  %xy = select i1 %tobool, i32* %x, i32* %y
-  %cxcy = select i1 %tobool, i8* %cx, i8* %cy
+  %xy = select i1 %tobool, ptr %x, ptr %y
+  %cxcy = select i1 %tobool, ptr %x, ptr %y
   br label %another_bb
 
 another_bb:
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %cz)
-  store i32 7, i32* %z
-  call void @noUse32(i32* %z)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %cz)
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %cz)
-  store i32 7, i32* %z
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %cz)
-  call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %cxcy)
-  store i32 8, i32* %xy
-  call void @noUse32(i32* %x)
-  call void @noUse32(i32* %y)
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %cxcy)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %z)
+  store i32 7, ptr %z
+  call void @noUse32(ptr %z)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %z)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %z)
+  store i32 7, ptr %z
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %z)
+  call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %cxcy)
+  store i32 8, ptr %xy
+  call void @noUse32(ptr %x)
+  call void @noUse32(ptr %y)
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %cxcy)
   ret void
 }
 
 ; CHECK-LABEL: define void @UnrecognizedLifetime(
-; CHECK: call i8* @llvm.aarch64.irg.sp(i64 0)
+; CHECK: call ptr @llvm.aarch64.irg.sp(i64 0)
 ; CHECK: alloca { i32, [12 x i8] }, align 16
-; CHECK: call { i32, [12 x i8] }* @llvm.aarch64.tagp
+; CHECK: call ptr @llvm.aarch64.tagp
 ; CHECK: call void @llvm.aarch64.settag(
 ; CHECK: alloca { i32, [12 x i8] }, align 16
-; CHECK: call { i32, [12 x i8] }* @llvm.aarch64.tagp
+; CHECK: call ptr @llvm.aarch64.tagp
 ; CHECK: call void @llvm.aarch64.settag(
 ; CHECK: alloca { i32, [12 x i8] }, align 16
-; CHECK: call { i32, [12 x i8] }* @llvm.aarch64.tagp
+; CHECK: call ptr @llvm.aarch64.tagp
 ; CHECK: call void @llvm.aarch64.settag(
 ; CHECK: store i32
-; CHECK: call void @noUse32(i32*
+; CHECK: call void @noUse32(ptr
 ; CHECK: store i32
 ; CHECK: store i32
-; CHECK: call void @noUse32(i32*
+; CHECK: call void @noUse32(ptr
 ; CHECK: call void @llvm.aarch64.settag(
 ; CHECK: call void @llvm.aarch64.settag(
 ; CHECK: call void @llvm.aarch64.settag(

diff  --git a/llvm/test/CodeGen/AArch64/stack_guard_remat.ll b/llvm/test/CodeGen/AArch64/stack_guard_remat.ll
index 3206cc7d98212..e6a3432458c7d 100644
--- a/llvm/test/CodeGen/AArch64/stack_guard_remat.ll
+++ b/llvm/test/CodeGen/AArch64/stack_guard_remat.ll
@@ -36,21 +36,19 @@
 define i32 @test_stack_guard_remat() #0 {
 entry:
   %a1 = alloca [256 x i32], align 4
-  %0 = bitcast [256 x i32]* %a1 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 1024, i8* %0)
-  %arraydecay = getelementptr inbounds [256 x i32], [256 x i32]* %a1, i64 0, i64 0
-  call void @foo3(i32* %arraydecay)
+  call void @llvm.lifetime.start.p0(i64 1024, ptr %a1)
+  call void @foo3(ptr %a1)
   call void asm sideeffect "foo2", "~{w0},~{w1},~{w2},~{w3},~{w4},~{w5},~{w6},~{w7},~{w8},~{w9},~{w10},~{w11},~{w12},~{w13},~{w14},~{w15},~{w16},~{w17},~{w18},~{w19},~{w20},~{w21},~{w22},~{w23},~{w24},~{w25},~{w26},~{w27},~{w28},~{w29},~{w30}"()
-  call void @llvm.lifetime.end.p0i8(i64 1024, i8* %0)
+  call void @llvm.lifetime.end.p0(i64 1024, ptr %a1)
   ret i32 0
 }
 
 ; Function Attrs: nounwind
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
 
-declare void @foo3(i32*)
+declare void @foo3(ptr)
 
 ; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
 
 attributes #0 = { nounwind sspstrong "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff  --git a/llvm/test/CodeGen/AArch64/stackguard-internal.ll b/llvm/test/CodeGen/AArch64/stackguard-internal.ll
index 6dcdf1619851e..a70c8874edbac 100644
--- a/llvm/test/CodeGen/AArch64/stackguard-internal.ll
+++ b/llvm/test/CodeGen/AArch64/stackguard-internal.ll
@@ -14,8 +14,7 @@ target triple = "aarch64-linux-gnu"
 define i32 @b() nounwind sspstrong {
 entry:
   %z = alloca [10 x i32], align 4
-  %arraydecay = getelementptr inbounds [10 x i32], [10 x i32]* %z, i64 0, i64 0
-  %call = call i32 @a(i32* getelementptr inbounds ([8 x i32], [8 x i32]* @__stack_chk_guard, i64 0, i64 0), i32* nonnull @x, i32* nonnull %arraydecay) #3
+  %call = call i32 @a(ptr @__stack_chk_guard, ptr nonnull @x, ptr nonnull %z) #3
   ret i32 %call
 }
-declare i32 @a(i32*, i32*, i32*)
+declare i32 @a(ptr, ptr, ptr)

diff  --git a/llvm/test/CodeGen/AArch64/stackmap-dynamic-alloca.ll b/llvm/test/CodeGen/AArch64/stackmap-dynamic-alloca.ll
index 46e3375405ab4..232719cd706d7 100644
--- a/llvm/test/CodeGen/AArch64/stackmap-dynamic-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/stackmap-dynamic-alloca.ll
@@ -23,7 +23,7 @@
 define void @f(i32 %nelems) {
 entry:
   %mem = alloca i32, i32 %nelems
-  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 0, i32 0, i32* %mem)
+  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 0, i32 0, ptr %mem)
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/stackmap-frame-setup.ll b/llvm/test/CodeGen/AArch64/stackmap-frame-setup.ll
index 677ff8dc25306..61568b7505678 100644
--- a/llvm/test/CodeGen/AArch64/stackmap-frame-setup.ll
+++ b/llvm/test/CodeGen/AArch64/stackmap-frame-setup.ll
@@ -4,13 +4,13 @@
 define void @caller_meta_leaf() {
 entry:
   %metadata = alloca i64, i32 3, align 8
-  store i64 11, i64* %metadata
-  store i64 12, i64* %metadata
-  store i64 13, i64* %metadata
+  store i64 11, ptr %metadata
+  store i64 12, ptr %metadata
+  store i64 13, ptr %metadata
 ; ISEL:      ADJCALLSTACKDOWN 0, 0, implicit-def
 ; ISEL-NEXT: STACKMAP
 ; ISEL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def
-  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
+  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, ptr %metadata)
 ; FAST-ISEL:      ADJCALLSTACKDOWN 0, 0, implicit-def
 ; FAST-ISEL-NEXT: STACKMAP
 ; FAST-ISEL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def

diff  --git a/llvm/test/CodeGen/AArch64/statepoint-call-lowering-lr.ll b/llvm/test/CodeGen/AArch64/statepoint-call-lowering-lr.ll
index c07360810f4b2..00a1fa747d77e 100644
--- a/llvm/test/CodeGen/AArch64/statepoint-call-lowering-lr.ll
+++ b/llvm/test/CodeGen/AArch64/statepoint-call-lowering-lr.ll
@@ -6,11 +6,11 @@ target triple = "aarch64-unknown-linux-gnu"
 
 define void @test() "frame-pointer"="all" gc "statepoint-example" {
 entry:
-  %safepoint_token = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, void ()* elementtype(void ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live" ()]
+  %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live" ()]
 ; CHECK: STATEPOINT 0, 0, 0, @return_i1, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, csr_aarch64_aapcs, implicit-def $sp, implicit-def dead early-clobber $lr
   ret void
 }
 
 
 declare void @return_i1()
-declare token @llvm.experimental.gc.statepoint.p0f_i1f(i64, i32, void ()*, i32, i32, ...)
+declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...)

diff  --git a/llvm/test/CodeGen/AArch64/statepoint-call-lowering-sp.ll b/llvm/test/CodeGen/AArch64/statepoint-call-lowering-sp.ll
index 9532b299420dd..05b504f5635a5 100644
--- a/llvm/test/CodeGen/AArch64/statepoint-call-lowering-sp.ll
+++ b/llvm/test/CodeGen/AArch64/statepoint-call-lowering-sp.ll
@@ -4,20 +4,20 @@
 target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "aarch64-unknown-linux-gnu"
 
-declare void @consume(i32 addrspace(1)* %obj)
+declare void @consume(ptr addrspace(1) %obj)
 
-define i1 @test(i32 addrspace(1)* %a) "frame-pointer"="all" gc "statepoint-example" {
+define i1 @test(ptr addrspace(1) %a) "frame-pointer"="all" gc "statepoint-example" {
 entry:
-  %safepoint_token = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32 addrspace(1)* %a)]
+  %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %a)]
 ; CHECK: STATEPOINT 0, 0, 0, @return_i1, 2, 0, 2, 0, 2, 0, 2, 1, 1, 8, $sp, 24, 2, 0, 2, 1, 0, 0
-  %call1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token,  i32 0, i32 0)
+  %call1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token,  i32 0, i32 0)
   %call2 = call zeroext i1 @llvm.experimental.gc.result.i1(token %safepoint_token)
-  call void @consume(i32 addrspace(1)* %call1)
+  call void @consume(ptr addrspace(1) %call1)
   ret i1 %call2
 }
 
 
 declare i1 @return_i1()
-declare token @llvm.experimental.gc.statepoint.p0f_i1f(i64, i32, i1 ()*, i32, i32, ...)
-declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token, i32, i32)
+declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...)
+declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32, i32)
 declare i1 @llvm.experimental.gc.result.i1(token)

diff  --git a/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll b/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
index ddd39369f733b..1344ac54e149d 100644
--- a/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
+++ b/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
@@ -10,7 +10,7 @@ target triple = "aarch64-unknown-linux-gnu"
 
 declare zeroext i1 @return_i1()
 declare zeroext i32 @return_i32()
-declare i32* @return_i32ptr()
+declare ptr @return_i32ptr()
 declare float @return_float()
 declare %struct @return_struct()
 declare void @varargf(i32, ...)
@@ -29,7 +29,7 @@ define i1 @test_i1_return() gc "statepoint-example" {
 ; This is just checking that an i1 gets lowered normally when there's no extra
 ; state arguments to the statepoint
 entry:
-  %safepoint_token = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0)
+  %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0)
   %call1 = call zeroext i1 @llvm.experimental.gc.result.i1(token %safepoint_token)
   ret i1 %call1
 }
@@ -45,12 +45,12 @@ define i32 @test_i32_return() gc "statepoint-example" {
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
-  %safepoint_token = tail call token (i64, i32, i32 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i32f(i64 0, i32 0, i32 ()* elementtype(i32 ()) @return_i32, i32 0, i32 0, i32 0, i32 0)
+  %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i32 ()) @return_i32, i32 0, i32 0, i32 0, i32 0)
   %call1 = call zeroext i32 @llvm.experimental.gc.result.i32(token %safepoint_token)
   ret i32 %call1
 }
 
-define i32* @test_i32ptr_return() gc "statepoint-example" {
+define ptr @test_i32ptr_return() gc "statepoint-example" {
 ; CHECK-LABEL: test_i32ptr_return:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
@@ -61,9 +61,9 @@ define i32* @test_i32ptr_return() gc "statepoint-example" {
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
-  %safepoint_token = tail call token (i64, i32, i32* ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_p0i32f(i64 0, i32 0, i32* ()* elementtype(i32* ()) @return_i32ptr, i32 0, i32 0, i32 0, i32 0)
-  %call1 = call i32* @llvm.experimental.gc.result.p0i32(token %safepoint_token)
-  ret i32* %call1
+  %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(ptr ()) @return_i32ptr, i32 0, i32 0, i32 0, i32 0)
+  %call1 = call ptr @llvm.experimental.gc.result.p0(token %safepoint_token)
+  ret ptr %call1
 }
 
 define float @test_float_return() gc "statepoint-example" {
@@ -77,7 +77,7 @@ define float @test_float_return() gc "statepoint-example" {
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
-  %safepoint_token = tail call token (i64, i32, float ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_f32f(i64 0, i32 0, float ()* elementtype(float ()) @return_float, i32 0, i32 0, i32 0, i32 0)
+  %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(float ()) @return_float, i32 0, i32 0, i32 0, i32 0)
   %call1 = call float @llvm.experimental.gc.result.f32(token %safepoint_token)
   ret float %call1
 }
@@ -93,12 +93,12 @@ define %struct @test_struct_return() gc "statepoint-example" {
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
-  %safepoint_token = tail call token (i64, i32, %struct ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_structf(i64 0, i32 0, %struct ()* elementtype(%struct ()) @return_struct, i32 0, i32 0, i32 0, i32 0)
+  %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(%struct ()) @return_struct, i32 0, i32 0, i32 0, i32 0)
   %call1 = call %struct @llvm.experimental.gc.result.struct(token %safepoint_token)
   ret %struct %call1
 }
 
-define i1 @test_relocate(i32 addrspace(1)* %a) gc "statepoint-example" {
+define i1 @test_relocate(ptr addrspace(1) %a) gc "statepoint-example" {
 ; CHECK-LABEL: test_relocate:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stp x30, x0, [sp, #-16]! // 8-byte Folded Spill
@@ -111,8 +111,8 @@ define i1 @test_relocate(i32 addrspace(1)* %a) gc "statepoint-example" {
 ; CHECK-NEXT:    ret
 ; Check that an unused relocate has no code-generation impact
 entry:
-  %safepoint_token = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32 addrspace(1)* %a)]
-  %call1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token,  i32 0, i32 0)
+  %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %a)]
+  %call1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token,  i32 0, i32 0)
   %call2 = call zeroext i1 @llvm.experimental.gc.result.i1(token %safepoint_token)
   ret i1 %call2
 }
@@ -129,9 +129,9 @@ define void @test_void_vararg() gc "statepoint-example" {
 ; CHECK-NEXT:  .Ltmp6:
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-; Check a statepoint wrapping a *void* returning vararg function works
+; Check that a statepoint wrapping a *void*-returning vararg function works
 entry:
-  %safepoint_token = tail call token (i64, i32, void (i32, ...)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidi32varargf(i64 0, i32 0, void (i32, ...)* elementtype(void (i32, ...)) @varargf, i32 2, i32 0, i32 42, i32 43, i32 0, i32 0)
+  %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void (i32, ...)) @varargf, i32 2, i32 0, i32 42, i32 43, i32 0, i32 0)
   ;; if we try to use the result from a statepoint wrapping a
   ;; non-void-returning varargf, we will experience a crash.
   ret void
@@ -150,14 +150,14 @@ define i1 @test_i1_return_patchable() gc "statepoint-example" {
 ; CHECK-NEXT:    ret
 ; A patchable variant of test_i1_return
 entry:
-  %safepoint_token = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 4, i1 ()* elementtype(i1 ()) null, i32 0, i32 0, i32 0, i32 0)
+  %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 4, ptr elementtype(i1 ()) null, i32 0, i32 0, i32 0, i32 0)
   %call1 = call zeroext i1 @llvm.experimental.gc.result.i1(token %safepoint_token)
   ret i1 %call1
 }
 
-declare void @consume(i32 addrspace(1)* %obj)
+declare void @consume(ptr addrspace(1) %obj)
 
-define i1 @test_cross_bb(i32 addrspace(1)* %a, i1 %external_cond) gc "statepoint-example" {
+define i1 @test_cross_bb(ptr addrspace(1) %a, i1 %external_cond) gc "statepoint-example" {
 ; CHECK-LABEL: test_cross_bb:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x30, [sp, #-32]! // 8-byte Folded Spill
@@ -184,13 +184,13 @@ define i1 @test_cross_bb(i32 addrspace(1)* %a, i1 %external_cond) gc "statepoint
 ; CHECK-NEXT:    ldr x30, [sp], #32 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
-  %safepoint_token = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32 addrspace(1)* %a)]
+  %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %a)]
   br i1 %external_cond, label %left, label %right
 
 left:
-  %call1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token,  i32 0, i32 0)
+  %call1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token,  i32 0, i32 0)
   %call2 = call zeroext i1 @llvm.experimental.gc.result.i1(token %safepoint_token)
-  call void @consume(i32 addrspace(1)* %call1)
+  call void @consume(ptr addrspace(1) %call1)
   ret i1 %call2
 
 right:
@@ -199,9 +199,9 @@ right:
 
 %struct2 = type { i64, i64, i64 }
 
-declare void @consume_attributes(i32, i8* nest, i32, %struct2* byval(%struct2))
+declare void @consume_attributes(i32, ptr nest, i32, ptr byval(%struct2))
 
-define void @test_attributes(%struct2* byval(%struct2) %s) gc "statepoint-example" {
+define void @test_attributes(ptr byval(%struct2) %s) gc "statepoint-example" {
 ; CHECK-LABEL: test_attributes:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #48
@@ -223,27 +223,21 @@ define void @test_attributes(%struct2* byval(%struct2) %s) gc "statepoint-exampl
 entry:
 ; Check that arguments with attributes are lowered correctly.
 ; We call a function that has a nest argument and a byval argument.
-  %statepoint_token = call token (i64, i32, void (i32, i8*, i32, %struct2*)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidi32p0i8i32p0s_struct2sf(i64 0, i32 0, void (i32, i8*, i32, %struct2*)* elementtype(void (i32, i8*, i32, %struct2*)) @consume_attributes, i32 4, i32 0, i32 42, i8* nest null, i32 17, %struct2* byval(%struct2) %s, i32 0, i32 0)
+  %statepoint_token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void (i32, ptr, i32, ptr)) @consume_attributes, i32 4, i32 0, i32 42, ptr nest null, i32 17, ptr byval(%struct2) %s, i32 0, i32 0)
   ret void
 }
 
-declare token @llvm.experimental.gc.statepoint.p0f_i1f(i64, i32, i1 ()*, i32, i32, ...)
+declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...)
 declare i1 @llvm.experimental.gc.result.i1(token)
 
-declare token @llvm.experimental.gc.statepoint.p0f_i32f(i64, i32, i32 ()*, i32, i32, ...)
 declare i32 @llvm.experimental.gc.result.i32(token)
 
-declare token @llvm.experimental.gc.statepoint.p0f_p0i32f(i64, i32, i32* ()*, i32, i32, ...)
-declare i32* @llvm.experimental.gc.result.p0i32(token)
+declare ptr @llvm.experimental.gc.result.p0(token)
 
-declare token @llvm.experimental.gc.statepoint.p0f_f32f(i64, i32, float ()*, i32, i32, ...)
 declare float @llvm.experimental.gc.result.f32(token)
 
-declare token @llvm.experimental.gc.statepoint.p0f_structf(i64, i32, %struct ()*, i32, i32, ...)
 declare %struct @llvm.experimental.gc.result.struct(token)
 
-declare token @llvm.experimental.gc.statepoint.p0f_isVoidi32varargf(i64, i32, void (i32, ...)*, i32, i32, ...)
 
-declare token @llvm.experimental.gc.statepoint.p0f_isVoidi32p0i8i32p0s_struct2sf(i64, i32, void (i32, i8*, i32, %struct2*)*, i32, i32, ...)
 
-declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token, i32, i32)
+declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32, i32)

diff  --git a/llvm/test/CodeGen/AArch64/stgp.ll b/llvm/test/CodeGen/AArch64/stgp.ll
index efccd3a041d41..efe832359aae1 100644
--- a/llvm/test/CodeGen/AArch64/stgp.ll
+++ b/llvm/test/CodeGen/AArch64/stgp.ll
@@ -1,64 +1,64 @@
 ; RUN: llc < %s -mtriple=aarch64 -mattr=+mte | FileCheck %s
 
-define void @stgp0(i64 %a, i64 %b, i8* %p) {
+define void @stgp0(i64 %a, i64 %b, ptr %p) {
 entry:
 ; CHECK-LABEL: stgp0:
 ; CHECK: stgp x0, x1, [x2]
 ; CHECK: ret
-  call void @llvm.aarch64.stgp(i8* %p, i64 %a, i64 %b)
+  call void @llvm.aarch64.stgp(ptr %p, i64 %a, i64 %b)
   ret void
 }
 
-define void @stgp1004(i64 %a, i64 %b, i8* %p) {
+define void @stgp1004(i64 %a, i64 %b, ptr %p) {
 entry:
 ; CHECK-LABEL: stgp1004:
 ; CHECK: add [[R:x[0-9]+]], x2, #1004
 ; CHECK: stgp x0, x1, [[[R]]]
 ; CHECK: ret
-  %q = getelementptr i8, i8* %p, i32 1004
-  call void @llvm.aarch64.stgp(i8* %q, i64 %a, i64 %b)
+  %q = getelementptr i8, ptr %p, i32 1004
+  call void @llvm.aarch64.stgp(ptr %q, i64 %a, i64 %b)
   ret void
 }
 
-define void @stgp1008(i64 %a, i64 %b, i8* %p) {
+define void @stgp1008(i64 %a, i64 %b, ptr %p) {
 entry:
 ; CHECK-LABEL: stgp1008:
 ; CHECK: stgp x0, x1, [x2, #1008]
 ; CHECK: ret
-  %q = getelementptr i8, i8* %p, i32 1008
-  call void @llvm.aarch64.stgp(i8* %q, i64 %a, i64 %b)
+  %q = getelementptr i8, ptr %p, i32 1008
+  call void @llvm.aarch64.stgp(ptr %q, i64 %a, i64 %b)
   ret void
 }
 
-define void @stgp1024(i64 %a, i64 %b, i8* %p) {
+define void @stgp1024(i64 %a, i64 %b, ptr %p) {
 entry:
 ; CHECK-LABEL: stgp1024:
 ; CHECK: add [[R:x[0-9]+]], x2, #1024
 ; CHECK: stgp x0, x1, [[[R]]]
 ; CHECK: ret
-  %q = getelementptr i8, i8* %p, i32 1024
-  call void @llvm.aarch64.stgp(i8* %q, i64 %a, i64 %b)
+  %q = getelementptr i8, ptr %p, i32 1024
+  call void @llvm.aarch64.stgp(ptr %q, i64 %a, i64 %b)
   ret void
 }
 
-define void @stgp_1024(i64 %a, i64 %b, i8* %p) {
+define void @stgp_1024(i64 %a, i64 %b, ptr %p) {
 entry:
 ; CHECK-LABEL: stgp_1024:
 ; CHECK: stgp x0, x1, [x2, #-1024]
 ; CHECK: ret
-  %q = getelementptr i8, i8* %p, i32 -1024
-  call void @llvm.aarch64.stgp(i8* %q, i64 %a, i64 %b)
+  %q = getelementptr i8, ptr %p, i32 -1024
+  call void @llvm.aarch64.stgp(ptr %q, i64 %a, i64 %b)
   ret void
 }
 
-define void @stgp_1040(i64 %a, i64 %b, i8* %p) {
+define void @stgp_1040(i64 %a, i64 %b, ptr %p) {
 entry:
 ; CHECK-LABEL: stgp_1040:
 ; CHECK: sub [[R:x[0-9]+]], x2, #1040
 ; CHECK: stgp x0, x1, [x{{.*}}]
 ; CHECK: ret
-  %q = getelementptr i8, i8* %p, i32 -1040
-  call void @llvm.aarch64.stgp(i8* %q, i64 %a, i64 %b)
+  %q = getelementptr i8, ptr %p, i32 -1040
+  call void @llvm.aarch64.stgp(ptr %q, i64 %a, i64 %b)
   ret void
 }
 
@@ -69,10 +69,10 @@ entry:
 ; CHECK: stgp x1, x0, [sp, #16]
 ; CHECK: ret
   %x = alloca i8, i32 32, align 16
-  call void @llvm.aarch64.stgp(i8* %x, i64 %a, i64 %b)
-  %x1 = getelementptr i8, i8* %x, i32 16
-  call void @llvm.aarch64.stgp(i8* %x1, i64 %b, i64 %a)
+  call void @llvm.aarch64.stgp(ptr %x, i64 %a, i64 %b)
+  %x1 = getelementptr i8, ptr %x, i32 16
+  call void @llvm.aarch64.stgp(ptr %x1, i64 %b, i64 %a)
   ret void
 }
 
-declare void @llvm.aarch64.stgp(i8* %p, i64 %a, i64 %b)
+declare void @llvm.aarch64.stgp(ptr %p, i64 %a, i64 %b)

diff  --git a/llvm/test/CodeGen/AArch64/store_merge_pair_offset.ll b/llvm/test/CodeGen/AArch64/store_merge_pair_offset.ll
index a091f0fd911c6..787f711ef9a4c 100644
--- a/llvm/test/CodeGen/AArch64/store_merge_pair_offset.ll
+++ b/llvm/test/CodeGen/AArch64/store_merge_pair_offset.ll
@@ -1,12 +1,12 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 -disable-lsr -verify-machineinstrs -enable-misched=false -enable-post-misched=false -o - %s | FileCheck %s
 
-define i64 @test(i64* %a) nounwind {
+define i64 @test(ptr %a) nounwind {
   ; CHECK: ldp	x{{[0-9]+}}, x{{[0-9]+}}
   ; CHECK-NOT: ldr
-  %p1 = getelementptr inbounds i64, i64* %a, i32 64
-  %tmp1 = load i64, i64* %p1, align 2
-  %p2 = getelementptr inbounds i64, i64* %a, i32 63
-  %tmp2 = load i64, i64* %p2, align 2
+  %p1 = getelementptr inbounds i64, ptr %a, i32 64
+  %tmp1 = load i64, ptr %p1, align 2
+  %p2 = getelementptr inbounds i64, ptr %a, i32 63
+  %tmp2 = load i64, ptr %p2, align 2
   %tmp3 = add i64 %tmp1, %tmp2
   ret i64 %tmp3
 }

diff  --git a/llvm/test/CodeGen/AArch64/storepairsuppress_minsize.ll b/llvm/test/CodeGen/AArch64/storepairsuppress_minsize.ll
index bfcb71e62e8bd..f1e624a4838d4 100644
--- a/llvm/test/CodeGen/AArch64/storepairsuppress_minsize.ll
+++ b/llvm/test/CodeGen/AArch64/storepairsuppress_minsize.ll
@@ -27,7 +27,7 @@ define void @test_default() uwtable {
 ; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
   %1 = call %T_IN_BLOCK @return_in_block()
-  store %T_IN_BLOCK %1, %T_IN_BLOCK* @in_block_store
+  store %T_IN_BLOCK %1, ptr @in_block_store
   ret void
 }
 
@@ -46,7 +46,7 @@ define void @test_minsize() minsize uwtable {
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %1 = call %T_IN_BLOCK @return_in_block()
-  store %T_IN_BLOCK %1, %T_IN_BLOCK* @in_block_store
+  store %T_IN_BLOCK %1, ptr @in_block_store
   ret void
 }
 
@@ -67,6 +67,6 @@ define void @test_optsize() optsize uwtable {
 ; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
   %1 = call %T_IN_BLOCK @return_in_block()
-  store %T_IN_BLOCK %1, %T_IN_BLOCK* @in_block_store
+  store %T_IN_BLOCK %1, ptr @in_block_store
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/strqro.ll b/llvm/test/CodeGen/AArch64/strqro.ll
index 3705c4a81abd2..72e2460ecdff4 100644
--- a/llvm/test/CodeGen/AArch64/strqro.ll
+++ b/llvm/test/CodeGen/AArch64/strqro.ll
@@ -7,8 +7,8 @@
 ; CHECK-NOSTRQRO-NOT: str q{{[0-9]+}}, [x{{[0-9]+}}, x
 define void @strqrox(fp128 %val64, i64 %base, i64 %offset) {
   %addrint = add i64 %base, %offset
-  %addr = inttoptr i64 %addrint to fp128*
-  store volatile fp128 %val64, fp128* %addr
+  %addr = inttoptr i64 %addrint to ptr
+  store volatile fp128 %val64, ptr %addr
   ret void
 }
 
@@ -18,8 +18,8 @@ define void @strqrox(fp128 %val64, i64 %base, i64 %offset) {
 ; CHECK-NOSTRQRO: str q{{[0-9]+}}, [x{{[0-9]+}}, x
 define void @strqrox_optsize(fp128 %val64, i64 %base, i64 %offset) minsize {
   %addrint = add i64 %base, %offset
-  %addr = inttoptr i64 %addrint to fp128*
-  store volatile fp128 %val64, fp128* %addr
+  %addr = inttoptr i64 %addrint to ptr
+  store volatile fp128 %val64, ptr %addr
   ret void
 }
 
@@ -29,8 +29,8 @@ define void @strqrox_optsize(fp128 %val64, i64 %base, i64 %offset) minsize {
 define void @strqrow(fp128 %val64, i64 %base, i32 %offset) {
   %offset64 = zext i32 %offset to i64
   %addrint = add i64 %base, %offset64
-  %addr = inttoptr i64 %addrint to fp128*
-  store volatile fp128 %val64, fp128* %addr
+  %addr = inttoptr i64 %addrint to ptr
+  store volatile fp128 %val64, ptr %addr
   ret void
 }
 
@@ -41,8 +41,8 @@ define void @strqrow(fp128 %val64, i64 %base, i32 %offset) {
 define void @strqrow_optsize(fp128 %val64, i64 %base, i32 %offset) minsize {
   %offset64 = zext i32 %offset to i64
   %addrint = add i64 %base, %offset64
-  %addr = inttoptr i64 %addrint to fp128*
-  store volatile fp128 %val64, fp128* %addr
+  %addr = inttoptr i64 %addrint to ptr
+  store volatile fp128 %val64, ptr %addr
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/strqu.ll b/llvm/test/CodeGen/AArch64/strqu.ll
index ea4d5906bd8a3..f7993c770750b 100644
--- a/llvm/test/CodeGen/AArch64/strqu.ll
+++ b/llvm/test/CodeGen/AArch64/strqu.ll
@@ -4,29 +4,29 @@
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-linux-gnu    -mcpu=exynos-m3 | FileCheck %s
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64_be-linux-gnu -mcpu=exynos-m3 | FileCheck %s
 
-define void @test_split_f(<4 x float> %val, <4 x float>* %addr) {
+define void @test_split_f(<4 x float> %val, ptr %addr) {
 ; CHECK-LABEL: test_split_f:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
-  store <4 x float> %val, <4 x float>* %addr, align 8
+  store <4 x float> %val, ptr %addr, align 8
   ret void
 }
 
-define void @test_split_d(<2 x double> %val, <2 x double>* %addr) {
+define void @test_split_d(<2 x double> %val, ptr %addr) {
 ; CHECK-LABEL: test_split_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
-  store <2 x double> %val, <2 x double>* %addr, align 8
+  store <2 x double> %val, ptr %addr, align 8
   ret void
 }
 
-define void @test_split_128(fp128 %val, fp128* %addr) {
+define void @test_split_128(fp128 %val, ptr %addr) {
 ; CHECK-LABEL: test_split_128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
-  store fp128 %val, fp128* %addr, align 8
+  store fp128 %val, ptr %addr, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/subs-to-sub-opt.ll b/llvm/test/CodeGen/AArch64/subs-to-sub-opt.ll
index ce544d351c81b..4dfd6f8688edb 100644
--- a/llvm/test/CodeGen/AArch64/subs-to-sub-opt.ll
+++ b/llvm/test/CodeGen/AArch64/subs-to-sub-opt.ll
@@ -10,14 +10,14 @@ define i32 @test01() nounwind {
 ; CHECK: sub {{.*}}
 ; CHECK-NEXT: cmn {{.*}}
 entry:
-  %0 = load i8, i8* @a, align 1
+  %0 = load i8, ptr @a, align 1
   %conv = zext i8 %0 to i32
-  %1 = load i8, i8* @b, align 1
+  %1 = load i8, ptr @b, align 1
   %conv1 = zext i8 %1 to i32
   %s = sub nsw i32 %conv1, %conv
   %cmp0 = icmp eq i32 %s, -1
   %cmp1 = sext i1 %cmp0 to i8
-  store i8 %cmp1, i8* @a
+  store i8 %cmp1, ptr @a
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
index 34d4612fb9257..8b0ea9da945a2 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
@@ -6,7 +6,7 @@ target triple = "aarch64-unknown-linux-gnu"
 
 ; Make sure callers set up the arguments correctly - tests AArch64ISelLowering::LowerCALL
 
-define float @foo1(double* %x0, double* %x1, double* %x2) nounwind {
+define float @foo1(ptr %x0, ptr %x1, ptr %x2) nounwind {
 ; CHECK-LABEL: foo1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
@@ -29,9 +29,9 @@ define float @foo1(double* %x0, double* %x1, double* %x2) nounwind {
 entry:
   %0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
   %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %0)
-  %2 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %1, double* %x0)
-  %3 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %1, double* %x1)
-  %4 = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %1, double* %x2)
+  %2 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %1, ptr %x0)
+  %3 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %1, ptr %x1)
+  %4 = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %1, ptr %x2)
   %5 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  0
   %6 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  1
   %7 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  2
@@ -52,7 +52,7 @@ entry:
   ret float %call
 }
 
-define float @foo2(double* %x0, double* %x1) nounwind {
+define float @foo2(ptr %x0, ptr %x1) nounwind {
 ; CHECK-LABEL: foo2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
@@ -86,8 +86,8 @@ define float @foo2(double* %x0, double* %x1) nounwind {
 entry:
   %0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
   %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %0)
-  %2 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %1, double* %x0)
-  %3 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %1, double* %x1)
+  %2 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %1, ptr %x0)
+  %3 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %1, ptr %x1)
   %4 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  0
   %5 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  1
   %6 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  2
@@ -108,7 +108,7 @@ entry:
   ret float %call
 }
 
-define float @foo3(double* %x0, double* %x1, double* %x2) nounwind {
+define float @foo3(ptr %x0, ptr %x1, ptr %x2) nounwind {
 ; CHECK-LABEL: foo3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
@@ -131,9 +131,9 @@ define float @foo3(double* %x0, double* %x1, double* %x2) nounwind {
 entry:
   %0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
   %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %0)
-  %2 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %1, double* %x0)
-  %3 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld3.sret.nxv2f64(<vscale x 2 x i1> %1, double* %x1)
-  %4 = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %1, double* %x2)
+  %2 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %1, ptr %x0)
+  %3 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld3.sret.nxv2f64(<vscale x 2 x i1> %1, ptr %x1)
+  %4 = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %1, ptr %x2)
   %5 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  0
   %6 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  1
   %7 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  2
@@ -154,7 +154,7 @@ entry:
 
 ; Make sure callees read the arguments correctly - tests AArch64ISelLowering::LowerFormalArguments
 
-define double @foo4(double %x0, double * %ptr1, double * %ptr2, double * %ptr3, <vscale x 8 x double> %x1, <vscale x 8 x double> %x2, <vscale x 2 x double> %x3) nounwind {
+define double @foo4(double %x0, ptr %ptr1, ptr %ptr2, ptr %ptr3, <vscale x 8 x double> %x1, <vscale x 8 x double> %x2, <vscale x 2 x double> %x3) nounwind {
 ; CHECK-LABEL: foo4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
@@ -173,16 +173,13 @@ define double @foo4(double %x0, double * %ptr1, double * %ptr2, double * %ptr3,
 ; CHECK-NEXT:    st1d { z5.d }, p0, [x2]
 ; CHECK-NEXT:    ret
 entry:
-  %ptr1.bc = bitcast double * %ptr1 to <vscale x 8 x double> *
-  store volatile <vscale x 8 x double> %x1, <vscale x 8 x double>* %ptr1.bc
-  %ptr2.bc = bitcast double * %ptr2 to <vscale x 8 x double> *
-  store volatile <vscale x 8 x double> %x2, <vscale x 8 x double>* %ptr2.bc
-  %ptr3.bc = bitcast double * %ptr3 to <vscale x 2 x double> *
-  store volatile <vscale x 2 x double> %x3, <vscale x 2 x double>* %ptr3.bc
+  store volatile <vscale x 8 x double> %x1, <vscale x 8 x double>* %ptr1
+  store volatile <vscale x 8 x double> %x2, <vscale x 8 x double>* %ptr2
+  store volatile <vscale x 2 x double> %x3, <vscale x 2 x double>* %ptr3
   ret double %x0
 }
 
-define double @foo5(i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, double * %ptr1, double * %ptr2, double %x0, <vscale x 8 x double> %x1, <vscale x 8 x double> %x2) nounwind {
+define double @foo5(i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, ptr %ptr1, ptr %ptr2, double %x0, <vscale x 8 x double> %x1, <vscale x 8 x double> %x2) nounwind {
 ; CHECK-LABEL: foo5:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x8, [sp]
@@ -201,14 +198,12 @@ define double @foo5(i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, double
 ; CHECK-NEXT:    st1d { z5.d }, p0, [x7, #1, mul vl]
 ; CHECK-NEXT:    ret
 entry:
-  %ptr1.bc = bitcast double * %ptr1 to <vscale x 8 x double> *
-  store volatile <vscale x 8 x double> %x1, <vscale x 8 x double>* %ptr1.bc
-  %ptr2.bc = bitcast double * %ptr2 to <vscale x 8 x double> *
-  store volatile <vscale x 8 x double> %x2, <vscale x 8 x double>* %ptr2.bc
+  store volatile <vscale x 8 x double> %x1, <vscale x 8 x double>* %ptr1
+  store volatile <vscale x 8 x double> %x2, <vscale x 8 x double>* %ptr2
   ret double %x0
 }
 
-define double @foo6(double %x0, double %x1, double * %ptr1, double * %ptr2, <vscale x 8 x double> %x2, <vscale x 6 x double> %x3) nounwind {
+define double @foo6(double %x0, double %x1, ptr %ptr1, ptr %ptr2, <vscale x 8 x double> %x2, <vscale x 6 x double> %x3) nounwind {
 ; CHECK-LABEL: foo6:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.d
@@ -224,16 +219,14 @@ define double @foo6(double %x0, double %x1, double * %ptr1, double * %ptr2, <vsc
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x1]
 ; CHECK-NEXT:    ret
 entry:
-  %ptr1.bc = bitcast double * %ptr1 to <vscale x 8 x double> *
-  store volatile <vscale x 8 x double> %x2, <vscale x 8 x double>* %ptr1.bc
-  %ptr2.bc = bitcast double * %ptr2 to <vscale x 6 x double> *
-  store volatile <vscale x 6 x double> %x3, <vscale x 6 x double>* %ptr2.bc
+  store volatile <vscale x 8 x double> %x2, <vscale x 8 x double>* %ptr1
+  store volatile <vscale x 6 x double> %x3, <vscale x 6 x double>* %ptr2
   ret double %x0
 }
 
 ; Use AAVPCS, SVE register in z0 - z7 used
 
-define void @aavpcs1(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32 %s6, <vscale x 4 x i32> %s7, <vscale x 4 x i32> %s8, <vscale x 4 x i32> %s9, <vscale x 4 x i32> %s10, <vscale x 4 x i32> %s11, <vscale x 4 x i32> %s12, <vscale x 4 x i32> %s13, <vscale x 4 x i32> %s14, <vscale x 4 x i32> %s15, <vscale x 4 x i32> %s16, i32 * %ptr) nounwind {
+define void @aavpcs1(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32 %s6, <vscale x 4 x i32> %s7, <vscale x 4 x i32> %s8, <vscale x 4 x i32> %s9, <vscale x 4 x i32> %s10, <vscale x 4 x i32> %s11, <vscale x 4 x i32> %s12, <vscale x 4 x i32> %s13, <vscale x 4 x i32> %s14, <vscale x 4 x i32> %s15, <vscale x 4 x i32> %s16, ptr %ptr) nounwind {
 ; CHECK-LABEL: aavpcs1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldp x8, x9, [sp]
@@ -251,22 +244,21 @@ define void @aavpcs1(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32 %
 ; CHECK-NEXT:    st1w { z3.s }, p0, [x9]
 ; CHECK-NEXT:    ret
 entry:
-  %ptr1.bc = bitcast i32 * %ptr to <vscale x 4 x i32> *
-  store volatile <vscale x 4 x i32> %s7, <vscale x 4 x i32>* %ptr1.bc
-  store volatile <vscale x 4 x i32> %s8, <vscale x 4 x i32>* %ptr1.bc
-  store volatile <vscale x 4 x i32> %s9, <vscale x 4 x i32>* %ptr1.bc
-  store volatile <vscale x 4 x i32> %s11, <vscale x 4 x i32>* %ptr1.bc
-  store volatile <vscale x 4 x i32> %s12, <vscale x 4 x i32>* %ptr1.bc
-  store volatile <vscale x 4 x i32> %s13, <vscale x 4 x i32>* %ptr1.bc
-  store volatile <vscale x 4 x i32> %s14, <vscale x 4 x i32>* %ptr1.bc
-  store volatile <vscale x 4 x i32> %s15, <vscale x 4 x i32>* %ptr1.bc
-  store volatile <vscale x 4 x i32> %s16, <vscale x 4 x i32>* %ptr1.bc
+  store volatile <vscale x 4 x i32> %s7, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s8, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s9, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s11, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s12, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s13, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s14, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s15, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s16, <vscale x 4 x i32>* %ptr
   ret void
 }
 
 ; Use AAVPCS, SVE register in z0 - z7 used
 
-define void @aavpcs2(float %s0, float %s1, float %s2, float %s3, float %s4, float %s5, float %s6, <vscale x 4 x float> %s7, <vscale x 4 x float> %s8, <vscale x 4 x float> %s9, <vscale x 4 x float> %s10, <vscale x 4 x float> %s11, <vscale x 4 x float> %s12,<vscale x 4 x float> %s13,<vscale x 4 x float> %s14,<vscale x 4 x float> %s15,<vscale x 4 x float> %s16,float * %ptr) nounwind {
+define void @aavpcs2(float %s0, float %s1, float %s2, float %s3, float %s4, float %s5, float %s6, <vscale x 4 x float> %s7, <vscale x 4 x float> %s8, <vscale x 4 x float> %s9, <vscale x 4 x float> %s10, <vscale x 4 x float> %s11, <vscale x 4 x float> %s12,<vscale x 4 x float> %s13,<vscale x 4 x float> %s14,<vscale x 4 x float> %s15,<vscale x 4 x float> %s16,ptr %ptr) nounwind {
 ; CHECK-LABEL: aavpcs2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldp x8, x9, [sp]
@@ -290,22 +282,21 @@ define void @aavpcs2(float %s0, float %s1, float %s2, float %s3, float %s4, floa
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x9]
 ; CHECK-NEXT:    ret
 entry:
-  %ptr1.bc = bitcast float * %ptr to <vscale x 4 x float> *
-  store volatile <vscale x 4 x float> %s7, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s8, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s9, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s11, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s12, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s13, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s14, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s15, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s16, <vscale x 4 x float>* %ptr1.bc
+  store volatile <vscale x 4 x float> %s7, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s8, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s9, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s11, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s12, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s13, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s14, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s15, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s16, <vscale x 4 x float>* %ptr
   ret void
 }
 
 ; Use AAVPCS, no SVE register in z0 - z7 used (floats occupy z0 - z7) but predicate arg is used
 
-define void @aavpcs3(float %s0, float %s1, float %s2, float %s3, float %s4, float %s5, float %s6, float %s7, <vscale x 4 x float> %s8, <vscale x 4 x float> %s9, <vscale x 4 x float> %s10, <vscale x 4 x float> %s11, <vscale x 4 x float> %s12, <vscale x 4 x float> %s13, <vscale x 4 x float> %s14, <vscale x 4 x float> %s15, <vscale x 4 x float> %s16, <vscale x 4 x float> %s17, <vscale x 16 x i1> %p0, float * %ptr) nounwind {
+define void @aavpcs3(float %s0, float %s1, float %s2, float %s3, float %s4, float %s5, float %s6, float %s7, <vscale x 4 x float> %s8, <vscale x 4 x float> %s9, <vscale x 4 x float> %s10, <vscale x 4 x float> %s11, <vscale x 4 x float> %s12, <vscale x 4 x float> %s13, <vscale x 4 x float> %s14, <vscale x 4 x float> %s15, <vscale x 4 x float> %s16, <vscale x 4 x float> %s17, <vscale x 16 x i1> %p0, ptr %ptr) nounwind {
 ; CHECK-LABEL: aavpcs3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x8, [sp]
@@ -331,22 +322,21 @@ define void @aavpcs3(float %s0, float %s1, float %s2, float %s3, float %s4, floa
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x8]
 ; CHECK-NEXT:    ret
 entry:
-  %ptr1.bc = bitcast float * %ptr to <vscale x 4 x float> *
-  store volatile <vscale x 4 x float> %s8, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s9, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s10, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s11, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s12, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s13, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s14, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s15, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s16, <vscale x 4 x float>* %ptr1.bc
+  store volatile <vscale x 4 x float> %s8, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s9, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s10, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s11, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s12, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s13, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s14, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s15, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s16, <vscale x 4 x float>* %ptr
   ret void
 }
 
 ; Use AAVPCS, SVE register in z0 - z7 used (i32s don't occupy z0 - z7)
 
-define void @aavpcs4(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32 %s6, i32 %s7, <vscale x 4 x i32> %s8, <vscale x 4 x i32> %s9, <vscale x 4 x i32> %s10, <vscale x 4 x i32> %s11, <vscale x 4 x i32> %s12, <vscale x 4 x i32> %s13, <vscale x 4 x i32> %s14, <vscale x 4 x i32> %s15, <vscale x 4 x i32> %s16, <vscale x 4 x i32> %s17, i32 * %ptr) nounwind {
+define void @aavpcs4(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32 %s6, i32 %s7, <vscale x 4 x i32> %s8, <vscale x 4 x i32> %s9, <vscale x 4 x i32> %s10, <vscale x 4 x i32> %s11, <vscale x 4 x i32> %s12, <vscale x 4 x i32> %s13, <vscale x 4 x i32> %s14, <vscale x 4 x i32> %s15, <vscale x 4 x i32> %s16, <vscale x 4 x i32> %s17, ptr %ptr) nounwind {
 ; CHECK-LABEL: aavpcs4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x8, [sp]
@@ -364,22 +354,21 @@ define void @aavpcs4(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, i32 %
 ; CHECK-NEXT:    st1w { z24.s }, p0, [x9]
 ; CHECK-NEXT:    ret
 entry:
-  %ptr1.bc = bitcast i32 * %ptr to <vscale x 4 x i32> *
-  store volatile <vscale x 4 x i32> %s8, <vscale x 4 x i32>* %ptr1.bc
-  store volatile <vscale x 4 x i32> %s9, <vscale x 4 x i32>* %ptr1.bc
-  store volatile <vscale x 4 x i32> %s10, <vscale x 4 x i32>* %ptr1.bc
-  store volatile <vscale x 4 x i32> %s11, <vscale x 4 x i32>* %ptr1.bc
-  store volatile <vscale x 4 x i32> %s12, <vscale x 4 x i32>* %ptr1.bc
-  store volatile <vscale x 4 x i32> %s13, <vscale x 4 x i32>* %ptr1.bc
-  store volatile <vscale x 4 x i32> %s14, <vscale x 4 x i32>* %ptr1.bc
-  store volatile <vscale x 4 x i32> %s15, <vscale x 4 x i32>* %ptr1.bc
-  store volatile <vscale x 4 x i32> %s16, <vscale x 4 x i32>* %ptr1.bc
+  store volatile <vscale x 4 x i32> %s8, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s9, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s10, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s11, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s12, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s13, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s14, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s15, <vscale x 4 x i32>* %ptr
+  store volatile <vscale x 4 x i32> %s16, <vscale x 4 x i32>* %ptr
   ret void
 }
 
 ; Use AAVPCS, SVE register used in return
 
-define <vscale x 4 x float> @aavpcs5(float %s0, float %s1, float %s2, float %s3, float %s4, float %s5, float %s6, float %s7, <vscale x 4 x float> %s8, <vscale x 4 x float> %s9, <vscale x 4 x float> %s10, <vscale x 4 x float> %s11, <vscale x 4 x float> %s12, <vscale x 4 x float> %s13, <vscale x 4 x float> %s14, <vscale x 4 x float> %s15, <vscale x 4 x float> %s16, <vscale x 4 x float> %s17, float * %ptr) nounwind {
+define <vscale x 4 x float> @aavpcs5(float %s0, float %s1, float %s2, float %s3, float %s4, float %s5, float %s6, float %s7, <vscale x 4 x float> %s8, <vscale x 4 x float> %s9, <vscale x 4 x float> %s10, <vscale x 4 x float> %s11, <vscale x 4 x float> %s12, <vscale x 4 x float> %s13, <vscale x 4 x float> %s14, <vscale x 4 x float> %s15, <vscale x 4 x float> %s16, <vscale x 4 x float> %s17, ptr %ptr) nounwind {
 ; CHECK-LABEL: aavpcs5:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x8, [sp]
@@ -405,20 +394,19 @@ define <vscale x 4 x float> @aavpcs5(float %s0, float %s1, float %s2, float %s3,
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x8]
 ; CHECK-NEXT:    ret
 entry:
-  %ptr1.bc = bitcast float * %ptr to <vscale x 4 x float> *
-  store volatile <vscale x 4 x float> %s8, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s9, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s10, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s11, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s12, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s13, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s14, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s15, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s16, <vscale x 4 x float>* %ptr1.bc
+  store volatile <vscale x 4 x float> %s8, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s9, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s10, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s11, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s12, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s13, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s14, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s15, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s16, <vscale x 4 x float>* %ptr
   ret <vscale x 4 x float> %s8
 }
 
-define void @aapcs1(float %s0, float %s1, float %s2, float %s3, float %s4, float %s5, float %s6, float %s7, <vscale x 4 x float> %s8, <vscale x 4 x float> %s9, <vscale x 4 x float> %s10, <vscale x 4 x float> %s11, <vscale x 4 x float> %s12, <vscale x 4 x float> %s13, <vscale x 4 x float> %s14, <vscale x 4 x float> %s15, <vscale x 4 x float> %s16, <vscale x 4 x float> %s17, float * %ptr) nounwind {
+define void @aapcs1(float %s0, float %s1, float %s2, float %s3, float %s4, float %s5, float %s6, float %s7, <vscale x 4 x float> %s8, <vscale x 4 x float> %s9, <vscale x 4 x float> %s10, <vscale x 4 x float> %s11, <vscale x 4 x float> %s12, <vscale x 4 x float> %s13, <vscale x 4 x float> %s14, <vscale x 4 x float> %s15, <vscale x 4 x float> %s16, <vscale x 4 x float> %s17, ptr %ptr) nounwind {
 ; CHECK-LABEL: aapcs1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr x8, [sp]
@@ -444,16 +432,15 @@ define void @aapcs1(float %s0, float %s1, float %s2, float %s3, float %s4, float
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x8]
 ; CHECK-NEXT:    ret
 entry:
-  %ptr1.bc = bitcast float * %ptr to <vscale x 4 x float> *
-  store volatile <vscale x 4 x float> %s8, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s9, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s10, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s11, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s12, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s13, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s14, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s15, <vscale x 4 x float>* %ptr1.bc
-  store volatile <vscale x 4 x float> %s16, <vscale x 4 x float>* %ptr1.bc
+  store volatile <vscale x 4 x float> %s8, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s9, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s10, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s11, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s12, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s13, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s14, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s15, <vscale x 4 x float>* %ptr
+  store volatile <vscale x 4 x float> %s16, <vscale x 4 x float>* %ptr
   ret void
 }
 
@@ -719,9 +706,9 @@ declare float @callee3(float, float, <vscale x 8 x double>, <vscale x 6 x double
 
 declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 immarg)
 declare <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1>)
-declare {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1>, double*)
-declare {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld3.sret.nxv2f64(<vscale x 2 x i1>, double*)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1>, double*)
+declare {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1>, ptr)
+declare {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld3.sret.nxv2f64(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1>, ptr)
 declare double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>)
 declare <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double>, <vscale x 2 x double>, i64)
 declare <vscale x 6 x double> @llvm.vector.insert.nxv6f64.nx2f64(<vscale x 6 x double>, <vscale x 2 x double>, i64)
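
For context (a minimal sketch with hypothetical names, not part of the patch): the rewrite visible in aapcs1 above is the pattern used throughout — the pointer bitcast feeding each load/store is dropped and the memory operation takes the ptr value directly.

; Typed pointers (old form):
;   %ptr.bc = bitcast float* %ptr to <vscale x 4 x float>*
;   store volatile <vscale x 4 x float> %v, <vscale x 4 x float>* %ptr.bc
; Opaque pointers (new form):
define void @sketch_store(ptr %ptr, <vscale x 4 x float> %v) {
  store volatile <vscale x 4 x float> %v, ptr %ptr
  ret void
}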

diff --git a/llvm/test/CodeGen/AArch64/sve-coalesce-ptrue-intrinsics.ll b/llvm/test/CodeGen/AArch64/sve-coalesce-ptrue-intrinsics.ll
index bab682c7da92f..96eff588c439f 100644
--- a/llvm/test/CodeGen/AArch64/sve-coalesce-ptrue-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/sve-coalesce-ptrue-intrinsics.ll
@@ -6,54 +6,54 @@ declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 immarg)
 declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 immarg)
 declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 immarg)
 
-declare <vscale x 16 x i32> @llvm.aarch64.sve.ld1.nxv16i32(<vscale x 16 x i1>, i32*)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1>, i32*)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1>, i32*)
+declare <vscale x 16 x i32> @llvm.aarch64.sve.ld1.nxv16i32(<vscale x 16 x i1>, ptr)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1>, ptr)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, ptr)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1>, ptr)
 
 declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
 declare <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1>)
 
 ; Two calls to the SVE ptrue intrinsic. %1 is redundant, and can be expressed as an SVE reinterpret of %3 via
 ; convert.{to,from}.svbool.
-define <vscale x 8 x i32> @coalesce_test_basic(i32* %addr) {
+define <vscale x 8 x i32> @coalesce_test_basic(ptr %addr) {
 ; CHECK-LABEL: @coalesce_test_basic(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> [[TMP1]])
 ; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP2]])
-; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP3]], i32* [[ADDR:%.*]])
-; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> [[TMP1]], i32* [[ADDR]])
+; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP3]], ptr [[ADDR:%.*]])
+; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> [[TMP1]], ptr [[ADDR]])
 ; CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP5]]
 ;
   %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %2 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %1, i32* %addr)
+  %2 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %1, ptr %addr)
   %3 = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %4 = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> %3, i32* %addr)
+  %4 = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> %3, ptr %addr)
   ret <vscale x 8 x i32> %4
 }
 
 ; Two calls to the SVE ptrue intrinsic with the SV_POW2 pattern. This should reduce to the same output as
 ; coalesce_test_basic.
-define <vscale x 8 x i32> @coalesce_test_pow2(i32* %addr) {
+define <vscale x 8 x i32> @coalesce_test_pow2(ptr %addr) {
 ; CHECK-LABEL: @coalesce_test_pow2(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 0)
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> [[TMP1]])
 ; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP2]])
-; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP3]], i32* [[ADDR:%.*]])
-; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> [[TMP1]], i32* [[ADDR]])
+; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP3]], ptr [[ADDR:%.*]])
+; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> [[TMP1]], ptr [[ADDR]])
 ; CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP5]]
 ;
   %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 0)
-  %2 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %1, i32* %addr)
+  %2 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %1, ptr %addr)
   %3 = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 0)
-  %4 = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> %3, i32* %addr)
+  %4 = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> %3, ptr %addr)
   ret <vscale x 8 x i32> %4
 }
 
 ; Four calls to the SVE ptrue intrinsic; two with the SV_ALL patterns, and two with the SV_POW2 pattern. The
 ; two SV_ALL ptrue intrinsics should be coalesced, and the two SV_POW2 intrinsics should be coalesced.
-define <vscale x 8 x i32> @coalesce_test_all_and_pow2(i32* %addr) {
+define <vscale x 8 x i32> @coalesce_test_all_and_pow2(ptr %addr) {
 ; CHECK-LABEL: @coalesce_test_all_and_pow2(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 0)
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> [[TMP1]])
@@ -61,10 +61,10 @@ define <vscale x 8 x i32> @coalesce_test_all_and_pow2(i32* %addr) {
 ; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
 ; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> [[TMP4]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP5]])
-; CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP3]], i32* [[ADDR:%.*]])
-; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> [[TMP1]], i32* [[ADDR]])
-; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP6]], i32* [[ADDR]])
-; CHECK-NEXT:    [[TMP10:%.*]] = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> [[TMP4]], i32* [[ADDR]])
+; CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP3]], ptr [[ADDR:%.*]])
+; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> [[TMP1]], ptr [[ADDR]])
+; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP6]], ptr [[ADDR]])
+; CHECK-NEXT:    [[TMP10:%.*]] = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> [[TMP4]], ptr [[ADDR]])
 ; CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP10]]
 ;
   %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 0)
@@ -72,113 +72,113 @@ define <vscale x 8 x i32> @coalesce_test_all_and_pow2(i32* %addr) {
   %3 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
   %4 = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
 
-  %5 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %1, i32* %addr)
-  %6 = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> %2, i32* %addr)
-  %7 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %3, i32* %addr)
-  %8 = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> %4, i32* %addr)
+  %5 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %1, ptr %addr)
+  %6 = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> %2, ptr %addr)
+  %7 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %3, ptr %addr)
+  %8 = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> %4, ptr %addr)
   ret <vscale x 8 x i32> %8
 }
 
 
 ; Two calls to the SVE ptrue intrinsic: one with the SV_ALL pattern, another with the SV_POW2 pattern. The
 ; patterns are incompatible, so they should not be coalesced.
-define <vscale x 8 x i32> @coalesce_test_pattern_mismatch2(i32* %addr) {
+define <vscale x 8 x i32> @coalesce_test_pattern_mismatch2(ptr %addr) {
 ; CHECK-LABEL: @coalesce_test_pattern_mismatch2(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 0)
-; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP1]], i32* [[ADDR:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP1]], ptr [[ADDR:%.*]])
 ; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> [[TMP3]], i32* [[ADDR]])
+; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> [[TMP3]], ptr [[ADDR]])
 ; CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP4]]
 ;
   %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 0)
-  %2 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %1, i32* %addr)
+  %2 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %1, ptr %addr)
   %3 = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %4 = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> %3, i32* %addr)
+  %4 = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> %3, ptr %addr)
   ret <vscale x 8 x i32> %4
 }
 
 ; Two calls to the SVE ptrue intrinsic with the SV_VL1 pattern. This pattern is not currently recognised, so
 ; nothing should be done here.
-define <vscale x 8 x i32> @coalesce_test_bad_pattern(i32* %addr) {
+define <vscale x 8 x i32> @coalesce_test_bad_pattern(ptr %addr) {
 ; CHECK-LABEL: @coalesce_test_bad_pattern(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 1)
-; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP1]], i32* [[ADDR:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP1]], ptr [[ADDR:%.*]])
 ; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 1)
-; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> [[TMP3]], i32* [[ADDR]])
+; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> [[TMP3]], ptr [[ADDR]])
 ; CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP4]]
 ;
   %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 1)
-  %2 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %1, i32* %addr)
+  %2 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %1, ptr %addr)
   %3 = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 1)
-  %4 = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> %3, i32* %addr)
+  %4 = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> %3, ptr %addr)
   ret <vscale x 8 x i32> %4
 }
 
 ; Four calls to the SVE ptrue intrinsic. %7 is the most encompassing, and the others can be expressed as
 ; SVE reinterprets of %7 via convert.{to,from}.svbool.
-define <vscale x 16 x i32> @coalesce_test_multiple(i32* %addr) {
+define <vscale x 16 x i32> @coalesce_test_multiple(ptr %addr) {
 ; CHECK-LABEL: @coalesce_test_multiple(
 ; CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv16i1(<vscale x 16 x i1> [[TMP1]])
 ; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[TMP2]])
 ; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP2]])
 ; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[TMP2]])
-; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> [[TMP5]], i32* [[ADDR:%.*]])
-; CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP4]], i32* [[ADDR]])
-; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> [[TMP3]], i32* [[ADDR]])
-; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 16 x i32> @llvm.aarch64.sve.ld1.nxv16i32(<vscale x 16 x i1> [[TMP1]], i32* [[ADDR]])
+; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> [[TMP5]], ptr [[ADDR:%.*]])
+; CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP4]], ptr [[ADDR]])
+; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> [[TMP3]], ptr [[ADDR]])
+; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 16 x i32> @llvm.aarch64.sve.ld1.nxv16i32(<vscale x 16 x i1> [[TMP1]], ptr [[ADDR]])
 ; CHECK-NEXT:    ret <vscale x 16 x i32> [[TMP9]]
 ;
   %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
-  %2 = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %1, i32* %addr)
+  %2 = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %1, ptr %addr)
   %3 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %4 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %3, i32* %addr)
+  %4 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %3, ptr %addr)
   %5 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %6 = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> %5, i32* %addr)
+  %6 = call <vscale x 8 x i32> @llvm.aarch64.sve.ld1.nxv8i32(<vscale x 8 x i1> %5, ptr %addr)
   %7 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-  %8 = call <vscale x 16 x i32> @llvm.aarch64.sve.ld1.nxv16i32(<vscale x 16 x i1> %7, i32* %addr)
+  %8 = call <vscale x 16 x i32> @llvm.aarch64.sve.ld1.nxv16i32(<vscale x 16 x i1> %7, ptr %addr)
   ret <vscale x 16 x i32> %8
 }
 
 ; Two calls to the SVE ptrue intrinsic which are both of the same size. In this case, one should be identified
 ; as redundant and rewritten as an SVE reinterpret of the other via the convert.{to,from}.svbool intrinsics.
 ; This introduces a redundant conversion which will then be eliminated.
-define <vscale x 4 x i32> @coalesce_test_same_size(i32* %addr) {
+define <vscale x 4 x i32> @coalesce_test_same_size(ptr %addr) {
 ; CHECK-LABEL: @coalesce_test_same_size(
 ; CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP1]], i32* [[ADDR:%.*]])
-; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP1]], i32* [[ADDR]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP1]], ptr [[ADDR:%.*]])
+; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP1]], ptr [[ADDR]])
 ; CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP3]]
 ;
   %1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %2 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %1, i32* %addr)
+  %2 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %1, ptr %addr)
   %3 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
-  %4 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %3, i32* %addr)
+  %4 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %3, ptr %addr)
   ret <vscale x 4 x i32> %4
 }
 
 ; Two calls to the SVE ptrue intrinsic, but neither can be eliminated; %1 is promoted to become %3, which
 ; means eliminating this call to the SVE ptrue intrinsic would involve creating a longer, irreducible chain of
 ; conversions. Better codegen is achieved by just leaving the ptrue as-is.
-define <vscale x 8 x i16> @coalesce_test_promoted_ptrue(i32* %addr1, i16* %addr2) {
+define <vscale x 8 x i16> @coalesce_test_promoted_ptrue(ptr %addr1, ptr %addr2) {
 ; CHECK-LABEL: @coalesce_test_promoted_ptrue(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
 ; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> [[TMP2]])
 ; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[TMP3]])
-; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP2]], i32* [[ADDR1:%.*]])
-; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> [[TMP4]], i16* [[ADDR2:%.*]])
-; CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> [[TMP1]], i16* [[ADDR2]])
+; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> [[TMP2]], ptr [[ADDR1:%.*]])
+; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> [[TMP4]], ptr [[ADDR2:%.*]])
+; CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> [[TMP1]], ptr [[ADDR2]])
 ; CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP7]]
 ;
   %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
   %2 = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
   %3 = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %2)
 
-  %4 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %1, i32* %addr1)
-  %5 = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %3, i16* %addr2)
+  %4 = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %1, ptr %addr1)
+  %5 = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %3, ptr %addr2)
 
   %6 = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
-  %7 = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %6, i16* %addr2)
+  %7 = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %6, ptr %addr2)
   ret <vscale x 8 x i16> %7
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-fix-length-and-combine-512.ll b/llvm/test/CodeGen/AArch64/sve-fix-length-and-combine-512.ll
index 0a339ffb0546b..05944346e299c 100644
--- a/llvm/test/CodeGen/AArch64/sve-fix-length-and-combine-512.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fix-length-and-combine-512.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=512  -o - < %s | FileCheck %s
 
-define void @vls_sve_and_64xi8(<64 x i8>* %ap, <64 x i8>* %out) nounwind {
+define void @vls_sve_and_64xi8(ptr %ap, ptr %out) nounwind {
 ; CHECK-LABEL: vls_sve_and_64xi8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x8, .LCPI0_0
@@ -12,16 +12,16 @@ define void @vls_sve_and_64xi8(<64 x i8>* %ap, <64 x i8>* %out) nounwind {
 ; CHECK-NEXT:    and z0.d, z0.d, z1.d
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x1]
 ; CHECK-NEXT:    ret
- %a = load <64 x i8>, <64 x i8>* %ap
+ %a = load <64 x i8>, ptr %ap
  %b = and <64 x i8> %a, <i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255,
                          i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255,
                          i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255,
                          i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255>
- store <64 x i8> %b, <64 x i8>* %out
+ store <64 x i8> %b, ptr %out
  ret void
 }
 
-define <16 x i8> @vls_sve_and_16xi8(<16 x i8> %b, <16 x i8>* %out) nounwind {
+define <16 x i8> @vls_sve_and_16xi8(<16 x i8> %b, ptr %out) nounwind {
 ; CHECK-LABEL: vls_sve_and_16xi8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    bic v0.8h, #255
@@ -30,7 +30,7 @@ define <16 x i8> @vls_sve_and_16xi8(<16 x i8> %b, <16 x i8>* %out) nounwind {
  ret <16 x i8> %c
 }
 
-define <8 x i8> @vls_sve_and_8xi8(<8 x i8> %b, <8 x i8>* %out) nounwind {
+define <8 x i8> @vls_sve_and_8xi8(<8 x i8> %b, ptr %out) nounwind {
 ; CHECK-LABEL: vls_sve_and_8xi8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    bic v0.4h, #255

diff --git a/llvm/test/CodeGen/AArch64/sve-fold-vscale.ll b/llvm/test/CodeGen/AArch64/sve-fold-vscale.ll
index 01f1165042607..f4ac22f7a6b04 100644
--- a/llvm/test/CodeGen/AArch64/sve-fold-vscale.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fold-vscale.ll
@@ -4,7 +4,7 @@
 ; Check that vscale call is recognised by load/store reg/reg pattern and
 ; partially folded, with the rest pulled out of the loop.
 
-define void @ld1w_reg_loop([32000 x i32]* %addr) {
+define void @ld1w_reg_loop(ptr %addr) {
 ; CHECK-LABEL: ld1w_reg_loop:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov x8, xzr
@@ -24,18 +24,17 @@ entry:
 
 vector.body:
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
-  %2 = getelementptr inbounds [32000 x i32], [32000 x i32]* %addr, i64 0, i64 %index
-  %3 = bitcast i32* %2 to <vscale x 4 x i32>*
-  %load = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %3, align 16
+  %2 = getelementptr inbounds [32000 x i32], ptr %addr, i64 0, i64 %index
+  %load = load volatile <vscale x 4 x i32>, ptr %2, align 16
   %index.next = add i64 %index, %1
-  %4 = icmp eq i64 %index.next, 0
-  br i1 %4, label %for.cond.cleanup, label %vector.body
+  %3 = icmp eq i64 %index.next, 0
+  br i1 %3, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:
   ret void
 }
 
-define void @st1w_reg_loop([32000 x i32]* %addr, <vscale x 4 x i32> %val) {
+define void @st1w_reg_loop(ptr %addr, <vscale x 4 x i32> %val) {
 ; CHECK-LABEL: st1w_reg_loop:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov x8, xzr
@@ -55,12 +54,11 @@ entry:
 
 vector.body:
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
-  %2 = getelementptr inbounds [32000 x i32], [32000 x i32]* %addr, i64 0, i64 %index
-  %3 = bitcast i32* %2 to <vscale x 4 x i32>*
-  store volatile <vscale x 4 x i32> %val, <vscale x 4 x i32>* %3, align 16
+  %2 = getelementptr inbounds [32000 x i32], ptr %addr, i64 0, i64 %index
+  store volatile <vscale x 4 x i32> %val, ptr %2, align 16
   %index.next = add i64 %index, %1
-  %4 = icmp eq i64 %index.next, 0
-  br i1 %4, label %for.cond.cleanup, label %vector.body
+  %3 = icmp eq i64 %index.next, 0
+  br i1 %3, label %for.cond.cleanup, label %vector.body
 
 for.cond.cleanup:
   ret void
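
For context (hypothetical standalone sketch, not part of the patch): the renumbering in the two loops above falls out of the same rewrite — under opaque pointers the getelementptr already yields a ptr, so the intermediate bitcast to <vscale x 4 x i32>* goes away and the load/store uses the GEP result directly.

define <vscale x 4 x i32> @sketch_ld1w(ptr %addr, i64 %index) {
  ; %gep has type ptr, so the scalable load can use it without a bitcast.
  %gep = getelementptr inbounds [32000 x i32], ptr %addr, i64 0, i64 %index
  %load = load volatile <vscale x 4 x i32>, ptr %gep, align 16
  ret <vscale x 4 x i32> %load
}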

diff --git a/llvm/test/CodeGen/AArch64/sve-fp.ll b/llvm/test/CodeGen/AArch64/sve-fp.ll
index 19bc2c5d02354..7d54f971f61ec 100644
--- a/llvm/test/CodeGen/AArch64/sve-fp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fp.ll
@@ -494,7 +494,7 @@ define <vscale x 2 x double> @frsqrts_d(<vscale x 2 x double> %a, <vscale x 2 x
 
 %complex = type { { double, double } }
 
-define void @scalar_to_vector(%complex* %outval, <vscale x 2 x i1> %pred, <vscale x 2 x double> %in1, <vscale x 2 x double> %in2) {
+define void @scalar_to_vector(ptr %outval, <vscale x 2 x i1> %pred, <vscale x 2 x double> %in1, <vscale x 2 x double> %in2) {
 ; CHECK-LABEL: scalar_to_vector:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    faddv d0, p0, z0.d
@@ -502,12 +502,11 @@ define void @scalar_to_vector(%complex* %outval, <vscale x 2 x i1> %pred, <vscal
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
-  %realp = getelementptr inbounds %complex, %complex* %outval, i64 0, i32 0, i32 0
-  %imagp = getelementptr inbounds %complex, %complex* %outval, i64 0, i32 0, i32 1
+  %imagp = getelementptr inbounds %complex, ptr %outval, i64 0, i32 0, i32 1
   %1 = call double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1> %pred, <vscale x 2 x double> %in1)
   %2 = call double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1> %pred, <vscale x 2 x double> %in2)
-  store double %1, double* %realp, align 8
-  store double %2, double* %imagp, align 8
+  store double %1, ptr %outval, align 8
+  store double %2, ptr %imagp, align 8
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll b/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
index bdede039a1202..b32dad71bbcb5 100644
--- a/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
@@ -3,7 +3,7 @@
 
 
 ; Ensure we use a "vscale x 4" wide scatter for the maximum supported offset.
-define void @scatter_i8_index_offset_maximum(i8* %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
+define void @scatter_i8_index_offset_maximum(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
 ; CHECK-LABEL: scatter_i8_index_offset_maximum:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #33554431
@@ -18,13 +18,13 @@ define void @scatter_i8_index_offset_maximum(i8* %base, i64 %offset, <vscale x 4
   %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
   %t4 = mul <vscale x 4 x i64> %t3, %step
   %t5 = add <vscale x 4 x i64> %t1, %t4
-  %t6 = getelementptr i8, i8* %base, <vscale x 4 x i64> %t5
-  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x i8*> %t6, i32 2, <vscale x 4 x i1> %pg)
+  %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
+  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
   ret void
 }
 
 ; Ensure we use a "vscale x 4" wide scatter for the minimum supported offset.
-define void @scatter_i16_index_offset_minimum(i16* %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i16> %data) #0 {
+define void @scatter_i16_index_offset_minimum(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i16> %data) #0 {
 ; CHECK-LABEL: scatter_i16_index_offset_minimum:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #-33554432
@@ -39,13 +39,13 @@ define void @scatter_i16_index_offset_minimum(i16* %base, i64 %offset, <vscale x
   %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
   %t4 = mul <vscale x 4 x i64> %t3, %step
   %t5 = add <vscale x 4 x i64> %t1, %t4
-  %t6 = getelementptr i16, i16* %base, <vscale x 4 x i64> %t5
-  call void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x i16*> %t6, i32 2, <vscale x 4 x i1> %pg)
+  %t6 = getelementptr i16, ptr %base, <vscale x 4 x i64> %t5
+  call void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
   ret void
 }
 
 ; Ensure we use a "vscale x 4" gather for an offset within the limits of 32 bits.
-define <vscale x 4 x i8> @gather_i8_index_offset_8(i8* %base, i64 %offset, <vscale x 4 x i1> %pg) #0 {
+define <vscale x 4 x i8> @gather_i8_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1> %pg) #0 {
 ; CHECK-LABEL: gather_i8_index_offset_8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, x1
@@ -59,8 +59,8 @@ define <vscale x 4 x i8> @gather_i8_index_offset_8(i8* %base, i64 %offset, <vsca
   %splat1 = shufflevector <vscale x 4 x i64> %splat.insert1, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %t1 = mul <vscale x 4 x i64> %splat1, %step
   %t2 = add <vscale x 4 x i64> %splat0, %t1
-  %t3 = getelementptr i8, i8* %base, <vscale x 4 x i64> %t2
-  %load = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x i8*> %t3, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x i8> undef)
+  %t3 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t2
+  %load = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %t3, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x i8> undef)
    ret <vscale x 4 x i8> %load
 }
 
@@ -68,7 +68,7 @@ define <vscale x 4 x i8> @gather_i8_index_offset_8(i8* %base, i64 %offset, <vsca
 
 ; Ensure we don't use a "vscale x 4" scatter. Cannot prove that variable stride
 ; will not wrap when shrunk to be i32 based.
-define void @scatter_f16_index_offset_var(half* %base, i64 %offset, i64 %scale, <vscale x 4 x i1> %pg, <vscale x 4 x half> %data) #0 {
+define void @scatter_f16_index_offset_var(ptr %base, i64 %offset, i64 %scale, <vscale x 4 x i1> %pg, <vscale x 4 x half> %data) #0 {
 ; CHECK-LABEL: scatter_f16_index_offset_var:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    index z1.d, #0, #1
@@ -93,13 +93,13 @@ define void @scatter_f16_index_offset_var(half* %base, i64 %offset, i64 %scale,
   %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
   %t4 = mul <vscale x 4 x i64> %t3, %step
   %t5 = add <vscale x 4 x i64> %t1, %t4
-  %t6 = getelementptr half, half* %base, <vscale x 4 x i64> %t5
-  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x half*> %t6, i32 2, <vscale x 4 x i1> %pg)
+  %t6 = getelementptr half, ptr %base, <vscale x 4 x i64> %t5
+  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
   ret void
 }
 
 ; Ensure we don't use a "vscale x 4" wide scatter when the offset is too big.
-define void @scatter_i8_index_offset_maximum_plus_one(i8* %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
+define void @scatter_i8_index_offset_maximum_plus_one(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
 ; CHECK-LABEL: scatter_i8_index_offset_maximum_plus_one:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #1
@@ -123,13 +123,13 @@ define void @scatter_i8_index_offset_maximum_plus_one(i8* %base, i64 %offset, <v
   %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
   %t4 = mul <vscale x 4 x i64> %t3, %step
   %t5 = add <vscale x 4 x i64> %t1, %t4
-  %t6 = getelementptr i8, i8* %base, <vscale x 4 x i64> %t5
-  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x i8*> %t6, i32 2, <vscale x 4 x i1> %pg)
+  %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
+  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
   ret void
 }
 
 ; Ensure we don't use a "vscale x 4" wide scatter when the offset is too small.
-define void @scatter_i8_index_offset_minimum_minus_one(i8* %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
+define void @scatter_i8_index_offset_minimum_minus_one(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
 ; CHECK-LABEL: scatter_i8_index_offset_minimum_minus_one:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #1
@@ -154,13 +154,13 @@ define void @scatter_i8_index_offset_minimum_minus_one(i8* %base, i64 %offset, <
   %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
   %t4 = mul <vscale x 4 x i64> %t3, %step
   %t5 = add <vscale x 4 x i64> %t1, %t4
-  %t6 = getelementptr i8, i8* %base, <vscale x 4 x i64> %t5
-  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x i8*> %t6, i32 2, <vscale x 4 x i1> %pg)
+  %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
+  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
   ret void
 }
 
 ; Ensure we don't use a "vscale x 4" wide scatter when the stride is too big.
-define void @scatter_i8_index_stride_too_big(i8* %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
+define void @scatter_i8_index_stride_too_big(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
 ; CHECK-LABEL: scatter_i8_index_stride_too_big:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #1
@@ -184,15 +184,15 @@ define void @scatter_i8_index_stride_too_big(i8* %base, i64 %offset, <vscale x 4
   %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
   %t4 = mul <vscale x 4 x i64> %t3, %step
   %t5 = add <vscale x 4 x i64> %t1, %t4
-  %t6 = getelementptr i8, i8* %base, <vscale x 4 x i64> %t5
-  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x i8*> %t6, i32 2, <vscale x 4 x i1> %pg)
+  %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
+  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t6, i32 2, <vscale x 4 x i1> %pg)
   ret void
 }
 
 ; Ensure the resulting load is "vscale x 4" wide, despite the offset giving the
 ; impression the gather must be split due to its <vscale x 4 x i64> offset.
 ; gather_f32(base, index(offset, 8 * sizeof(float)))
-define <vscale x 4 x i8> @gather_8i8_index_offset_8([8 x i8]* %base, i64 %offset, <vscale x 4 x i1> %pg) #0 {
+define <vscale x 4 x i8> @gather_8i8_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1> %pg) #0 {
 ; CHECK-LABEL: gather_8i8_index_offset_8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, x1, lsl #3
@@ -203,16 +203,16 @@ define <vscale x 4 x i8> @gather_8i8_index_offset_8([8 x i8]* %base, i64 %offset
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
   %t2 = add <vscale x 4 x i64> %t1, %step
-  %t3 = getelementptr [8 x i8], [8 x i8]* %base, <vscale x 4 x i64> %t2
-  %t4 = bitcast <vscale x 4 x [8 x i8]*> %t3 to <vscale x 4 x i8*>
-  %load = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x i8*> %t4, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x i8> undef)
+  %t3 = getelementptr [8 x i8], ptr %base, <vscale x 4 x i64> %t2
+  %t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
+  %load = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %t4, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x i8> undef)
   ret <vscale x 4 x i8> %load
 }
 
 ; Ensure the resulting load is "vscale x 4" wide, despite the offset giving the
 ; impression the gather must be split due to its <vscale x 4 x i64> offset.
 ; gather_f32(base, index(offset, 8 * sizeof(float)))
-define <vscale x 4 x float> @gather_f32_index_offset_8([8 x float]* %base, i64 %offset, <vscale x 4 x i1> %pg) #0 {
+define <vscale x 4 x float> @gather_f32_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1> %pg) #0 {
 ; CHECK-LABEL: gather_f32_index_offset_8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #32
@@ -224,16 +224,16 @@ define <vscale x 4 x float> @gather_f32_index_offset_8([8 x float]* %base, i64 %
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
   %t2 = add <vscale x 4 x i64> %t1, %step
-  %t3 = getelementptr [8 x float], [8 x float]* %base, <vscale x 4 x i64> %t2
-  %t4 = bitcast <vscale x 4 x [8 x float]*> %t3 to <vscale x 4 x float*>
-  %load = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*> %t4, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x float> undef)
+  %t3 = getelementptr [8 x float], ptr %base, <vscale x 4 x i64> %t2
+  %t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
+  %load = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %t4, i32 4, <vscale x 4 x i1> %pg, <vscale x 4 x float> undef)
   ret <vscale x 4 x float> %load
 }
 
 ; Ensure the resulting store is "vscale x 4" wide, despite the offset giving the
 ; impression the scatter must be split due to its <vscale x 4 x i64> offset.
 ; scatter_f16(base, index(offset, 8 * sizeof(i8)))
-define void @scatter_i8_index_offset_8([8 x i8]* %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
+define void @scatter_i8_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) #0 {
 ; CHECK-LABEL: scatter_i8_index_offset_8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, x1, lsl #3
@@ -244,16 +244,16 @@ define void @scatter_i8_index_offset_8([8 x i8]* %base, i64 %offset, <vscale x 4
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
   %t2 = add <vscale x 4 x i64> %t1, %step
-  %t3 = getelementptr [8 x i8], [8 x i8]* %base, <vscale x 4 x i64> %t2
-  %t4 = bitcast <vscale x 4 x [8 x i8]*> %t3 to <vscale x 4 x i8*>
-  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x i8*> %t4, i32 2, <vscale x 4 x i1> %pg)
+  %t3 = getelementptr [8 x i8], ptr %base, <vscale x 4 x i64> %t2
+  %t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
+  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %t4, i32 2, <vscale x 4 x i1> %pg)
   ret void
 }
 
 ; Ensure the resulting store is "vscale x 4" wide, despite the offset giving the
 ; impression the scatter must be split due to its <vscale x 4 x i64> offset.
 ; scatter_f16(base, index(offset, 8 * sizeof(half)))
-define void @scatter_f16_index_offset_8([8 x half]* %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x half> %data) #0 {
+define void @scatter_f16_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1> %pg, <vscale x 4 x half> %data) #0 {
 ; CHECK-LABEL: scatter_f16_index_offset_8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #16
@@ -265,14 +265,14 @@ define void @scatter_f16_index_offset_8([8 x half]* %base, i64 %offset, <vscale
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
   %t2 = add <vscale x 4 x i64> %t1, %step
-  %t3 = getelementptr [8 x half], [8 x half]* %base, <vscale x 4 x i64> %t2
-  %t4 = bitcast <vscale x 4 x [8 x half]*> %t3 to <vscale x 4 x half*>
-  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x half*> %t4, i32 2, <vscale x 4 x i1> %pg)
+  %t3 = getelementptr [8 x half], ptr %base, <vscale x 4 x i64> %t2
+  %t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
+  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %t4, i32 2, <vscale x 4 x i1> %pg)
   ret void
 }
 
 ; stepvector is hidden further behind GEP and two adds.
-define void @scatter_f16_index_add_add([8 x half]* %base, i64 %offset, i64 %offset2, <vscale x 4 x i1> %pg, <vscale x 4 x half> %data) #0 {
+define void @scatter_f16_index_add_add(ptr %base, i64 %offset, i64 %offset2, <vscale x 4 x i1> %pg, <vscale x 4 x half> %data) #0 {
 ; CHECK-LABEL: scatter_f16_index_add_add:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #16
@@ -288,14 +288,14 @@ define void @scatter_f16_index_add_add([8 x half]* %base, i64 %offset, i64 %offs
   %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
   %add1 = add <vscale x 4 x i64> %splat.offset, %step
   %add2 = add <vscale x 4 x i64> %add1, %splat.offset2
-  %gep = getelementptr [8 x half], [8 x half]* %base, <vscale x 4 x i64> %add2
-  %gep.bc = bitcast <vscale x 4 x [8 x half]*> %gep to <vscale x 4 x half*>
-  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x half*> %gep.bc, i32 2, <vscale x 4 x i1> %pg)
+  %gep = getelementptr [8 x half], ptr %base, <vscale x 4 x i64> %add2
+  %gep.bc = bitcast <vscale x 4 x ptr> %gep to <vscale x 4 x ptr>
+  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %gep.bc, i32 2, <vscale x 4 x i1> %pg)
   ret void
 }
 
 ; stepvector is hidden further behind a GEP, two adds and a shift.
-define void @scatter_f16_index_add_add_mul([8 x half]* %base, i64 %offset, i64 %offset2, <vscale x 4 x i1> %pg, <vscale x 4 x half> %data) #0 {
+define void @scatter_f16_index_add_add_mul(ptr %base, i64 %offset, i64 %offset2, <vscale x 4 x i1> %pg, <vscale x 4 x half> %data) #0 {
 ; CHECK-LABEL: scatter_f16_index_add_add_mul:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #128
@@ -314,9 +314,9 @@ define void @scatter_f16_index_add_add_mul([8 x half]* %base, i64 %offset, i64 %
   %splat.const8.ins = insertelement <vscale x 4 x i64> undef, i64 8, i32 0
   %splat.const8 = shufflevector <vscale x 4 x i64> %splat.const8.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %mul = mul <vscale x 4 x i64> %add2, %splat.const8
-  %gep = getelementptr [8 x half], [8 x half]* %base, <vscale x 4 x i64> %mul
-  %gep.bc = bitcast <vscale x 4 x [8 x half]*> %gep to <vscale x 4 x half*>
-  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x half*> %gep.bc, i32 2, <vscale x 4 x i1> %pg)
+  %gep = getelementptr [8 x half], ptr %base, <vscale x 4 x i64> %mul
+  %gep.bc = bitcast <vscale x 4 x ptr> %gep to <vscale x 4 x ptr>
+  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %gep.bc, i32 2, <vscale x 4 x i1> %pg)
   ret void
 }
 
@@ -326,8 +326,8 @@ define <vscale x 2 x i64> @masked_gather_nxv2i64_const_with_vec_offsets(<vscale
 ; CHECK-NEXT:    mov w8, #8
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8, z0.d, lsl #3]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i64, i64* inttoptr (i64 8 to i64*), <vscale x 2 x i64> %vector_offsets
-  %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> undef)
+  %ptrs = getelementptr i64, ptr inttoptr (i64 8 to ptr), <vscale x 2 x i64> %vector_offsets
+  %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> undef)
   ret <vscale x 2 x i64> %data
 }
 
@@ -340,8 +340,8 @@ define <vscale x 2 x i64> @masked_gather_nxv2i64_null_with_vec_plus_scalar_offse
   %scalar_offset.ins = insertelement <vscale x 2 x i64> undef, i64 %scalar_offset, i64 0
   %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %offsets = add <vscale x 2 x i64> %vector_offsets, %scalar_offset.splat
-  %ptrs = getelementptr i64, i64* null, <vscale x 2 x i64> %offsets
-  %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> undef)
+  %ptrs = getelementptr i64, ptr null, <vscale x 2 x i64> %offsets
+  %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> undef)
   ret <vscale x 2 x i64> %data
 }
 
@@ -354,12 +354,12 @@ define <vscale x 2 x i64> @masked_gather_nxv2i64_null_with__vec_plus_imm_offsets
   %scalar_offset.ins = insertelement <vscale x 2 x i64> undef, i64 1, i64 0
   %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %offsets = add <vscale x 2 x i64> %vector_offsets, %scalar_offset.splat
-  %ptrs = getelementptr i64, i64* null, <vscale x 2 x i64> %offsets
-  %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> undef)
+  %ptrs = getelementptr i64, ptr null, <vscale x 2 x i64> %offsets
+  %data = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg, <vscale x 2 x i64> undef)
   ret <vscale x 2 x i64> %data
 }
 
-define <vscale x 4 x i32> @masked_gather_nxv4i32_s8_offsets(i32* %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask) #0 {
+define <vscale x 4 x i32> @masked_gather_nxv4i32_s8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv4i32_s8_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p1.s
@@ -367,24 +367,24 @@ define <vscale x 4 x i32> @masked_gather_nxv4i32_s8_offsets(i32* %base, <vscale
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, sxtw #2]
 ; CHECK-NEXT:    ret
   %offsets.sext = sext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
-  %ptrs = getelementptr i32, i32* %base, <vscale x 4 x i32> %offsets.sext
-  %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i32> %offsets.sext
+  %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
   ret <vscale x 4 x i32> %data
 }
 
-define <vscale x 4 x i32> @masked_gather_nxv4i32_u8_offsets(i32* %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask) #0 {
+define <vscale x 4 x i32> @masked_gather_nxv4i32_u8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv4i32_u8_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and z0.s, z0.s, #0xff
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
-  %ptrs = getelementptr i32, i32* %base, <vscale x 4 x i32> %offsets.zext
-  %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i32> %offsets.zext
+  %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
   ret <vscale x 4 x i32> %data
 }
 
-define <vscale x 4 x i32> @masked_gather_nxv4i32_u32s8_offsets(i32* %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask) #0 {
+define <vscale x 4 x i32> @masked_gather_nxv4i32_u32s8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv4i32_u32s8_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p1.s
@@ -393,8 +393,8 @@ define <vscale x 4 x i32> @masked_gather_nxv4i32_u32s8_offsets(i32* %base, <vsca
 ; CHECK-NEXT:    ret
   %offsets.sext = sext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
   %offsets.sext.zext = zext <vscale x 4 x i32> %offsets.sext to <vscale x 4 x i64>
-  %ptrs = getelementptr i32, i32* %base, <vscale x 4 x i64> %offsets.sext.zext
-  %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i64> %offsets.sext.zext
+  %data = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
   ret <vscale x 4 x i32> %data
 }
 
@@ -404,8 +404,8 @@ define void @masked_scatter_nxv2i64_const_with_vec_offsets(<vscale x 2 x i64> %v
 ; CHECK-NEXT:    mov w8, #8
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x8, z0.d, lsl #3]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i64, i64* inttoptr (i64 8 to i64*), <vscale x 2 x i64> %vector_offsets
-  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %pg)
+  %ptrs = getelementptr i64, ptr inttoptr (i64 8 to ptr), <vscale x 2 x i64> %vector_offsets
+  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg)
   ret void
 }
 
@@ -418,8 +418,8 @@ define void @masked_scatter_nxv2i64_null_with_vec_plus_scalar_offsets(<vscale x
   %scalar_offset.ins = insertelement <vscale x 2 x i64> undef, i64 %scalar_offset, i64 0
   %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %offsets = add <vscale x 2 x i64> %vector_offsets, %scalar_offset.splat
-  %ptrs = getelementptr i64, i64* null, <vscale x 2 x i64> %offsets
-  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %pg)
+  %ptrs = getelementptr i64, ptr null, <vscale x 2 x i64> %offsets
+  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg)
   ret void
 }
 
@@ -432,12 +432,12 @@ define void @masked_scatter_nxv2i64_null_with__vec_plus_imm_offsets(<vscale x 2
   %scalar_offset.ins = insertelement <vscale x 2 x i64> undef, i64 1, i64 0
   %scalar_offset.splat = shufflevector <vscale x 2 x i64> %scalar_offset.ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %offsets = add <vscale x 2 x i64> %vector_offsets, %scalar_offset.splat
-  %ptrs = getelementptr i64, i64* null, <vscale x 2 x i64> %offsets
-  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %pg)
+  %ptrs = getelementptr i64, ptr null, <vscale x 2 x i64> %offsets
+  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %pg)
   ret void
 }
 
-define void @masked_scatter_nxv4i32_s8_offsets(i32* %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %data) #0 {
+define void @masked_scatter_nxv4i32_s8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %data) #0 {
 ; CHECK-LABEL: masked_scatter_nxv4i32_s8_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p1.s
@@ -445,24 +445,24 @@ define void @masked_scatter_nxv4i32_s8_offsets(i32* %base, <vscale x 4 x i8> %of
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x0, z0.s, sxtw #2]
 ; CHECK-NEXT:    ret
   %offsets.sext = sext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
-  %ptrs = getelementptr i32, i32* %base, <vscale x 4 x i32> %offsets.sext
-  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32*> %ptrs, i32 4, <vscale x 4 x i1> %mask)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i32> %offsets.sext
+  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv4i32_u8_offsets(i32* %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %data) #0 {
+define void @masked_scatter_nxv4i32_u8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %data) #0 {
 ; CHECK-LABEL: masked_scatter_nxv4i32_u8_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and z0.s, z0.s, #0xff
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x0, z0.s, uxtw #2]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
-  %ptrs = getelementptr i32, i32* %base, <vscale x 4 x i32> %offsets.zext
-  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32*> %ptrs, i32 4, <vscale x 4 x i1> %mask)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i32> %offsets.zext
+  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv4i32_u32s8_offsets(i32* %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %data) #0 {
+define void @masked_scatter_nxv4i32_u32s8_offsets(ptr %base, <vscale x 4 x i8> %offsets, <vscale x 4 x i1> %mask, <vscale x 4 x i32> %data) #0 {
 ; CHECK-LABEL: masked_scatter_nxv4i32_u32s8_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p1.s
@@ -471,22 +471,22 @@ define void @masked_scatter_nxv4i32_u32s8_offsets(i32* %base, <vscale x 4 x i8>
 ; CHECK-NEXT:    ret
   %offsets.sext = sext <vscale x 4 x i8> %offsets to <vscale x 4 x i32>
   %offsets.sext.zext = zext <vscale x 4 x i32> %offsets.sext to <vscale x 4 x i64>
-  %ptrs = getelementptr i32, i32* %base, <vscale x 4 x i64> %offsets.sext.zext
-  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32*> %ptrs, i32 4, <vscale x 4 x i1> %mask)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i64> %offsets.sext.zext
+  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask)
   ret void
 }
 
 attributes #0 = { "target-features"="+sve" vscale_range(1, 16) }
 
-declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
-declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x i8*>, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
-declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
-declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
+declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
 
-declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8*>, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16*>, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32*>, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half*>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
 
 declare <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
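
One detail worth noting in this file (illustrative sketch below with hypothetical names, not part of the patch): the vector-of-pointer bitcasts such as %t4 and %gep.bc survive the conversion as identity casts from <vscale x 4 x ptr> to <vscale x 4 x ptr>; that is still valid IR and the conversion simply keeps it.

declare void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)

define void @sketch_scatter(ptr %base, <vscale x 4 x i64> %idx, <vscale x 4 x i1> %pg, <vscale x 4 x i8> %data) {
  ; The vector GEP already produces <vscale x 4 x ptr>, so the bitcast is a no-op identity cast.
  %gep = getelementptr [8 x i8], ptr %base, <vscale x 4 x i64> %idx
  %gep.bc = bitcast <vscale x 4 x ptr> %gep to <vscale x 4 x ptr>
  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %gep.bc, i32 2, <vscale x 4 x i1> %pg)
  ret void
}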

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-contiguous-prefetches.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-contiguous-prefetches.ll
index 4bcdaded9c28b..8b5f23e5b0987 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-contiguous-prefetches.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-contiguous-prefetches.ll
@@ -5,113 +5,113 @@
 ;
 ; Testing prfop encodings
 ;
-define void @test_svprf_pldl1strm(<vscale x 16 x i1> %pg, i8* %base) {
+define void @test_svprf_pldl1strm(<vscale x 16 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprf_pldl1strm:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, i8* %base, i32 1)
+  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %base, i32 1)
   ret void
 }
 
-define void @test_svprf_pldl2keep(<vscale x 16 x i1> %pg, i8* %base) {
+define void @test_svprf_pldl2keep(<vscale x 16 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprf_pldl2keep:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfb pldl2keep, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, i8* %base, i32 2)
+  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %base, i32 2)
   ret void
 }
 
-define void @test_svprf_pldl2strm(<vscale x 16 x i1> %pg, i8* %base) {
+define void @test_svprf_pldl2strm(<vscale x 16 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprf_pldl2strm:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfb pldl2strm, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, i8* %base, i32 3)
+  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %base, i32 3)
   ret void
 }
 
-define void @test_svprf_pldl3keep(<vscale x 16 x i1> %pg, i8* %base) {
+define void @test_svprf_pldl3keep(<vscale x 16 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprf_pldl3keep:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfb pldl3keep, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, i8* %base, i32 4)
+  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %base, i32 4)
   ret void
 }
 
-define void @test_svprf_pldl3strm(<vscale x 16 x i1> %pg, i8* %base) {
+define void @test_svprf_pldl3strm(<vscale x 16 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprf_pldl3strm:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfb pldl3strm, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, i8* %base, i32 5)
+  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %base, i32 5)
   ret void
 }
 
-define void @test_svprf_pstl1keep(<vscale x 16 x i1> %pg, i8* %base) {
+define void @test_svprf_pstl1keep(<vscale x 16 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprf_pstl1keep:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfb pstl1keep, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, i8* %base, i32 8)
+  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %base, i32 8)
   ret void
 }
 
-define void @test_svprf_pstl1strm(<vscale x 16 x i1> %pg, i8* %base) {
+define void @test_svprf_pstl1strm(<vscale x 16 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprf_pstl1strm:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfb pstl1strm, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, i8* %base, i32 9)
+  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %base, i32 9)
   ret void
 }
 
-define void @test_svprf_pstl2keep(<vscale x 16 x i1> %pg, i8* %base) {
+define void @test_svprf_pstl2keep(<vscale x 16 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprf_pstl2keep:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfb pstl2keep, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, i8* %base, i32 10)
+  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %base, i32 10)
   ret void
 }
 
-define void @test_svprf_pstl2strm(<vscale x 16 x i1> %pg, i8* %base) {
+define void @test_svprf_pstl2strm(<vscale x 16 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprf_pstl2strm:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfb pstl2strm, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, i8* %base, i32 11)
+  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %base, i32 11)
   ret void
 }
 
-define void @test_svprf_pstl3keep(<vscale x 16 x i1> %pg, i8* %base) {
+define void @test_svprf_pstl3keep(<vscale x 16 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprf_pstl3keep:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfb pstl3keep, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, i8* %base, i32 12)
+  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %base, i32 12)
   ret void
 }
 
-define void @test_svprf_pstl3strm(<vscale x 16 x i1> %pg, i8* %base) {
+define void @test_svprf_pstl3strm(<vscale x 16 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprf_pstl3strm:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfb pstl3strm, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, i8* %base, i32 13)
+  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %base, i32 13)
   ret void
 }
 
@@ -130,7 +130,7 @@ define void @test_svprf_vnum_under(<vscale x 16 x i1> %pg, <vscale x 16 x i8>* %
 ; CHECK-NEXT:    ret
 entry:
   %gep = getelementptr inbounds <vscale x 16 x i8>, <vscale x 16 x i8>* %base, i64 -33, i64 0
-  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, i8* %gep, i32 13)
+  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %gep, i32 13)
   ret void
 }
 
@@ -141,7 +141,7 @@ define void @test_svprf_vnum_min(<vscale x 16 x i1> %pg, <vscale x 16 x i8>* %ba
 ; CHECK-NEXT:    ret
 entry:
   %gep = getelementptr inbounds <vscale x 16 x i8>, <vscale x 16 x i8>* %base, i64 -32, i64 0
-  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, i8* %gep, i32 13)
+  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %gep, i32 13)
   ret void
 }
 
@@ -156,7 +156,7 @@ define void @test_svprf_vnum_over(<vscale x 16 x i1> %pg, <vscale x 16 x i8>* %b
 ; CHECK-NEXT:    ret
 entry:
   %gep = getelementptr inbounds <vscale x 16 x i8>, <vscale x 16 x i8>* %base, i64 32, i64 0
-  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, i8* %gep, i32 13)
+  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %gep, i32 13)
   ret void
 }
 
@@ -167,7 +167,7 @@ define void @test_svprf_vnum_max(<vscale x 16 x i1> %pg, <vscale x 16 x i8>* %ba
 ; CHECK-NEXT:    ret
 entry:
   %gep = getelementptr inbounds <vscale x 16 x i8>, <vscale x 16 x i8>* %base, i64 31, i64 0
-  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, i8* %gep, i32 13)
+  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %gep, i32 13)
   ret void
 }
 
@@ -175,43 +175,43 @@ entry:
 ; scalar contiguous
 ;
 
-define void @test_svprfb(<vscale x 16 x i1> %pg, i8* %base) {
+define void @test_svprfb(<vscale x 16 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprfb:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfb pldl1keep, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, i8* %base, i32 0)
+  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %base, i32 0)
   ret void
 }
 
-define void @test_svprfh(<vscale x 8 x i1> %pg, i8* %base) {
+define void @test_svprfh(<vscale x 8 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprfh:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfh pldl1keep, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.aarch64.sve.prf.nxv8i1(<vscale x 8 x i1> %pg, i8* %base, i32 0)
+  tail call void @llvm.aarch64.sve.prf.nxv8i1(<vscale x 8 x i1> %pg, ptr %base, i32 0)
   ret void
 }
 
-define void @test_svprfw(<vscale x 4 x i1> %pg, i8* %base) {
+define void @test_svprfw(<vscale x 4 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprfw:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfw pldl1keep, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.aarch64.sve.prf.nxv4i1(<vscale x 4 x i1> %pg, i8* %base, i32 0)
+  tail call void @llvm.aarch64.sve.prf.nxv4i1(<vscale x 4 x i1> %pg, ptr %base, i32 0)
   ret void
 }
 
-define void @test_svprfd(<vscale x 2 x i1> %pg, i8* %base) {
+define void @test_svprfd(<vscale x 2 x i1> %pg, ptr %base) {
 ; CHECK-LABEL: test_svprfd:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfd pldl1keep, p0, [x0]
 ; CHECK-NEXT:    ret
 entry:
-  tail call void @llvm.aarch64.sve.prf.nxv2i1(<vscale x 2 x i1> %pg, i8* %base, i32 0)
+  tail call void @llvm.aarch64.sve.prf.nxv2i1(<vscale x 2 x i1> %pg, ptr %base, i32 0)
   ret void
 }
 
@@ -227,8 +227,8 @@ define void @test_svprfh_vnum(<vscale x 8 x i1> %pg, <vscale x 8 x i16>* %base)
 ; CHECK-NEXT:    ret
 entry:
   %gep = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %base, i64 31
-  %addr = bitcast <vscale x 8 x i16>* %gep to i8*
-  tail call void @llvm.aarch64.sve.prf.nxv8i1(<vscale x 8 x i1> %pg, i8* %addr, i32 13)
+  %addr = bitcast <vscale x 8 x i16>* %gep to ptr
+  tail call void @llvm.aarch64.sve.prf.nxv8i1(<vscale x 8 x i1> %pg, ptr %addr, i32 13)
   ret void
 }
 
@@ -239,8 +239,8 @@ define void @test_svprfw_vnum(<vscale x 4 x i1> %pg, <vscale x 4 x i32>* %base)
 ; CHECK-NEXT:    ret
 entry:
   %gep = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %base, i64 31
-  %addr = bitcast <vscale x 4 x i32>* %gep to i8*
-  tail call void @llvm.aarch64.sve.prf.nxv4i1(<vscale x 4 x i1> %pg, i8* %addr, i32 13)
+  %addr = bitcast <vscale x 4 x i32>* %gep to ptr
+  tail call void @llvm.aarch64.sve.prf.nxv4i1(<vscale x 4 x i1> %pg, ptr %addr, i32 13)
   ret void
 }
 
@@ -251,8 +251,8 @@ define void @test_svprfd_vnum(<vscale x 2 x i1> %pg, <vscale x 2 x i64>* %base)
 ; CHECK-NEXT:    ret
 entry:
   %gep = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 31
-  %addr = bitcast <vscale x 2 x i64>* %gep to i8*
-  tail call void @llvm.aarch64.sve.prf.nxv2i1(<vscale x 2 x i1> %pg, i8* %addr, i32 13)
+  %addr = bitcast <vscale x 2 x i64>* %gep to ptr
+  tail call void @llvm.aarch64.sve.prf.nxv2i1(<vscale x 2 x i1> %pg, ptr %addr, i32 13)
   ret void
 }
 
@@ -260,55 +260,52 @@ entry:
 ; scalar + scaled scalar contiguous
 ;
 
-define void @test_svprfb_ss(<vscale x 16 x i1> %pg, i8* %base, i64 %offset) {
+define void @test_svprfb_ss(<vscale x 16 x i1> %pg, ptr %base, i64 %offset) {
 ; CHECK-LABEL: test_svprfb_ss:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfb pstl3strm, p0, [x0, x1]
 ; CHECK-NEXT:    ret
 entry:
-  %addr = getelementptr i8, i8* %base, i64 %offset
-  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, i8* %addr, i32 13)
+  %addr = getelementptr i8, ptr %base, i64 %offset
+  tail call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %addr, i32 13)
   ret void
 }
 
-define void @test_svprfh_ss(<vscale x 8 x i1> %pg, i16* %base, i64 %offset) {
+define void @test_svprfh_ss(<vscale x 8 x i1> %pg, ptr %base, i64 %offset) {
 ; CHECK-LABEL: test_svprfh_ss:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfh pstl3strm, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
 entry:
-  %gep = getelementptr i16, i16* %base, i64 %offset
-  %addr = bitcast i16* %gep to i8*
-  tail call void @llvm.aarch64.sve.prf.nxv8i1(<vscale x 8 x i1> %pg, i8* %addr, i32 13)
+  %gep = getelementptr i16, ptr %base, i64 %offset
+  tail call void @llvm.aarch64.sve.prf.nxv8i1(<vscale x 8 x i1> %pg, ptr %gep, i32 13)
   ret void
 }
 
-define void @test_svprfw_ss(<vscale x 4 x i1> %pg, i32* %base, i64 %offset) {
+define void @test_svprfw_ss(<vscale x 4 x i1> %pg, ptr %base, i64 %offset) {
 ; CHECK-LABEL: test_svprfw_ss:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfw pstl3strm, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
 entry:
-  %gep = getelementptr i32, i32* %base, i64 %offset
-  %addr = bitcast i32* %gep to i8*
-  tail call void @llvm.aarch64.sve.prf.nxv4i1(<vscale x 4 x i1> %pg, i8* %addr, i32 13)
+  %gep = getelementptr i32, ptr %base, i64 %offset
+  tail call void @llvm.aarch64.sve.prf.nxv4i1(<vscale x 4 x i1> %pg, ptr %gep, i32 13)
   ret void
 }
 
-define void @test_svprfd_ss(<vscale x 2 x i1> %pg, i64* %base, i64 %offset) {
+define void @test_svprfd_ss(<vscale x 2 x i1> %pg, ptr %base, i64 %offset) {
 ; CHECK-LABEL: test_svprfd_ss:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    prfd pstl3strm, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
 entry:
-  %gep = getelementptr i64, i64* %base, i64 %offset
-  %addr = bitcast i64* %gep to i8*
-  tail call void @llvm.aarch64.sve.prf.nxv2i1(<vscale x 2 x i1> %pg, i8* %addr, i32 13)
+  %gep = getelementptr i64, ptr %base, i64 %offset
+  tail call void @llvm.aarch64.sve.prf.nxv2i1(<vscale x 2 x i1> %pg, ptr %gep, i32 13)
   ret void
 }
 
 
-declare void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1>, i8*, i32)
-declare void @llvm.aarch64.sve.prf.nxv8i1(<vscale x 8 x i1>,  i8*, i32)
-declare void @llvm.aarch64.sve.prf.nxv4i1(<vscale x 4 x i1>,  i8*, i32)
-declare void @llvm.aarch64.sve.prf.nxv2i1(<vscale x 2 x i1>,  i8*, i32)
+declare void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1>, ptr, i32)
+declare void @llvm.aarch64.sve.prf.nxv8i1(<vscale x 8 x i1>,  ptr, i32)
+declare void @llvm.aarch64.sve.prf.nxv4i1(<vscale x 4 x i1>,  ptr, i32)
+declare void @llvm.aarch64.sve.prf.nxv2i1(<vscale x 2 x i1>,  ptr, i32)

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-32bit-scaled-offsets.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-32bit-scaled-offsets.ll
index aa49708730df8..9c9a8b10376d8 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-32bit-scaled-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-32bit-scaled-offsets.ll
@@ -8,164 +8,164 @@
 ;
 
 ; LDFF1H
-define <vscale x 4 x i32> @gldff1h_s_uxtw_index(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldff1h_s_uxtw_index(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1h_s_uxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                                    i16* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 4 x i32> %b)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @gldff1h_s_sxtw_index(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldff1h_s_sxtw_index(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1h_s_sxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                                    i16* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 4 x i32> %b)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @gldff1h_d_uxtw_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1h_d_uxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1h_d_uxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                                    i16* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1h_d_sxtw_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1h_d_sxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1h_d_sxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.d }, p0/z, [x0, z0.d, sxtw #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                                    i16* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
 ; LDFF1W
-define <vscale x 4 x i32> @gldff1w_s_uxtw_index(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldff1w_s_uxtw_index(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1w_s_uxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i32(<vscale x 4 x i1> %pg,
-                                                                                    i32* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 4 x i32> @gldff1w_s_sxtw_index(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldff1w_s_sxtw_index(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1w_s_sxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.s }, p0/z, [x0, z0.s, sxtw #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i32(<vscale x 4 x i1> %pg,
-                                                                                    i32* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 2 x i64> @gldff1w_d_uxtw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1w_d_uxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1w_d_uxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.d }, p0/z, [x0, z0.d, uxtw #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                                    i32* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1w_d_sxtw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1w_d_sxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1w_d_sxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.d }, p0/z, [x0, z0.d, sxtw #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                                    i32* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 4 x float> @gldff1w_s_uxtw_index_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x float> @gldff1w_s_uxtw_index_float(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1w_s_uxtw_index_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4f32(<vscale x 4 x i1> %pg,
-                                                                                      float* %base,
+                                                                                      ptr %base,
                                                                                       <vscale x 4 x i32> %b)
   ret <vscale x 4 x float> %load
 }
 
-define <vscale x 4 x float> @gldff1w_s_sxtw_index_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x float> @gldff1w_s_sxtw_index_float(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1w_s_sxtw_index_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.s }, p0/z, [x0, z0.s, sxtw #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4f32(<vscale x 4 x i1> %pg,
-                                                                                      float* %base,
+                                                                                      ptr %base,
                                                                                       <vscale x 4 x i32> %b)
   ret <vscale x 4 x float> %load
 }
 
 ; LDFF1D
-define <vscale x 2 x i64> @gldff1d_s_uxtw_index(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1d_s_uxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1d_s_uxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1d { z0.d }, p0/z, [x0, z0.d, uxtw #3]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                                    i64* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 2 x i32> %b)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x i64> @gldff1d_sxtw_index(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1d_sxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1d_sxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1d { z0.d }, p0/z, [x0, z0.d, sxtw #3]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                                    i64* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 2 x i32> %b)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @gldff1d_uxtw_index_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x double> @gldff1d_uxtw_index_double(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1d_uxtw_index_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1d { z0.d }, p0/z, [x0, z0.d, uxtw #3]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                                       double* %base,
+                                                                                       ptr %base,
                                                                                        <vscale x 2 x i32> %b)
   ret <vscale x 2 x double> %load
 }
 
-define <vscale x 2 x double> @gldff1d_sxtw_index_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x double> @gldff1d_sxtw_index_double(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1d_sxtw_index_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1d { z0.d }, p0/z, [x0, z0.d, sxtw #3]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                                       double* %base,
+                                                                                       ptr %base,
                                                                                        <vscale x 2 x i32> %b)
   ret <vscale x 2 x double> %load
 }
@@ -177,74 +177,74 @@ define <vscale x 2 x double> @gldff1d_sxtw_index_double(<vscale x 2 x i1> %pg, d
 ;
 
 ; LDFF1SH
-define <vscale x 4 x i32> @gldff1sh_s_uxtw_index(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldff1sh_s_uxtw_index(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1sh_s_uxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sh { z0.s }, p0/z, [x0, z0.s, uxtw #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                                    i16* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 4 x i32> %b)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @gldff1sh_s_sxtw_index(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldff1sh_s_sxtw_index(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1sh_s_sxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sh { z0.s }, p0/z, [x0, z0.s, sxtw #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                                    i16* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 4 x i32> %b)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @gldff1sh_d_uxtw_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1sh_d_uxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1sh_d_uxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sh { z0.d }, p0/z, [x0, z0.d, uxtw #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                                    i16* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1sh_d_sxtw_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1sh_d_sxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1sh_d_sxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sh { z0.d }, p0/z, [x0, z0.d, sxtw #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                                    i16* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
 ; LDFF1SW
-define <vscale x 2 x i64> @gldff1sw_d_uxtw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1sw_d_uxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1sw_d_uxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sw { z0.d }, p0/z, [x0, z0.d, uxtw #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                                    i32* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1sw_d_sxtw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1sw_d_sxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1sw_d_sxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sw { z0.d }, p0/z, [x0, z0.d, sxtw #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                                    i32* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -252,25 +252,25 @@ define <vscale x 2 x i64> @gldff1sw_d_sxtw_index(<vscale x 2 x i1> %pg, i32* %ba
 
 
 ; LDFF1H/LDFF1SH
-declare <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
-declare <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
 
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i32>)
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i32>)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv2i16(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv2i16(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
 ; LDFF1W/LDFF1SW
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
 
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i32>)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i32>)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv2i32(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv2i32(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
-declare <vscale x 4 x float> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4f32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)
-declare <vscale x 4 x float> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4f32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4f32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4f32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
 
 ; LDFF1D
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i32>)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv2i64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv2i64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
-declare <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv2f64(<vscale x 2 x i1>, double*, <vscale x 2 x i32>)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv2f64(<vscale x 2 x i1>, double*, <vscale x 2 x i32>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv2f64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv2f64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-32bit-unscaled-offsets.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-32bit-unscaled-offsets.ll
index b076f2e007baa..ab90115ee1993 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-32bit-unscaled-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-32bit-unscaled-offsets.ll
@@ -8,213 +8,213 @@
 ;
 
 ; LDFF1B
-define <vscale x 4 x i32> @gldff1b_s_uxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldff1b_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1b_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1b { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg,
-                                                                            i8* %base,
+                                                                            ptr %base,
                                                                             <vscale x 4 x i32> %b)
   %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @gldff1b_s_sxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldff1b_s_sxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1b_s_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1b { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8(<vscale x 4 x i1> %pg,
-                                                                            i8* %base,
+                                                                            ptr %base,
                                                                             <vscale x 4 x i32> %b)
   %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @gldff1b_d_uxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1b_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1b_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1b { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                            i8* %base,
+                                                                            ptr %base,
                                                                             <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1b_d_sxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1b_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1b_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1b { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                            i8* %base,
+                                                                            ptr %base,
                                                                             <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
 ; LDFF1H
-define <vscale x 4 x i32> @gldff1h_s_uxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldff1h_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1h_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                              i16* %base,
+                                                                              ptr %base,
                                                                               <vscale x 4 x i32> %b)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @gldff1h_s_sxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldff1h_s_sxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1h_s_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                              i16* %base,
+                                                                              ptr %base,
                                                                               <vscale x 4 x i32> %b)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @gldff1h_d_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1h_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1h_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                              i16* %base,
+                                                                              ptr %base,
                                                                               <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1h_d_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1h_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1h_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                              i16* %base,
+                                                                              ptr %base,
                                                                               <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
 ; LDFF1W
-define <vscale x 4 x i32> @gldff1w_s_uxtw(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldff1w_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1w_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i32(<vscale x 4 x i1> %pg,
-                                                                              i32* %base,
+                                                                              ptr %base,
                                                                               <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 4 x i32> @gldff1w_s_sxtw(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldff1w_s_sxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1w_s_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i32(<vscale x 4 x i1> %pg,
-                                                                              i32* %base,
+                                                                              ptr %base,
                                                                               <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 2 x i64> @gldff1w_d_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1w_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1w_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                              i32* %base,
+                                                                              ptr %base,
                                                                               <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1w_d_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1w_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1w_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                              i32* %base,
+                                                                              ptr %base,
                                                                               <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 4 x float> @gldff1w_s_uxtw_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x float> @gldff1w_s_uxtw_float(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1w_s_uxtw_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4f32(<vscale x 4 x i1> %pg,
-                                                                                float* %base,
+                                                                                ptr %base,
                                                                                 <vscale x 4 x i32> %b)
   ret <vscale x 4 x float> %load
 }
 
-define <vscale x 4 x float> @gldff1w_s_sxtw_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x float> @gldff1w_s_sxtw_float(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1w_s_sxtw_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4f32(<vscale x 4 x i1> %pg,
-                                                                                float* %base,
+                                                                                ptr %base,
                                                                                 <vscale x 4 x i32> %b)
   ret <vscale x 4 x float> %load
 }
 
 ; LDFF1D
-define <vscale x 2 x i64> @gldff1d_d_uxtw(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1d_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1d_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1d { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                              i64* %base,
+                                                                              ptr %base,
                                                                               <vscale x 2 x i32> %b)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x i64> @gldff1d_d_sxtw(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1d_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1d_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1d { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                              i64* %base,
+                                                                              ptr %base,
                                                                               <vscale x 2 x i32> %b)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @gldff1d_d_uxtw_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x double> @gldff1d_d_uxtw_double(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1d_d_uxtw_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1d { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                                 double* %base,
+                                                                                 ptr %base,
                                                                                  <vscale x 2 x i32> %b)
   ret <vscale x 2 x double> %load
 }
 
-define <vscale x 2 x double> @gldff1d_d_sxtw_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x double> @gldff1d_d_sxtw_double(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1d_d_sxtw_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1d { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                                 double* %base,
+                                                                                 ptr %base,
                                                                                  <vscale x 2 x i32> %b)
   ret <vscale x 2 x double> %load
 }
@@ -226,152 +226,152 @@ define <vscale x 2 x double> @gldff1d_d_sxtw_double(<vscale x 2 x i1> %pg, doubl
 ;
 
 ; LDFF1SB
-define <vscale x 4 x i32> @gldff1sb_s_uxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldff1sb_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1sb_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sb { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg,
-                                                                            i8* %base,
+                                                                            ptr %base,
                                                                             <vscale x 4 x i32> %b)
   %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @gldff1sb_s_sxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldff1sb_s_sxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1sb_s_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sb { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8(<vscale x 4 x i1> %pg,
-                                                                            i8* %base,
+                                                                            ptr %base,
                                                                             <vscale x 4 x i32> %b)
   %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @gldff1sb_d_uxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1sb_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1sb_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sb { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                            i8* %base,
+                                                                            ptr %base,
                                                                             <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1sb_d_sxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1sb_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1sb_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sb { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                            i8* %base,
+                                                                            ptr %base,
                                                                             <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
 ; LDFF1SH
-define <vscale x 4 x i32> @gldff1sh_s_uxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldff1sh_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1sh_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sh { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                              i16* %base,
+                                                                              ptr %base,
                                                                               <vscale x 4 x i32> %b)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @gldff1sh_s_sxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldff1sh_s_sxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldff1sh_s_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sh { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                              i16* %base,
+                                                                              ptr %base,
                                                                               <vscale x 4 x i32> %b)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @gldff1sh_d_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1sh_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1sh_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sh { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                              i16* %base,
+                                                                              ptr %base,
                                                                               <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1sh_d_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1sh_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1sh_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sh { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                              i16* %base,
+                                                                              ptr %base,
                                                                               <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
 ; LDFF1SW
-define <vscale x 2 x i64> @gldff1sw_d_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1sw_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1sw_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sw { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                              i32* %base,
+                                                                              ptr %base,
                                                                               <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1sw_d_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gldff1sw_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gldff1sw_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sw { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                              i32* %base,
+                                                                              ptr %base,
                                                                               <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
 ; LDFF1B/LDFF1SB
-declare <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
-declare <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i8(<vscale x 2 x i1>, i8*, <vscale x 2 x i32>)
-declare <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
-declare <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i8(<vscale x 2 x i1>, i8*, <vscale x 2 x i32>)
+declare <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i8(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i8(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
 ; LDFF1H/LDFF1SH
-declare <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i32>)
-declare <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i32>)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i16(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i16(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
 ; LDFF1W/LDFF1SW
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i32>)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i32>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i32(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i32(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
-declare <vscale x 4 x float> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4f32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)
-declare <vscale x 4 x float> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4f32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4f32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4f32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
 
 ; LDFF1D
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i32>)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2i64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2i64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
-declare <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2f64(<vscale x 2 x i1>, double*, <vscale x 2 x i32>)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2f64(<vscale x 2 x i1>, double*, <vscale x 2 x i32>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv2f64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv2f64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-64bit-scaled-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-64bit-scaled-offset.ll
index a1f842769dae5..049a446c1d7bd 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-64bit-scaled-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-64bit-scaled-offset.ll
@@ -6,48 +6,48 @@
 ;   e.g. ldff1h z0.d, p0/z, [x0, z0.d, lsl #1]
 ;
 
-define <vscale x 2 x i64> @gldff1h_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldff1h_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldff1h_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.d }, p0/z, [x0, z0.d, lsl #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                               i16* %base,
+                                                                               ptr %base,
                                                                                <vscale x 2 x i64> %b)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1w_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldff1w_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldff1w_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.d }, p0/z, [x0, z0.d, lsl #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                               i32* %base,
+                                                                               ptr %base,
                                                                                <vscale x 2 x i64> %b)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1d_index(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldff1d_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldff1d_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1d { z0.d }, p0/z, [x0, z0.d, lsl #3]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.index.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                               i64* %base,
+                                                                               ptr %base,
                                                                                <vscale x 2 x i64> %b)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @gldff1d_index_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x double> @gldff1d_index_double(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldff1d_index_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1d { z0.d }, p0/z, [x0, z0.d, lsl #3]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.index.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                                  double* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 2 x i64> %b)
   ret <vscale x 2 x double> %load
 }
@@ -57,31 +57,31 @@ define <vscale x 2 x double> @gldff1d_index_double(<vscale x 2 x i1> %pg, double
 ;   e.g. ldff1sh z0.d, p0/z, [x0, z0.d, lsl #1]
 ;
 
-define <vscale x 2 x i64> @gldff1sh_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldff1sh_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldff1sh_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sh { z0.d }, p0/z, [x0, z0.d, lsl #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                               i16* %base,
+                                                                               ptr %base,
                                                                                <vscale x 2 x i64> %b)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1sw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldff1sw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldff1sw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sw { z0.d }, p0/z, [x0, z0.d, lsl #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                               i32* %base,
+                                                                               ptr %base,
                                                                                <vscale x 2 x i64> %b)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.index.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.index.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.index.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i64>)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.index.nxv2f64(<vscale x 2 x i1>, double*, <vscale x 2 x i64>)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.index.nxv2i16(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.index.nxv2i32(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.index.nxv2i64(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.index.nxv2f64(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-64bit-unscaled-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-64bit-unscaled-offset.ll
index 140747340ab17..d89394a30c504 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-64bit-unscaled-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ff-gather-loads-64bit-unscaled-offset.ll
@@ -6,60 +6,60 @@
 ;   e.g. ldff1h { z0.d }, p0/z, [x0, z0.d]
 ;
 
-define <vscale x 2 x i64> @gldff1b_d(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldff1b_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldff1b_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1b { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                       i8* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %b)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1h_d(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldff1h_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldff1h_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                         i16* %base,
+                                                                         ptr %base,
                                                                          <vscale x 2 x i64> %b)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1w_d(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
+define <vscale x 2 x i64> @gldff1w_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: gldff1w_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                         i32* %base,
+                                                                         ptr %base,
                                                                          <vscale x 2 x i64> %offsets)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1d_d(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldff1d_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldff1d_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1d { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                         i64* %base,
+                                                                         ptr %base,
                                                                          <vscale x 2 x i64> %b)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @gldff1d_d_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x double> @gldff1d_d_double(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldff1d_d_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1d { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                            double* %base,
+                                                                            ptr %base,
                                                                             <vscale x 2 x i64> %b)
   ret <vscale x 2 x double> %load
 }
@@ -69,44 +69,44 @@ define <vscale x 2 x double> @gldff1d_d_double(<vscale x 2 x i1> %pg, double* %b
 ;   e.g. ldff1sh { z0.d }, p0/z, [x0, z0.d]
 ;
 
-define <vscale x 2 x i64> @gldff1sb_d(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldff1sb_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldff1sb_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sb { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                       i8* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %b)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1sh_d(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldff1sh_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldff1sh_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sh { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                         i16* %base,
+                                                                         ptr %base,
                                                                          <vscale x 2 x i64> %b)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldff1sw_d(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
+define <vscale x 2 x i64> @gldff1sw_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: gldff1sw_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sw { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                         i32* %base,
+                                                                         ptr %base,
                                                                          <vscale x 2 x i64> %offsets)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-declare <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.nxv2i8(<vscale x 2 x i1>, i8*, <vscale x 2 x i64>)
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i64>)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.nxv2f64(<vscale x 2 x i1>, double*, <vscale x 2 x i64>)
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.gather.nxv2i8(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.nxv2i16(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.gather.nxv2i32(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.gather.nxv2i64(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ldff1.gather.nxv2f64(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-32bit-scaled-offsets.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-32bit-scaled-offsets.ll
index 33b94b553ff9e..c3704db6cf760 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-32bit-scaled-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-32bit-scaled-offsets.ll
@@ -8,164 +8,164 @@
 ;
 
 ; LD1H
-define <vscale x 4 x i32> @gld1h_s_uxtw_index(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gld1h_s_uxtw_index(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1h_s_uxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                                  i16* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 4 x i32> %b)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @gld1h_s_sxtw_index(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gld1h_s_sxtw_index(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1h_s_sxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                                  i16* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 4 x i32> %b)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @gld1h_d_uxtw_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1h_d_uxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1h_d_uxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                                  i16* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1h_d_sxtw_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1h_d_sxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1h_d_sxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                                  i16* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
 ; LD1W
-define <vscale x 4 x i32> @gld1w_s_uxtw_index(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gld1w_s_uxtw_index(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1w_s_uxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32(<vscale x 4 x i1> %pg,
-                                                                                  i32* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 4 x i32> @gld1w_s_sxtw_index(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gld1w_s_sxtw_index(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1w_s_sxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, sxtw #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32(<vscale x 4 x i1> %pg,
-                                                                                  i32* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 2 x i64> @gld1w_d_uxtw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1w_d_uxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1w_d_uxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, uxtw #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                                  i32* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1w_d_sxtw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1w_d_sxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1w_d_sxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, sxtw #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                                  i32* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 4 x float> @gld1w_s_uxtw_index_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x float> @gld1w_s_uxtw_index_float(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1w_s_uxtw_index_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4f32(<vscale x 4 x i1> %pg,
-                                                                                    float* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 4 x i32> %b)
   ret <vscale x 4 x float> %load
 }
 
-define <vscale x 4 x float> @gld1w_s_sxtw_index_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x float> @gld1w_s_sxtw_index_float(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1w_s_sxtw_index_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, sxtw #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4f32(<vscale x 4 x i1> %pg,
-                                                                                    float* %base,
+                                                                                    ptr %base,
                                                                                     <vscale x 4 x i32> %b)
   ret <vscale x 4 x float> %load
 }
 
 ; LD1D
-define <vscale x 2 x i64> @gld1d_s_uxtw_index(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1d_s_uxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1d_s_uxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, uxtw #3]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                                  i64* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 2 x i32> %b)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x i64> @gld1d_sxtw_index(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1d_sxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1d_sxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, sxtw #3]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                                  i64* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 2 x i32> %b)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @gld1d_uxtw_index_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x double> @gld1d_uxtw_index_double(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1d_uxtw_index_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, uxtw #3]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                                     double* %base,
+                                                                                     ptr %base,
                                                                                      <vscale x 2 x i32> %b)
   ret <vscale x 2 x double> %load
 }
 
-define <vscale x 2 x double> @gld1d_sxtw_index_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x double> @gld1d_sxtw_index_double(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1d_sxtw_index_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, sxtw #3]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                                     double* %base,
+                                                                                     ptr %base,
                                                                                      <vscale x 2 x i32> %b)
   ret <vscale x 2 x double> %load
 }
@@ -177,74 +177,74 @@ define <vscale x 2 x double> @gld1d_sxtw_index_double(<vscale x 2 x i1> %pg, dou
 ;
 
 ; LD1SH
-define <vscale x 4 x i32> @gld1sh_s_uxtw_index(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gld1sh_s_uxtw_index(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1sh_s_uxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0, z0.s, uxtw #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                                  i16* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 4 x i32> %b)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @gld1sh_s_sxtw_index(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gld1sh_s_sxtw_index(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1sh_s_sxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0, z0.s, sxtw #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                                  i16* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 4 x i32> %b)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @gld1sh_d_uxtw_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1sh_d_uxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1sh_d_uxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                                  i16* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1sh_d_sxtw_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1sh_d_sxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1sh_d_sxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                                  i16* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
 ; LD1SW
-define <vscale x 2 x i64> @gld1sw_d_uxtw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1sw_d_uxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1sw_d_uxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                                  i32* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1sw_d_sxtw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1sw_d_sxtw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1sw_d_sxtw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                                  i32* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -252,25 +252,25 @@ define <vscale x 2 x i64> @gld1sw_d_sxtw_index(<vscale x 2 x i1> %pg, i32* %base
 
 
 ; LD1H/LD1SH
-declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
-declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
 
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i32>)
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i32>)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i16(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i16(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
 ; LD1W/LD1SW
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
 
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i32>)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i32>)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i32(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i32(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
-declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4f32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)
-declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4f32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4f32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4f32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
 
 ; LD1D
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i32>)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
-declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2f64(<vscale x 2 x i1>, double*, <vscale x 2 x i32>)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2f64(<vscale x 2 x i1>, double*, <vscale x 2 x i32>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2f64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2f64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-32bit-unscaled-offsets.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-32bit-unscaled-offsets.ll
index 5ad8fa807880e..be64d5fc7d43f 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-32bit-unscaled-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-32bit-unscaled-offsets.ll
@@ -8,213 +8,213 @@
 ;
 
 ; LD1B
-define <vscale x 4 x i32> @gld1b_s_uxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gld1b_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1b_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg,
-                                                                          i8* %base,
+                                                                          ptr %base,
                                                                           <vscale x 4 x i32> %b)
   %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @gld1b_s_sxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gld1b_s_sxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1b_s_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8(<vscale x 4 x i1> %pg,
-                                                                          i8* %base,
+                                                                          ptr %base,
                                                                           <vscale x 4 x i32> %b)
   %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @gld1b_d_uxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1b_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1b_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                          i8* %base,
+                                                                          ptr %base,
                                                                           <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1b_d_sxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1b_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1b_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                          i8* %base,
+                                                                          ptr %base,
                                                                           <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
 ; LD1H
-define <vscale x 4 x i32> @gld1h_s_uxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gld1h_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1h_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                            i16* %base,
+                                                                            ptr %base,
                                                                             <vscale x 4 x i32> %b)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @gld1h_s_sxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gld1h_s_sxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1h_s_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                            i16* %base,
+                                                                            ptr %base,
                                                                             <vscale x 4 x i32> %b)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @gld1h_d_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1h_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1h_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                            i16* %base,
+                                                                            ptr %base,
                                                                             <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1h_d_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1h_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1h_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                            i16* %base,
+                                                                            ptr %base,
                                                                             <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
 ; LD1W
-define <vscale x 4 x i32> @gld1w_s_uxtw(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gld1w_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1w_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32(<vscale x 4 x i1> %pg,
-                                                                            i32* %base,
+                                                                            ptr %base,
                                                                             <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 4 x i32> @gld1w_s_sxtw(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gld1w_s_sxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1w_s_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32(<vscale x 4 x i1> %pg,
-                                                                            i32* %base,
+                                                                            ptr %base,
                                                                             <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 2 x i64> @gld1w_d_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1w_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1w_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                            i32* %base,
+                                                                            ptr %base,
                                                                             <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1w_d_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1w_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1w_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                            i32* %base,
+                                                                            ptr %base,
                                                                             <vscale x 2 x i32> %b)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 4 x float> @gld1w_s_uxtw_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x float> @gld1w_s_uxtw_float(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1w_s_uxtw_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32(<vscale x 4 x i1> %pg,
-                                                                              float* %base,
+                                                                              ptr %base,
                                                                               <vscale x 4 x i32> %b)
   ret <vscale x 4 x float> %load
 }
 
-define <vscale x 4 x float> @gld1w_s_sxtw_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x float> @gld1w_s_sxtw_float(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1w_s_sxtw_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32(<vscale x 4 x i1> %pg,
-                                                                              float* %base,
+                                                                              ptr %base,
                                                                               <vscale x 4 x i32> %b)
   ret <vscale x 4 x float> %load
 }
 
 ; LD1D
-define <vscale x 2 x i64> @gld1d_d_uxtw(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1d_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1d_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                            i64* %base,
+                                                                            ptr %base,
                                                                             <vscale x 2 x i32> %b)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x i64> @gld1d_d_sxtw(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1d_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1d_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                            i64* %base,
+                                                                            ptr %base,
                                                                             <vscale x 2 x i32> %b)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @gld1d_d_uxtw_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x double> @gld1d_d_uxtw_double(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1d_d_uxtw_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                               double* %base,
+                                                                               ptr %base,
                                                                                <vscale x 2 x i32> %b)
   ret <vscale x 2 x double> %load
 }
 
-define <vscale x 2 x double> @gld1d_d_sxtw_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x double> @gld1d_d_sxtw_double(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1d_d_sxtw_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                               double* %base,
+                                                                               ptr %base,
                                                                                <vscale x 2 x i32> %b)
   ret <vscale x 2 x double> %load
 }
@@ -226,152 +226,152 @@ define <vscale x 2 x double> @gld1d_d_sxtw_double(<vscale x 2 x i1> %pg, double*
 ;
 
 ; LD1SB
-define <vscale x 4 x i32> @gld1sb_s_uxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gld1sb_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1sb_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg,
-                                                                          i8* %base,
+                                                                          ptr %base,
                                                                           <vscale x 4 x i32> %b)
   %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @gld1sb_s_sxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gld1sb_s_sxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1sb_s_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8(<vscale x 4 x i1> %pg,
-                                                                          i8* %base,
+                                                                          ptr %base,
                                                                           <vscale x 4 x i32> %b)
   %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @gld1sb_d_uxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1sb_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1sb_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                          i8* %base,
+                                                                          ptr %base,
                                                                           <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1sb_d_sxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1sb_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1sb_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                          i8* %base,
+                                                                          ptr %base,
                                                                           <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
 ; LD1SH
-define <vscale x 4 x i32> @gld1sh_s_uxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gld1sh_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1sh_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                            i16* %base,
+                                                                            ptr %base,
                                                                             <vscale x 4 x i32> %b)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @gld1sh_s_sxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gld1sh_s_sxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1sh_s_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                            i16* %base,
+                                                                            ptr %base,
                                                                             <vscale x 4 x i32> %b)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @gld1sh_d_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1sh_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1sh_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                            i16* %base,
+                                                                            ptr %base,
                                                                             <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1sh_d_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1sh_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1sh_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                            i16* %base,
+                                                                            ptr %base,
                                                                             <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
 ; LD1SW
-define <vscale x 2 x i64> @gld1sw_d_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1sw_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1sw_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                            i32* %base,
+                                                                            ptr %base,
                                                                             <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1sw_d_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
+define <vscale x 2 x i64> @gld1sw_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %b) {
 ; CHECK-LABEL: gld1sw_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                            i32* %base,
+                                                                            ptr %base,
                                                                             <vscale x 2 x i32> %b)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
 ; LD1B/LD1SB
-declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
-declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8(<vscale x 2 x i1>, i8*, <vscale x 2 x i32>)
-declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
-declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8(<vscale x 2 x i1>, i8*, <vscale x 2 x i32>)
+declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
 ; LD1H/LD1SH
-declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i32>)
-declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i32>)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
 ; LD1W/LD1SW
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i32>)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i32>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
-declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)
-declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
 
 ; LD1D
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i32>)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
-declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2f64(<vscale x 2 x i1>, double*, <vscale x 2 x i32>)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2f64(<vscale x 2 x i1>, double*, <vscale x 2 x i32>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2f64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2f64(<vscale x 2 x i1>, ptr, <vscale x 2 x i32>)

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-scaled-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-scaled-offset.ll
index 35747ed79437a..4ebe57bad6891 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-scaled-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-scaled-offset.ll
@@ -6,48 +6,48 @@
 ;   e.g. ld1h z0.d, p0/z, [x0, z0.d, lsl #1]
 ;
 
-define <vscale x 2 x i64> @gld1h_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1h_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1h_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                             i16* %base,
+                                                                             ptr %base,
                                                                              <vscale x 2 x i64> %b)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1w_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1w_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1w_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, lsl #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                             i32* %base,
+                                                                             ptr %base,
                                                                              <vscale x 2 x i64> %b)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1d_index(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1d_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1d_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, lsl #3]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.index.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                             i64* %base,
+                                                                             ptr %base,
                                                                              <vscale x 2 x i64> %b)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @gld1d_index_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x double> @gld1d_index_double(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1d_index_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, lsl #3]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.index.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                                double* %base,
+                                                                                ptr %base,
                                                                                 <vscale x 2 x i64> %b)
   ret <vscale x 2 x double> %load
 }
@@ -57,25 +57,25 @@ define <vscale x 2 x double> @gld1d_index_double(<vscale x 2 x i1> %pg, double*
 ;   e.g. ld1sh z0.d, p0/z, [x0, z0.d, lsl #1]
 ;
 
-define <vscale x 2 x i64> @gld1sh_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1sh_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1sh_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d, lsl #1]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                             i16* %base,
+                                                                             ptr %base,
                                                                              <vscale x 2 x i64> %b)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1sw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1sw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1sw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d, lsl #2]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                             i32* %base,
+                                                                             ptr %base,
                                                                              <vscale x 2 x i64> %b)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -86,7 +86,7 @@ define <vscale x 2 x i64> @gld1sw_index(<vscale x 2 x i1> %pg, i32* %base, <vsca
 ;   e.g. ld1h z0.d, p0/z, [x0, z0.d, sxtw #1]
 ;
 
-define <vscale x 2 x i64> @gld1h_index_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1h_index_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1h_index_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1]
@@ -95,13 +95,13 @@ define <vscale x 2 x i64> @gld1h_index_sxtw(<vscale x 2 x i1> %pg, i16* %base, <
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                             i16* %base,
+                                                                             ptr %base,
                                                                              <vscale x 2 x i64> %sxtw)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1w_index_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1w_index_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1w_index_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, sxtw #2]
@@ -110,13 +110,13 @@ define <vscale x 2 x i64> @gld1w_index_sxtw(<vscale x 2 x i1> %pg, i32* %base, <
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                             i32* %base,
+                                                                             ptr %base,
                                                                              <vscale x 2 x i64> %sxtw)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1d_index_sxtw(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1d_index_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1d_index_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, sxtw #3]
@@ -125,12 +125,12 @@ define <vscale x 2 x i64> @gld1d_index_sxtw(<vscale x 2 x i1> %pg, i64* %base, <
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.index.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                             i64* %base,
+                                                                             ptr %base,
                                                                              <vscale x 2 x i64> %sxtw)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @gld1d_index_double_sxtw(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x double> @gld1d_index_double_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1d_index_double_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, sxtw #3]
@@ -139,7 +139,7 @@ define <vscale x 2 x double> @gld1d_index_double_sxtw(<vscale x 2 x i1> %pg, dou
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.index.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                                double* %base,
+                                                                                ptr %base,
                                                                                 <vscale x 2 x i64> %sxtw)
   ret <vscale x 2 x double> %load
 }
@@ -149,7 +149,7 @@ define <vscale x 2 x double> @gld1d_index_double_sxtw(<vscale x 2 x i1> %pg, dou
 ;   e.g. ld1sh z0.d, p0/z, [x0, z0.d, sxtw #1]
 ;
 
-define <vscale x 2 x i64> @gld1sh_index_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1sh_index_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1sh_index_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw #1]
@@ -158,13 +158,13 @@ define <vscale x 2 x i64> @gld1sh_index_sxtw(<vscale x 2 x i1> %pg, i16* %base,
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                             i16* %base,
+                                                                             ptr %base,
                                                                              <vscale x 2 x i64> %sxtw)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1sw_index_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1sw_index_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1sw_index_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw #2]
@@ -173,7 +173,7 @@ define <vscale x 2 x i64> @gld1sw_index_sxtw(<vscale x 2 x i1> %pg, i32* %base,
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                             i32* %base,
+                                                                             ptr %base,
                                                                              <vscale x 2 x i64> %sxtw)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -184,7 +184,7 @@ define <vscale x 2 x i64> @gld1sw_index_sxtw(<vscale x 2 x i1> %pg, i32* %base,
 ;   e.g. ld1h z0.d, p0/z, [x0, z0.d, uxtw #1]
 ;
 
-define <vscale x 2 x i64> @gld1h_index_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1h_index_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1h_index_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
@@ -193,13 +193,13 @@ define <vscale x 2 x i64> @gld1h_index_uxtw(<vscale x 2 x i1> %pg, i16* %base, <
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                             i16* %base,
+                                                                             ptr %base,
                                                                              <vscale x 2 x i64> %uxtw)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1w_index_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1w_index_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1w_index_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, uxtw #2]
@@ -208,13 +208,13 @@ define <vscale x 2 x i64> @gld1w_index_uxtw(<vscale x 2 x i1> %pg, i32* %base, <
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                             i32* %base,
+                                                                             ptr %base,
                                                                              <vscale x 2 x i64> %uxtw)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1d_index_uxtw(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1d_index_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1d_index_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, uxtw #3]
@@ -223,12 +223,12 @@ define <vscale x 2 x i64> @gld1d_index_uxtw(<vscale x 2 x i1> %pg, i64* %base, <
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.index.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                             i64* %base,
+                                                                             ptr %base,
                                                                              <vscale x 2 x i64> %uxtw)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @gld1d_index_double_uxtw(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x double> @gld1d_index_double_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1d_index_double_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, uxtw #3]
@@ -237,7 +237,7 @@ define <vscale x 2 x double> @gld1d_index_double_uxtw(<vscale x 2 x i1> %pg, dou
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.index.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                                double* %base,
+                                                                                ptr %base,
                                                                                 <vscale x 2 x i64> %uxtw)
   ret <vscale x 2 x double> %load
 }
@@ -247,7 +247,7 @@ define <vscale x 2 x double> @gld1d_index_double_uxtw(<vscale x 2 x i1> %pg, dou
 ;   e.g. ld1sh z0.d, p0/z, [x0, z0.d, uxtw #1]
 ;
 
-define <vscale x 2 x i64> @gld1sh_index_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1sh_index_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1sh_index_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw #1]
@@ -256,13 +256,13 @@ define <vscale x 2 x i64> @gld1sh_index_uxtw(<vscale x 2 x i1> %pg, i16* %base,
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                             i16* %base,
+                                                                             ptr %base,
                                                                              <vscale x 2 x i64> %uxtw)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1sw_index_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1sw_index_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1sw_index_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw #2]
@@ -271,16 +271,16 @@ define <vscale x 2 x i64> @gld1sw_index_uxtw(<vscale x 2 x i1> %pg, i32* %base,
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                             i32* %base,
+                                                                             ptr %base,
                                                                              <vscale x 2 x i64> %uxtw)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.index.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.index.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i64>)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.index.nxv2f64(<vscale x 2 x i1>, double*, <vscale x 2 x i64>)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.index.nxv2i32(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.index.nxv2i64(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.index.nxv2f64(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
 
 declare <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-unscaled-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-unscaled-offset.ll
index 1dc15e0f1c844..e96411596613c 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-unscaled-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-64bit-unscaled-offset.ll
@@ -6,60 +6,60 @@
 ;   e.g. ld1h { z0.d }, p0/z, [x0, z0.d]
 ;
 
-define <vscale x 2 x i64> @gld1b_d(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1b_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1b_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                     i8* %base,
+                                                                     ptr %base,
                                                                      <vscale x 2 x i64> %b)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1h_d(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1h_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1h_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                       i16* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %b)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1w_d(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
+define <vscale x 2 x i64> @gld1w_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: gld1w_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                       i32* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %offsets)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1d_d(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1d_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1d_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                       i64* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %b)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @gld1d_d_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x double> @gld1d_d_double(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1d_d_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                       double* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %b)
   ret <vscale x 2 x double> %load
 }
@@ -69,37 +69,37 @@ define <vscale x 2 x double> @gld1d_d_double(<vscale x 2 x i1> %pg, double* %bas
 ;   e.g. ld1sh { z0.d }, p0/z, [x0, z0.d]
 ;
 
-define <vscale x 2 x i64> @gld1sb_d(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1sb_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1sb_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                     i8* %base,
+                                                                     ptr %base,
                                                                      <vscale x 2 x i64> %b)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1sh_d(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1sh_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1sh_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                       i16* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %b)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1sw_d(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
+define <vscale x 2 x i64> @gld1sw_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: gld1sw_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                       i32* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %offsets)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -110,7 +110,7 @@ define <vscale x 2 x i64> @gld1sw_d(<vscale x 2 x i1> %pg, i32* %base, <vscale x
 ;   e.g. ld1h { z0.d }, p0/z, [x0, z0.d, sxtw]
 ;
 
-define <vscale x 2 x i64> @gld1b_d_sxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1b_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1b_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, z0.d, sxtw]
@@ -119,13 +119,13 @@ define <vscale x 2 x i64> @gld1b_d_sxtw(<vscale x 2 x i1> %pg, i8* %base, <vscal
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                     i8* %base,
+                                                                     ptr %base,
                                                                      <vscale x 2 x i64> %sxtw)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1h_d_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1h_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1h_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, sxtw]
@@ -134,13 +134,13 @@ define <vscale x 2 x i64> @gld1h_d_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vsca
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                       i16* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %sxtw)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1w_d_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
+define <vscale x 2 x i64> @gld1w_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: gld1w_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, sxtw]
@@ -149,13 +149,13 @@ define <vscale x 2 x i64> @gld1w_d_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vsca
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %offsets)
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                       i32* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %sxtw)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1d_d_sxtw(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1d_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1d_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
@@ -164,12 +164,12 @@ define <vscale x 2 x i64> @gld1d_d_sxtw(<vscale x 2 x i1> %pg, i64* %base, <vsca
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                       i64* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %sxtw)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @gld1d_d_double_sxtw(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x double> @gld1d_d_double_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1d_d_double_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
@@ -178,7 +178,7 @@ define <vscale x 2 x double> @gld1d_d_double_sxtw(<vscale x 2 x i1> %pg, double*
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                       double* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %sxtw)
   ret <vscale x 2 x double> %load
 }
@@ -188,7 +188,7 @@ define <vscale x 2 x double> @gld1d_d_double_sxtw(<vscale x 2 x i1> %pg, double*
 ;   e.g. ld1sh { z0.d }, p0/z, [x0, z0.d]
 ;
 
-define <vscale x 2 x i64> @gld1sb_d_sxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1sb_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1sb_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, z0.d, sxtw]
@@ -197,13 +197,13 @@ define <vscale x 2 x i64> @gld1sb_d_sxtw(<vscale x 2 x i1> %pg, i8* %base, <vsca
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                     i8* %base,
+                                                                     ptr %base,
                                                                      <vscale x 2 x i64> %sxtw)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1sh_d_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1sh_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1sh_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw]
@@ -212,13 +212,13 @@ define <vscale x 2 x i64> @gld1sh_d_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vsc
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                       i16* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %sxtw)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1sw_d_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
+define <vscale x 2 x i64> @gld1sw_d_sxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: gld1sw_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw]
@@ -227,7 +227,7 @@ define <vscale x 2 x i64> @gld1sw_d_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vsc
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %offsets)
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                       i32* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %sxtw)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
@@ -238,7 +238,7 @@ define <vscale x 2 x i64> @gld1sw_d_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vsc
 ;   e.g. ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
 ;
 
-define <vscale x 2 x i64> @gld1b_d_uxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1b_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1b_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, z0.d, uxtw]
@@ -247,13 +247,13 @@ define <vscale x 2 x i64> @gld1b_d_uxtw(<vscale x 2 x i1> %pg, i8* %base, <vscal
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                     i8* %base,
+                                                                     ptr %base,
                                                                      <vscale x 2 x i64> %uxtw)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1h_d_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1h_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1h_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
@@ -262,13 +262,13 @@ define <vscale x 2 x i64> @gld1h_d_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vsca
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                       i16* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %uxtw)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1w_d_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
+define <vscale x 2 x i64> @gld1w_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: gld1w_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, uxtw]
@@ -277,13 +277,13 @@ define <vscale x 2 x i64> @gld1w_d_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vsca
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %offsets)
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                       i32* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %uxtw)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1d_d_uxtw(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1d_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1d_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
@@ -292,12 +292,12 @@ define <vscale x 2 x i64> @gld1d_d_uxtw(<vscale x 2 x i1> %pg, i64* %base, <vsca
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                       i64* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %uxtw)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @gld1d_d_double_uxtw(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x double> @gld1d_d_double_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1d_d_double_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
@@ -306,7 +306,7 @@ define <vscale x 2 x double> @gld1d_d_double_uxtw(<vscale x 2 x i1> %pg, double*
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                       double* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %uxtw)
   ret <vscale x 2 x double> %load
 }
@@ -316,7 +316,7 @@ define <vscale x 2 x double> @gld1d_d_double_uxtw(<vscale x 2 x i1> %pg, double*
 ;   e.g. ld1sh { z0.d }, p0/z, [x0, z0.d]
 ;
 
-define <vscale x 2 x i64> @gld1sb_d_uxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1sb_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1sb_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, z0.d, uxtw]
@@ -325,13 +325,13 @@ define <vscale x 2 x i64> @gld1sb_d_uxtw(<vscale x 2 x i1> %pg, i8* %base, <vsca
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                     i8* %base,
+                                                                     ptr %base,
                                                                      <vscale x 2 x i64> %uxtw)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1sh_d_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gld1sh_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1sh_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw]
@@ -340,13 +340,13 @@ define <vscale x 2 x i64> @gld1sh_d_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vsc
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                       i16* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %uxtw)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gld1sw_d_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
+define <vscale x 2 x i64> @gld1sw_d_uxtw(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: gld1sw_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw]
@@ -355,17 +355,17 @@ define <vscale x 2 x i64> @gld1sw_d_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vsc
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %offsets)
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                       i32* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %uxtw)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1>, i8*, <vscale x 2 x i64>)
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i64>)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.nxv2f64(<vscale x 2 x i1>, double*, <vscale x 2 x i64>)
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.nxv2i64(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.nxv2f64(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
 
 declare <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.uxtw.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scalar-base-vector-indexes.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scalar-base-vector-indexes.ll
index 8d27408ba9dd5..e748a3d83f310 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scalar-base-vector-indexes.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-scalar-base-vector-indexes.ll
@@ -2,223 +2,223 @@
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
 
 ; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]    -> 32-bit indexes
-define void @llvm_aarch64_sve_prfb_gather_uxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfb_gather_uxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_uxtw_index_nx4vi32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 1)
   ret void
  }
 
-define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx4vi32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 1)
   ret void
  }
 
 ; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]    -> 32-bit unpacked indexes
 
-define void @llvm_aarch64_sve_prfb_gather_uxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfb_gather_uxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_uxtw_index_nx2vi64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 1)
   ret void
  }
 
-define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_sxtw_index_nx2vi64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 1)
   ret void
  }
 ; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit indexes
-define void @llvm_aarch64_sve_prfb_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfb_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_nx2vi64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfb.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfb.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes, i32 1)
   ret void
  }
 
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
 ; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]    -> 32-bit indexes
-define void @llvm_aarch64_sve_prfh_gather_uxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfh_gather_uxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_uxtw_index_nx4vi32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfh pldl1strm, p0, [x0, z0.s, uxtw #1]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 1)
   ret void
  }
 
-define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx4vi32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfh pldl1strm, p0, [x0, z0.s, sxtw #1]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 1)
   ret void
  }
 
 ; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #1] -> 32-bit unpacked indexes
-define void @llvm_aarch64_sve_prfh_gather_uxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfh_gather_uxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_uxtw_index_nx2vi64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfh pldl1strm, p0, [x0, z0.d, uxtw #1]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 1)
   ret void
  }
 
-define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_index_nx2vi64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfh pldl1strm, p0, [x0, z0.d, sxtw #1]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 1)
   ret void
  }
 
 ; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit indexes
-define void @llvm_aarch64_sve_prfh_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfh_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_nx2vi64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfh pldl1strm, p0, [x0, z0.d, lsl #1]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfh.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfh.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes, i32 1)
   ret void
  }
 
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
 ; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]    -> 32-bit indexes
-define void @llvm_aarch64_sve_prfw_gather_uxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfw_gather_uxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_uxtw_index_nx4vi32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfw pldl1strm, p0, [x0, z0.s, uxtw #2]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 1)
   ret void
  }
 
-define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx4vi32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfw pldl1strm, p0, [x0, z0.s, sxtw #2]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 1)
   ret void
  }
 
 ; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2] -> 32-bit unpacked indexes
-define void @llvm_aarch64_sve_prfw_gather_uxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfw_gather_uxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_uxtw_index_nx2vi64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfw pldl1strm, p0, [x0, z0.d, uxtw #2]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 1)
   ret void
  }
 
-define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_sxtw_index_nx2vi64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfw pldl1strm, p0, [x0, z0.d, sxtw #2]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 1)
   ret void
  }
 
 ; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit indexes
-define void @llvm_aarch64_sve_prfw_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfw_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_nx2vi64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfw pldl1strm, p0, [x0, z0.d, lsl #2]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfw.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfw.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes, i32 1)
   ret void
  }
 
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
 ; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]    -> 32-bit indexes
-define void @llvm_aarch64_sve_prfd_gather_uxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfd_gather_uxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_uxtw_index_nx4vi32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfd pldl1strm, p0, [x0, z0.s, uxtw #3]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 1)
   ret void
  }
 
-define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx4vi32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfd pldl1strm, p0, [x0, z0.s, sxtw #3]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 1)
   ret void
  }
 
 ; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #3] -> 32-bit unpacked indexes
-define void @llvm_aarch64_sve_prfd_gather_uxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfd_gather_uxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_uxtw_index_nx2vi64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfd pldl1strm, p0, [x0, z0.d, uxtw #3]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 1)
   ret void
  }
 
-define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_index_nx2vi64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfd pldl1strm, p0, [x0, z0.d, sxtw #3]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 1)
   ret void
  }
 
 ; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit indexes
-define void @llvm_aarch64_sve_prfd_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes) nounwind {
+define void @llvm_aarch64_sve_prfd_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_nx2vi64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    prfd pldl1strm, p0, [x0, z0.d, lsl #3]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.prfd.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes, i32 1)
+  call void @llvm.aarch64.sve.prfd.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes, i32 1)
   ret void
  }
 
-declare void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 %prfop)
-declare void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 %prfop)
-declare void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 %prfop)
-declare void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 %prfop)
-declare void @llvm.aarch64.sve.prfb.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfb.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes, i32 %prfop)
 
-declare void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 %prfop)
-declare void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 %prfop)
-declare void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 %prfop)
-declare void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 %prfop)
-declare void @llvm.aarch64.sve.prfh.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfh.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes, i32 %prfop)
 
-declare void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 %prfop)
-declare void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 %prfop)
-declare void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 %prfop)
-declare void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 %prfop)
-declare void @llvm.aarch64.sve.prfw.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfw.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes, i32 %prfop)
 
-declare void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 %prfop)
-declare void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %indexes, i32 %prfop)
-declare void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 %prfop)
-declare void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %indexes, i32 %prfop)
-declare void @llvm.aarch64.sve.prfd.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx4vi32(<vscale x 4 x i1> %Pg, ptr %base, <vscale x 4 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.sxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.uxtw.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i32> %indexes, i32 %prfop)
+declare void @llvm.aarch64.sve.prfd.gather.index.nx2vi64(<vscale x 2 x i1> %Pg, ptr %base, <vscale x 2 x i64> %indexes, i32 %prfop)

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-imm.ll
index 74b94ef9ad17f..af17c4eac95fd 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-imm.ll
@@ -5,91 +5,84 @@
 ; LD1B
 ;
 
-define <vscale x 16 x i8> @ld1b_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
+define <vscale x 16 x i8> @ld1b_upper_bound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1b_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 7
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 16 x i8> @ld1b_inbound(<vscale x 16 x i1> %pg, i8* %a) {
+define <vscale x 16 x i8> @ld1b_inbound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1b_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 1
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 4 x i32> @ld1b_s_inbound(<vscale x 4 x i1> %pg, i8* %a) {
+define <vscale x 4 x i32> @ld1b_s_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1b_s_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
-  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x i8>* %base to i8*
-  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %a, i64 7
+  %base_scalar = bitcast <vscale x 4 x i8>* %base to ptr
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ld1sb_s_inbound(<vscale x 4 x i1> %pg, i8* %a) {
+define <vscale x 4 x i32> @ld1sb_s_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1sb_s_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
-  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x i8>* %base to i8*
-  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %a, i64 7
+  %base_scalar = bitcast <vscale x 4 x i8>* %base to ptr
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 16 x i8> @ld1b_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {
+define <vscale x 16 x i8> @ld1b_lower_bound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1b_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, #-8, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -8
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -8
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 16 x i8> @ld1b_out_of_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
+define <vscale x 16 x i8> @ld1b_out_of_upper_bound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1b_out_of_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #8
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 8
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 8
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 16 x i8> @ld1b_out_of_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {
+define <vscale x 16 x i8> @ld1b_out_of_lower_bound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1b_out_of_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #-9
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -9
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -9
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
 
@@ -97,143 +90,132 @@ define <vscale x 16 x i8> @ld1b_out_of_lower_bound(<vscale x 16 x i1> %pg, i8* %
 ; LD1H
 ;
 
-define <vscale x 8 x i16> @ld1b_h_inbound(<vscale x 8 x i1> %pg, i8* %a) {
+define <vscale x 8 x i16> @ld1b_h_inbound(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1b_h_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 8 x i8>*
-  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 8 x i8>* %base to i8*
-  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %a, i64 7
+  %base_scalar = bitcast <vscale x 8 x i8>* %base to ptr
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @ld1sb_h_inbound(<vscale x 8 x i1> %pg, i8* %a) {
+define <vscale x 8 x i16> @ld1sb_h_inbound(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1sb_h_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.h }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 8 x i8>*
-  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 8 x i8>* %base to i8*
-  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %a, i64 7
+  %base_scalar = bitcast <vscale x 8 x i8>* %base to ptr
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @ld1h_inbound(<vscale x 8 x i1> %pg, i16* %a) {
+define <vscale x 8 x i16> @ld1h_inbound(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1h_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i16* %a to <vscale x 8 x i16>*
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 8 x i16>* %base to i16*
-  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %pg, i16* %base_scalar)
+  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %a, i64 1
+  %base_scalar = bitcast <vscale x 8 x i16>* %base to ptr
+  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %pg, ptr %base_scalar)
   ret <vscale x 8 x i16> %load
 }
 
-define <vscale x 4 x i32> @ld1h_s_inbound(<vscale x 4 x i1> %pg, i16* %a) {
+define <vscale x 4 x i32> @ld1h_s_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1h_s_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i16* %a to <vscale x 4 x i16>*
-  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x i16>* %base to i16*
-  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pg, i16* %base_scalar)
+  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %a, i64 7
+  %base_scalar = bitcast <vscale x 4 x i16>* %base to ptr
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ld1sh_s_inbound(<vscale x 4 x i1> %pg, i16* %a) {
+define <vscale x 4 x i32> @ld1sh_s_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1sh_s_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i16* %a to <vscale x 4 x i16>*
-  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x i16>* %base to i16*
-  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pg, i16* %base_scalar)
+  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %a, i64 7
+  %base_scalar = bitcast <vscale x 4 x i16>* %base to ptr
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @ld1b_d_inbound(<vscale x 2 x i1> %pg, i8* %a) {
+define <vscale x 2 x i64> @ld1b_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1b_d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 2 x i8>*
-  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i8>* %base to i8*
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %a, i64 7
+  %base_scalar = bitcast <vscale x 2 x i8>* %base to ptr
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ld1sb_d_inbound(<vscale x 2 x i1> %pg, i8* %a) {
+define <vscale x 2 x i64> @ld1sb_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1sb_d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 2 x i8>*
-  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i8>* %base to i8*
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %a, i64 7
+  %base_scalar = bitcast <vscale x 2 x i8>* %base to ptr
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ld1h_d_inbound(<vscale x 2 x i1> %pg, i16* %a) {
+define <vscale x 2 x i64> @ld1h_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1h_d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i16* %a to <vscale x 2 x i16>*
-  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i16>* %base to i16*
-  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pg, i16* %base_scalar)
+  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %a, i64 7
+  %base_scalar = bitcast <vscale x 2 x i16>* %base to ptr
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ld1sh_d_inbound(<vscale x 2 x i1> %pg, i16* %a) {
+define <vscale x 2 x i64> @ld1sh_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1sh_d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i16* %a to <vscale x 2 x i16>*
-  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i16>* %base to i16*
-  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pg, i16* %base_scalar)
+  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %a, i64 7
+  %base_scalar = bitcast <vscale x 2 x i16>* %base to ptr
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 8 x half> @ld1h_f16_inbound(<vscale x 8 x i1> %pg, half* %a) {
+define <vscale x 8 x half> @ld1h_f16_inbound(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1h_f16_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast half* %a to <vscale x 8 x half>*
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 8 x half>* %base to half*
-  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1> %pg, half* %base_scalar)
+  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %a, i64 1
+  %base_scalar = bitcast <vscale x 8 x half>* %base to ptr
+  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1> %pg, ptr %base_scalar)
   ret <vscale x 8 x half> %load
 }
 
-define <vscale x 8 x bfloat> @ld1h_bf16_inbound(<vscale x 8 x i1> %pg, bfloat* %a) #0 {
+define <vscale x 8 x bfloat> @ld1h_bf16_inbound(<vscale x 8 x i1> %pg, ptr %a) #0 {
 ; CHECK-LABEL: ld1h_bf16_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast bfloat* %a to <vscale x 8 x bfloat>*
-  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 8 x bfloat>* %base to bfloat*
-  %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1.nxv8bf16(<vscale x 8 x i1> %pg, bfloat* %base_scalar)
+  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %a, i64 1
+  %base_scalar = bitcast <vscale x 8 x bfloat>* %base to ptr
+  %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1.nxv8bf16(<vscale x 8 x i1> %pg, ptr %base_scalar)
   ret <vscale x 8 x bfloat> %load
 }
 
@@ -241,27 +223,25 @@ define <vscale x 8 x bfloat> @ld1h_bf16_inbound(<vscale x 8 x i1> %pg, bfloat* %
 ; LD1W
 ;
 
-define <vscale x 4 x i32> @ld1w_inbound(<vscale x 4 x i1> %pg, i32* %a) {
+define <vscale x 4 x i32> @ld1w_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1w_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i32* %a to <vscale x 4 x i32>*
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x i32>* %base to i32*
-  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %pg, i32* %base_scalar)
+  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %a, i64 7
+  %base_scalar = bitcast <vscale x 4 x i32>* %base to ptr
+  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %pg, ptr %base_scalar)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 4 x float> @ld1w_f32_inbound(<vscale x 4 x i1> %pg, float* %a) {
+define <vscale x 4 x float> @ld1w_f32_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1w_f32_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast float* %a to <vscale x 4 x float>*
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x float>* %base to float*
-  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1> %pg, float* %base_scalar)
+  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %a, i64 7
+  %base_scalar = bitcast <vscale x 4 x float>* %base to ptr
+  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1> %pg, ptr %base_scalar)
   ret <vscale x 4 x float> %load
 }
 
@@ -269,73 +249,69 @@ define <vscale x 4 x float> @ld1w_f32_inbound(<vscale x 4 x i1> %pg, float* %a)
 ; LD1D
 ;
 
-define <vscale x 2 x i64> @ld1d_inbound(<vscale x 2 x i1> %pg, i64* %a) {
+define <vscale x 2 x i64> @ld1d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i64* %a to <vscale x 2 x i64>*
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 2 x i64>* %base to i64*
-  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1> %pg, i64* %base_scalar)
+  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %a, i64 1
+  %base_scalar = bitcast <vscale x 2 x i64>* %base to ptr
+  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1> %pg, ptr %base_scalar)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x i64> @ld1w_d_inbound(<vscale x 2 x i1> %pg, i32* %a) {
+define <vscale x 2 x i64> @ld1w_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1w_d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i32* %a to <vscale x 2 x i32>*
-  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i32>* %base to i32*
-  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pg, i32* %base_scalar)
+  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %a, i64 7
+  %base_scalar = bitcast <vscale x 2 x i32>* %base to ptr
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ld1sw_d_inbound(<vscale x 2 x i1> %pg, i32* %a) {
+define <vscale x 2 x i64> @ld1sw_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1sw_d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i32* %a to <vscale x 2 x i32>*
-  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i32>* %base to i32*
-  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pg, i32* %base_scalar)
+  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %a, i64 7
+  %base_scalar = bitcast <vscale x 2 x i32>* %base to ptr
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x double> @ld1d_f64_inbound(<vscale x 2 x i1> %pg, double* %a) {
+define <vscale x 2 x double> @ld1d_f64_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1d_f64_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast double* %a to <vscale x 2 x double>*
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 2 x double>* %base to double*
-  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %pg, double* %base_scalar)
+  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %a, i64 1
+  %base_scalar = bitcast <vscale x 2 x double>* %base to ptr
+  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %pg, ptr %base_scalar)
   ret <vscale x 2 x double> %load
 }
 
-declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, ptr)
 
-declare <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1>, i8*)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1>, half*)
-declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1.nxv8bf16(<vscale x 8 x i1>, bfloat*)
+declare <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1.nxv8bf16(<vscale x 8 x i1>, ptr)
 
-declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1>, i8*)
-declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1>, i16*)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1>, float*)
+declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1>, ptr)
 
-declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1>, i8*)
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1>, i16*)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1>, i32*)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1>, double*)
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1>, ptr)
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+sve,+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-reg.ll
index 68dd37185317f..32d15af947e0d 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1-addressing-mode-reg-reg.ll
@@ -5,78 +5,78 @@
 ; LD1B
 ;
 
-define <vscale x 16 x i8> @ld1b_i8(<vscale x 16 x i1> %pg, i8* %a, i64 %index) {
+define <vscale x 16 x i8> @ld1b_i8(<vscale x 16 x i1> %pg, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1b_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %index
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 %index
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base)
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 8 x i16> @ld1b_h(<vscale x 8 x i1> %pred, i8* %a, i64 %index) {
+define <vscale x 8 x i16> @ld1b_h(<vscale x 8 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1b_h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %index
-  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 %index
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, ptr %base)
   %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @ld1sb_h(<vscale x 8 x i1> %pred, i8* %a, i64 %index) {
+define <vscale x 8 x i16> @ld1sb_h(<vscale x 8 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1sb_h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.h }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %index
-  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 %index
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, ptr %base)
   %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @ld1b_s(<vscale x 4 x i1> %pred, i8* %a, i64 %index) {
+define <vscale x 4 x i32> @ld1b_s(<vscale x 4 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1b_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %index
-  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pred, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 %index
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pred, ptr %base)
   %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ld1sb_s(<vscale x 4 x i1> %pred, i8* %a, i64 %index) {
+define <vscale x 4 x i32> @ld1sb_s(<vscale x 4 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1sb_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %index
-  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pred, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 %index
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pred, ptr %base)
   %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @ld1b_d(<vscale x 2 x i1> %pred, i8* %a, i64 %index) {
+define <vscale x 2 x i64> @ld1b_d(<vscale x 2 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1b_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %index
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pred, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 %index
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pred, ptr %base)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ld1sb_d(<vscale x 2 x i1> %pred, i8* %a, i64 %index) {
+define <vscale x 2 x i64> @ld1sb_d(<vscale x 2 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1sb_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %index
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pred, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 %index
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pred, ptr %base)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
@@ -85,76 +85,76 @@ define <vscale x 2 x i64> @ld1sb_d(<vscale x 2 x i1> %pred, i8* %a, i64 %index)
 ; LD1H
 ;
 
-define <vscale x 8 x i16> @ld1h_i16(<vscale x 8 x i1> %pg, i16* %a, i64 %index) {
+define <vscale x 8 x i16> @ld1h_i16(<vscale x 8 x i1> %pg, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1h_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i16, i16* %a, i64 %index
-  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %pg, i16* %base)
+  %base = getelementptr i16, ptr %a, i64 %index
+  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %pg, ptr %base)
   ret <vscale x 8 x i16> %load
 }
 
-define <vscale x 8 x half> @ld1h_f16(<vscale x 8 x i1> %pg, half* %a, i64 %index) {
+define <vscale x 8 x half> @ld1h_f16(<vscale x 8 x i1> %pg, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1h_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr half, half* %a, i64 %index
-  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1> %pg, half* %base)
+  %base = getelementptr half, ptr %a, i64 %index
+  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1> %pg, ptr %base)
   ret <vscale x 8 x half> %load
 }
 
-define <vscale x 8 x bfloat> @ld1h_bf16(<vscale x 8 x i1> %pg, bfloat* %a, i64 %index) #0 {
+define <vscale x 8 x bfloat> @ld1h_bf16(<vscale x 8 x i1> %pg, ptr %a, i64 %index) #0 {
 ; CHECK-LABEL: ld1h_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr bfloat, bfloat* %a, i64 %index
-  %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1.nxv8bf16(<vscale x 8 x i1> %pg, bfloat* %base)
+  %base = getelementptr bfloat, ptr %a, i64 %index
+  %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1.nxv8bf16(<vscale x 8 x i1> %pg, ptr %base)
   ret <vscale x 8 x bfloat> %load
 }
 
-define <vscale x 4 x i32> @ld1h_s(<vscale x 4 x i1> %pred, i16* %a, i64 %index) {
+define <vscale x 4 x i32> @ld1h_s(<vscale x 4 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1h_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i16, i16* %a, i64 %index
-  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pred, i16* %base)
+  %base = getelementptr i16, ptr %a, i64 %index
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pred, ptr %base)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ld1sh_s(<vscale x 4 x i1> %pred, i16* %a, i64 %index) {
+define <vscale x 4 x i32> @ld1sh_s(<vscale x 4 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1sh_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i16, i16* %a, i64 %index
-  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pred, i16* %base)
+  %base = getelementptr i16, ptr %a, i64 %index
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pred, ptr %base)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @ld1h_d(<vscale x 2 x i1> %pred, i16* %a, i64 %index) {
+define <vscale x 2 x i64> @ld1h_d(<vscale x 2 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1h_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i16, i16* %a, i64 %index
-  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pred, i16* %base)
+  %base = getelementptr i16, ptr %a, i64 %index
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pred, ptr %base)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ld1sh_d(<vscale x 2 x i1> %pred, i16* %a, i64 %index) {
+define <vscale x 2 x i64> @ld1sh_d(<vscale x 2 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1sh_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i16, i16* %a, i64 %index
-  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pred, i16* %base)
+  %base = getelementptr i16, ptr %a, i64 %index
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pred, ptr %base)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
@@ -163,42 +163,42 @@ define <vscale x 2 x i64> @ld1sh_d(<vscale x 2 x i1> %pred, i16* %a, i64 %index)
 ; LD1W
 ;
 
-define<vscale x 4 x i32> @ld1w(<vscale x 4 x i1> %pg, i32* %a, i64 %index) {
+define<vscale x 4 x i32> @ld1w(<vscale x 4 x i1> %pg, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1w
 ; CHECK: ld1w { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT: ret
-  %base = getelementptr i32, i32* %a, i64 %index
-  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %pg, i32* %base)
+  %base = getelementptr i32, ptr %a, i64 %index
+  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %pg, ptr %base)
   ret <vscale x 4 x i32> %load
 }
 
-define<vscale x 4 x float> @ld1w_f32(<vscale x 4 x i1> %pg, float* %a, i64 %index) {
+define<vscale x 4 x float> @ld1w_f32(<vscale x 4 x i1> %pg, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1w_f32
 ; CHECK: ld1w { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT: ret
-  %base = getelementptr float, float* %a, i64 %index
-  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1> %pg, float* %base)
+  %base = getelementptr float, ptr %a, i64 %index
+  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1> %pg, ptr %base)
   ret <vscale x 4 x float> %load
 }
 
-define <vscale x 2 x i64> @ld1w_d(<vscale x 2 x i1> %pred, i32* %a, i64 %index) {
+define <vscale x 2 x i64> @ld1w_d(<vscale x 2 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1w_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i32, i32* %a, i64 %index
-  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pred, i32* %base)
+  %base = getelementptr i32, ptr %a, i64 %index
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pred, ptr %base)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ld1sw_d(<vscale x 2 x i1> %pred, i32* %a, i64 %index) {
+define <vscale x 2 x i64> @ld1sw_d(<vscale x 2 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1sw_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i32, i32* %a, i64 %index
-  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pred, i32* %base)
+  %base = getelementptr i32, ptr %a, i64 %index
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pred, ptr %base)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
@@ -207,43 +207,43 @@ define <vscale x 2 x i64> @ld1sw_d(<vscale x 2 x i1> %pred, i32* %a, i64 %index)
 ; LD1D
 ;
 
-define <vscale x 2 x i64> @ld1d(<vscale x 2 x i1> %pg, i64* %a, i64 %index) {
+define <vscale x 2 x i64> @ld1d(<vscale x 2 x i1> %pg, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i64, i64* %a, i64 %index
-  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1> %pg, i64* %base)
+  %base = getelementptr i64, ptr %a, i64 %index
+  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1> %pg, ptr %base)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @ld1d_f64(<vscale x 2 x i1> %pg, double* %a, i64 %index) {
+define <vscale x 2 x double> @ld1d_f64(<vscale x 2 x i1> %pg, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1d_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %base = getelementptr double, double* %a, i64 %index
-  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %pg, double* %base)
+  %base = getelementptr double, ptr %a, i64 %index
+  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %pg, ptr %base)
   ret <vscale x 2 x double> %load
 }
 
-declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, ptr)
 
-declare <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1>, i8*)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1>, half*)
-declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1.nxv8bf16(<vscale x 8 x i1>, bfloat*)
+declare <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1.nxv8bf16(<vscale x 8 x i1>, ptr)
 
-declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1>, i8*)
-declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1>, i16*)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1>, float*)
+declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1>, ptr)
 
-declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1>, i8*)
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1>, i16*)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1>, i32*)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1>, double*)
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1>, ptr)
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+sve,+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll
index c2e43c830c2a1..a786754fa96b5 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1.ll
@@ -6,71 +6,71 @@
 ; LD1B
 ;
 
-define <vscale x 16 x i8> @ld1b_i8(<vscale x 16 x i1> %pred, i8* %addr) {
+define <vscale x 16 x i8> @ld1b_i8(<vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1b_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr)
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, ptr %addr)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 8 x i16> @ld1b_h(<vscale x 8 x i1> %pred, i8* %addr) {
+define <vscale x 8 x i16> @ld1b_h(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1b_h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, i8* %addr)
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, ptr %addr)
   %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @ld1sb_h(<vscale x 8 x i1> %pred, i8* %addr) {
+define <vscale x 8 x i16> @ld1sb_h(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1sb_h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, i8* %addr)
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, ptr %addr)
   %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @ld1b_s(<vscale x 4 x i1> %pred, i8* %addr) {
+define <vscale x 4 x i32> @ld1b_s(<vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1b_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pred, i8* %addr)
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pred, ptr %addr)
   %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ld1sb_s(<vscale x 4 x i1> %pred, i8* %addr) {
+define <vscale x 4 x i32> @ld1sb_s(<vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1sb_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pred, i8* %addr)
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pred, ptr %addr)
   %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @ld1b_d(<vscale x 2 x i1> %pred, i8* %addr) {
+define <vscale x 2 x i64> @ld1b_d(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1b_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pred, i8* %addr)
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pred, ptr %addr)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ld1sb_d(<vscale x 2 x i1> %pred, i8* %addr) {
+define <vscale x 2 x i64> @ld1sb_d(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1sb_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pred, i8* %addr)
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pred, ptr %addr)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
@@ -79,69 +79,69 @@ define <vscale x 2 x i64> @ld1sb_d(<vscale x 2 x i1> %pred, i8* %addr) {
 ; LD1H
 ;
 
-define <vscale x 8 x i16> @ld1h_i16(<vscale x 8 x i1> %pred, i16* %addr) {
+define <vscale x 8 x i16> @ld1h_i16(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1h_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %pred, i16* %addr)
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %pred, ptr %addr)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x half> @ld1h_f16(<vscale x 8 x i1> %pred, half* %addr) {
+define <vscale x 8 x half> @ld1h_f16(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1h_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1> %pred, half* %addr)
+  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1> %pred, ptr %addr)
   ret <vscale x 8 x half> %res
 }
 
-define <vscale x 8 x bfloat> @ld1h_bf16(<vscale x 8 x i1> %pred, bfloat* %addr) #0 {
+define <vscale x 8 x bfloat> @ld1h_bf16(<vscale x 8 x i1> %pred, ptr %addr) #0 {
 ; CHECK-LABEL: ld1h_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1.nxv8bf16(<vscale x 8 x i1> %pred, bfloat* %addr)
+  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1.nxv8bf16(<vscale x 8 x i1> %pred, ptr %addr)
   ret <vscale x 8 x bfloat> %res
 }
 
-define <vscale x 4 x i32> @ld1h_s(<vscale x 4 x i1> %pred, i16* %addr) {
+define <vscale x 4 x i32> @ld1h_s(<vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1h_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pred, i16* %addr)
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pred, ptr %addr)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ld1sh_s(<vscale x 4 x i1> %pred, i16* %addr) {
+define <vscale x 4 x i32> @ld1sh_s(<vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1sh_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pred, i16* %addr)
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pred, ptr %addr)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @ld1h_d(<vscale x 2 x i1> %pred, i16* %addr) {
+define <vscale x 2 x i64> @ld1h_d(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1h_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pred, i16* %addr)
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pred, ptr %addr)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ld1sh_d(<vscale x 2 x i1> %pred, i16* %addr) {
+define <vscale x 2 x i64> @ld1sh_d(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1sh_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pred, i16* %addr)
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pred, ptr %addr)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
@@ -150,40 +150,40 @@ define <vscale x 2 x i64> @ld1sh_d(<vscale x 2 x i1> %pred, i16* %addr) {
 ; LD1W
 ;
 
-define <vscale x 4 x i32> @ld1w_i32(<vscale x 4 x i1> %pred, i32* %addr) {
+define <vscale x 4 x i32> @ld1w_i32(<vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1w_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %pred, i32* %addr)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %pred, ptr %addr)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x float> @ld1w_f32(<vscale x 4 x i1> %pred, float* %addr) {
+define <vscale x 4 x float> @ld1w_f32(<vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1w_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1> %pred, float* %addr)
+  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1> %pred, ptr %addr)
   ret <vscale x 4 x float> %res
 }
 
-define <vscale x 2 x i64> @ld1w_d(<vscale x 2 x i1> %pred, i32* %addr) {
+define <vscale x 2 x i64> @ld1w_d(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1w_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pred, i32* %addr)
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pred, ptr %addr)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ld1sw_d(<vscale x 2 x i1> %pred, i32* %addr) {
+define <vscale x 2 x i64> @ld1sw_d(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1sw_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pred, i32* %addr)
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pred, ptr %addr)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
@@ -192,43 +192,43 @@ define <vscale x 2 x i64> @ld1sw_d(<vscale x 2 x i1> %pred, i32* %addr) {
 ; LD1D
 ;
 
-define <vscale x 2 x i64> @ld1d_i64(<vscale x 2 x i1> %pred, i64* %addr) {
+define <vscale x 2 x i64> @ld1d_i64(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1d_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1> %pred,
-                                                               i64* %addr)
+                                                               ptr %addr)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x double> @ld1d_f64(<vscale x 2 x i1> %pred, double* %addr) {
+define <vscale x 2 x double> @ld1d_f64(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1d_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %pred,
-                                                                  double* %addr)
+                                                                  ptr %addr)
   ret <vscale x 2 x double> %res
 }
 
-declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, ptr)
 
-declare <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1>, i8*)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1>, half*)
-declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1.nxv8bf16(<vscale x 8 x i1>, bfloat*)
+declare <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1.nxv8bf16(<vscale x 8 x i1>, ptr)
 
-declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1>, i8*)
-declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1>, i16*)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1>, float*)
+declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1>, ptr)
 
-declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1>, i8*)
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1>, i16*)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1>, i32*)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1>, double*)
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1>, ptr)
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+sve,+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1ro-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1ro-addressing-mode-reg-imm.ll
index f5c4fbed0293f..49e32ce5c5024 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1ro-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1ro-addressing-mode-reg-imm.ll
@@ -5,13 +5,13 @@
 ; LD1ROB
 ;
 
-define <vscale x 16 x i8> @ld1rob_i8(<vscale x 16 x i1> %pg, i8* %a) {
+define <vscale x 16 x i8> @ld1rob_i8(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1rob_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rob { z0.b }, p0/z, [x0, #32]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 32
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1> %pg, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 32
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1> %pg, ptr %base)
   ret <vscale x 16 x i8> %load
 }
 
@@ -19,33 +19,33 @@ define <vscale x 16 x i8> @ld1rob_i8(<vscale x 16 x i1> %pg, i8* %a) {
 ; LD1ROH
 ;
 
-define <vscale x 8 x i16> @ld1roh_i16(<vscale x 8 x i1> %pg, i16* %a) {
+define <vscale x 8 x i16> @ld1roh_i16(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1roh_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1roh { z0.h }, p0/z, [x0, #64]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i16, i16* %a, i64 32
-  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1> %pg, i16* %base)
+  %base = getelementptr i16, ptr %a, i64 32
+  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1> %pg, ptr %base)
   ret <vscale x 8 x i16> %load
 }
 
-define <vscale x 8 x half> @ld1roh_f16(<vscale x 8 x i1> %pg, half* %a) {
+define <vscale x 8 x half> @ld1roh_f16(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1roh_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1roh { z0.h }, p0/z, [x0, #64]
 ; CHECK-NEXT:    ret
-  %base = getelementptr half, half* %a, i64 32
-  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ld1ro.nxv8f16(<vscale x 8 x i1> %pg, half* %base)
+  %base = getelementptr half, ptr %a, i64 32
+  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ld1ro.nxv8f16(<vscale x 8 x i1> %pg, ptr %base)
   ret <vscale x 8 x half> %load
 }
 
-define <vscale x 8 x bfloat> @ld1roh_bf16(<vscale x 8 x i1> %pg, bfloat* %a) #0 {
+define <vscale x 8 x bfloat> @ld1roh_bf16(<vscale x 8 x i1> %pg, ptr %a) #0 {
 ; CHECK-LABEL: ld1roh_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1roh { z0.h }, p0/z, [x0, #64]
 ; CHECK-NEXT:    ret
-  %base = getelementptr bfloat, bfloat* %a, i64 32
-  %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1ro.nxv8bf16(<vscale x 8 x i1> %pg, bfloat* %base)
+  %base = getelementptr bfloat, ptr %a, i64 32
+  %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1ro.nxv8bf16(<vscale x 8 x i1> %pg, ptr %base)
   ret <vscale x 8 x bfloat> %load
 }
 
@@ -53,23 +53,23 @@ define <vscale x 8 x bfloat> @ld1roh_bf16(<vscale x 8 x i1> %pg, bfloat* %a) #0
 ; LD1ROW
 ;
 
-define <vscale x 4 x i32> @ld1row_i32(<vscale x 4 x i1> %pg, i32* %a) {
+define <vscale x 4 x i32> @ld1row_i32(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1row_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1row { z0.s }, p0/z, [x0, #128]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i32, i32* %a, i64 32
-  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1> %pg, i32* %base)
+  %base = getelementptr i32, ptr %a, i64 32
+  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1> %pg, ptr %base)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 4 x float> @ld1row_f32(<vscale x 4 x i1> %pg, float* %a) {
+define <vscale x 4 x float> @ld1row_f32(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1row_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1row { z0.s }, p0/z, [x0, #128]
 ; CHECK-NEXT:    ret
-  %base = getelementptr float, float* %a, i64 32
-  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1ro.nxv4f32(<vscale x 4 x i1> %pg, float* %base)
+  %base = getelementptr float, ptr %a, i64 32
+  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1ro.nxv4f32(<vscale x 4 x i1> %pg, ptr %base)
   ret <vscale x 4 x float> %load
 }
 
@@ -77,23 +77,23 @@ define <vscale x 4 x float> @ld1row_f32(<vscale x 4 x i1> %pg, float* %a) {
 ; LD1ROD
 ;
 
-define <vscale x 2 x i64> @ld1rod_i64(<vscale x 2 x i1> %pg, i64* %a) {
+define <vscale x 2 x i64> @ld1rod_i64(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1rod_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rod { z0.d }, p0/z, [x0, #-64]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i64, i64* %a, i64 -8
-  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1> %pg, i64* %base)
+  %base = getelementptr i64, ptr %a, i64 -8
+  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1> %pg, ptr %base)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @ld1rod_f64(<vscale x 2 x i1> %pg, double* %a) {
+define <vscale x 2 x double> @ld1rod_f64(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1rod_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rod { z0.d }, p0/z, [x0, #-128]
 ; CHECK-NEXT:    ret
-  %base = getelementptr double, double* %a, i64 -16
-  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1ro.nxv2f64(<vscale x 2 x i1> %pg, double* %base)
+  %base = getelementptr double, ptr %a, i64 -16
+  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1ro.nxv2f64(<vscale x 2 x i1> %pg, ptr %base)
   ret <vscale x 2 x double> %load
 }
 
@@ -102,84 +102,84 @@ define <vscale x 2 x double> @ld1rod_f64(<vscale x 2 x i1> %pg, double* %a) {
 ; range checks: immediate must be a multiple of 32 in the range -256, ..., 224
 
 ; lower bound
-define <vscale x 16 x i8> @ld1rob_i8_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {
+define <vscale x 16 x i8> @ld1rob_i8_lower_bound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1rob_i8_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rob { z0.b }, p0/z, [x0, #-256]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 -256
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1> %pg, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 -256
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1> %pg, ptr %base)
   ret <vscale x 16 x i8> %load
 }
 
 ; below lower bound
-define <vscale x 8 x i16> @ld1roh_i16_below_lower_bound(<vscale x 8 x i1> %pg, i16* %a) {
+define <vscale x 8 x i16> @ld1roh_i16_below_lower_bound(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1roh_i16_below_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #-129
 ; CHECK-NEXT:    ld1roh { z0.h }, p0/z, [x0, x8, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i16, i16* %a, i64 -129
-  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1> %pg, i16* %base)
+  %base = getelementptr i16, ptr %a, i64 -129
+  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1> %pg, ptr %base)
   ret <vscale x 8 x i16> %load
 }
 
-define <vscale x 16 x i8> @ld1rob_i8_below_lower_bound_01(<vscale x 16 x i1> %pg, i8* %a) {
+define <vscale x 16 x i8> @ld1rob_i8_below_lower_bound_01(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1rob_i8_below_lower_bound_01:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #-257
 ; CHECK-NEXT:    ld1rob { z0.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 -257
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1> %pg, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 -257
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1> %pg, ptr %base)
   ret <vscale x 16 x i8> %load
 }
 
 ; not a multiple of 32
-define <vscale x 4 x i32> @ld1row_i32_not_multiple(<vscale x 4 x i1> %pg, i32* %a) {
+define <vscale x 4 x i32> @ld1row_i32_not_multiple(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1row_i32_not_multiple:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #3
 ; CHECK-NEXT:    ld1row { z0.s }, p0/z, [x0, x8, lsl #2]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i32, i32* %a, i64 3
-  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1> %pg, i32* %base)
+  %base = getelementptr i32, ptr %a, i64 3
+  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1> %pg, ptr %base)
   ret <vscale x 4 x i32> %load
 }
 
 ; upper bound
-define <vscale x 2 x i64> @ld1rod_i64_upper_bound(<vscale x 2 x i1> %pg, i64* %a) {
+define <vscale x 2 x i64> @ld1rod_i64_upper_bound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1rod_i64_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rod { z0.d }, p0/z, [x0, #224]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i64, i64* %a, i64 28
-  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1> %pg, i64* %base)
+  %base = getelementptr i64, ptr %a, i64 28
+  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1> %pg, ptr %base)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 16 x i8> @ld1rob_i8_beyond_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
+define <vscale x 16 x i8> @ld1rob_i8_beyond_upper_bound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ld1rob_i8_beyond_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #225
 ; CHECK-NEXT:    ld1rob { z0.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 225
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1> %pg, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 225
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1> %pg, ptr %base)
   ret <vscale x 16 x i8> %load
 }
 
-declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1>, ptr)
 
-declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 8 x half> @llvm.aarch64.sve.ld1ro.nxv8f16(<vscale x 8 x i1>, half*)
-declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1ro.nxv8bf16(<vscale x 8 x i1>, bfloat*)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ld1ro.nxv8f16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1ro.nxv8bf16(<vscale x 8 x i1>, ptr)
 
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 4 x float> @llvm.aarch64.sve.ld1ro.nxv4f32(<vscale x 4 x i1>, float*)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1ro.nxv4f32(<vscale x 4 x i1>, ptr)
 
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ld1ro.nxv2f64(<vscale x 2 x i1>, double*)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1ro.nxv2f64(<vscale x 2 x i1>, ptr)
 
 
 ; +bf16 is required for the bfloat version.

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1ro-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1ro-addressing-mode-reg-reg.ll
index 603488cb59fa9..fc6251878348c 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1ro-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1ro-addressing-mode-reg-reg.ll
@@ -5,13 +5,13 @@
 ; LD1ROB
 ;
 
-define <vscale x 16 x i8> @ld1rob_i8(<vscale x 16 x i1> %pg, i8* %a, i64 %index) {
+define <vscale x 16 x i8> @ld1rob_i8(<vscale x 16 x i1> %pg, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1rob_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rob { z0.b }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %index
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1> %pg, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 %index
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1> %pg, ptr %base)
   ret <vscale x 16 x i8> %load
 }
 
@@ -19,33 +19,33 @@ define <vscale x 16 x i8> @ld1rob_i8(<vscale x 16 x i1> %pg, i8* %a, i64 %index)
 ; LD1ROH
 ;
 
-define <vscale x 8 x i16> @ld1roh_i16(<vscale x 8 x i1> %pg, i16* %a, i64 %index) {
+define <vscale x 8 x i16> @ld1roh_i16(<vscale x 8 x i1> %pg, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1roh_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1roh { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i16, i16* %a, i64 %index
-  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1> %pg, i16* %base)
+  %base = getelementptr i16, ptr %a, i64 %index
+  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1> %pg, ptr %base)
   ret <vscale x 8 x i16> %load
 }
 
-define <vscale x 8 x half> @ld1roh_f16(<vscale x 8 x i1> %pg, half* %a, i64 %index) {
+define <vscale x 8 x half> @ld1roh_f16(<vscale x 8 x i1> %pg, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1roh_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1roh { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr half, half* %a, i64 %index
-  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ld1ro.nxv8f16(<vscale x 8 x i1> %pg, half* %base)
+  %base = getelementptr half, ptr %a, i64 %index
+  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ld1ro.nxv8f16(<vscale x 8 x i1> %pg, ptr %base)
   ret <vscale x 8 x half> %load
 }
 
-define <vscale x 8 x bfloat> @ld1roh_bf16(<vscale x 8 x i1> %pg, bfloat* %a, i64 %index) #0 {
+define <vscale x 8 x bfloat> @ld1roh_bf16(<vscale x 8 x i1> %pg, ptr %a, i64 %index) #0 {
 ; CHECK-LABEL: ld1roh_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1roh { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr bfloat, bfloat* %a, i64 %index
-  %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1ro.nxv8bf16(<vscale x 8 x i1> %pg, bfloat* %base)
+  %base = getelementptr bfloat, ptr %a, i64 %index
+  %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1ro.nxv8bf16(<vscale x 8 x i1> %pg, ptr %base)
   ret <vscale x 8 x bfloat> %load
 }
 
@@ -53,23 +53,23 @@ define <vscale x 8 x bfloat> @ld1roh_bf16(<vscale x 8 x i1> %pg, bfloat* %a, i64
 ; LD1ROW
 ;
 
-define <vscale x 4 x i32> @ld1row_i32(<vscale x 4 x i1> %pg, i32* %a, i64 %index) {
+define <vscale x 4 x i32> @ld1row_i32(<vscale x 4 x i1> %pg, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1row_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1row { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i32, i32* %a, i64 %index
-  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1> %pg, i32* %base)
+  %base = getelementptr i32, ptr %a, i64 %index
+  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1> %pg, ptr %base)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 4 x float> @ld1row_f32(<vscale x 4 x i1> %pg, float* %a, i64 %index) {
+define <vscale x 4 x float> @ld1row_f32(<vscale x 4 x i1> %pg, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1row_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1row { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %base = getelementptr float, float* %a, i64 %index
-  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1ro.nxv4f32(<vscale x 4 x i1> %pg, float* %base)
+  %base = getelementptr float, ptr %a, i64 %index
+  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1ro.nxv4f32(<vscale x 4 x i1> %pg, ptr %base)
   ret <vscale x 4 x float> %load
 }
 
@@ -77,37 +77,37 @@ define <vscale x 4 x float> @ld1row_f32(<vscale x 4 x i1> %pg, float* %a, i64 %i
 ; LD1ROD
 ;
 
-define <vscale x 2 x i64> @ld1rod_i64(<vscale x 2 x i1> %pg, i64* %a, i64 %index) {
+define <vscale x 2 x i64> @ld1rod_i64(<vscale x 2 x i1> %pg, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1rod_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rod { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i64, i64* %a, i64 %index
-  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1> %pg, i64* %base)
+  %base = getelementptr i64, ptr %a, i64 %index
+  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1> %pg, ptr %base)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @ld1rod_f64(<vscale x 2 x i1> %pg, double* %a, i64 %index) {
+define <vscale x 2 x double> @ld1rod_f64(<vscale x 2 x i1> %pg, ptr %a, i64 %index) {
 ; CHECK-LABEL: ld1rod_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rod { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %base = getelementptr double, double* %a, i64 %index
-  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1ro.nxv2f64(<vscale x 2 x i1> %pg, double* %base)
+  %base = getelementptr double, ptr %a, i64 %index
+  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1ro.nxv2f64(<vscale x 2 x i1> %pg, ptr %base)
   ret <vscale x 2 x double> %load
 }
 
-declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1>, ptr)
 
-declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 8 x half> @llvm.aarch64.sve.ld1ro.nxv8f16(<vscale x 8 x i1>, half*)
-declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1ro.nxv8bf16(<vscale x 8 x i1>, bfloat*)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ld1ro.nxv8f16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1ro.nxv8bf16(<vscale x 8 x i1>, ptr)
 
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 4 x float> @llvm.aarch64.sve.ld1ro.nxv4f32(<vscale x 4 x i1>, float*)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1ro.nxv4f32(<vscale x 4 x i1>, ptr)
 
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ld1ro.nxv2f64(<vscale x 2 x i1>, double*)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1ro.nxv2f64(<vscale x 2 x i1>, ptr)
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+sve,+f64mm,+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1ro.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1ro.ll
index 8c180d6367459..e2c8ccfcf4866 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1ro.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ld1ro.ll
@@ -5,12 +5,12 @@
 ; LD1ROB
 ;
 
-define <vscale x 16 x i8> @ld1rob_i8(<vscale x 16 x i1> %pred, i8* %addr) nounwind {
+define <vscale x 16 x i8> @ld1rob_i8(<vscale x 16 x i1> %pred, ptr %addr) nounwind {
 ; CHECK-LABEL: ld1rob_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rob { z0.b }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr)
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1> %pred, ptr %addr)
   ret <vscale x 16 x i8> %res
 }
 
@@ -18,21 +18,21 @@ define <vscale x 16 x i8> @ld1rob_i8(<vscale x 16 x i1> %pred, i8* %addr) nounwi
 ; LD1ROH
 ;
 
-define <vscale x 8 x i16> @ld1roh_i16(<vscale x 8 x i1> %pred, i16* %addr) nounwind {
+define <vscale x 8 x i16> @ld1roh_i16(<vscale x 8 x i1> %pred, ptr %addr) nounwind {
 ; CHECK-LABEL: ld1roh_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1roh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1> %pred, i16* %addr)
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1> %pred, ptr %addr)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x half> @ld1roh_half(<vscale x 8 x i1> %pred, half* %addr) nounwind {
+define <vscale x 8 x half> @ld1roh_half(<vscale x 8 x i1> %pred, ptr %addr) nounwind {
 ; CHECK-LABEL: ld1roh_half:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1roh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1ro.nxv8f16(<vscale x 8 x i1> %pred, half* %addr)
+  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1ro.nxv8f16(<vscale x 8 x i1> %pred, ptr %addr)
   ret <vscale x 8 x half> %res
 }
 
@@ -40,21 +40,21 @@ define <vscale x 8 x half> @ld1roh_half(<vscale x 8 x i1> %pred, half* %addr) no
 ; LD1ROW
 ;
 
-define <vscale x 4 x i32> @ld1row_i32(<vscale x 4 x i1> %pred, i32* %addr) nounwind {
+define <vscale x 4 x i32> @ld1row_i32(<vscale x 4 x i1> %pred, ptr %addr) nounwind {
 ; CHECK-LABEL: ld1row_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1row { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1> %pred, i32* %addr)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1> %pred, ptr %addr)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x float> @ld1row_float(<vscale x 4 x i1> %pred, float* %addr) nounwind {
+define <vscale x 4 x float> @ld1row_float(<vscale x 4 x i1> %pred, ptr %addr) nounwind {
 ; CHECK-LABEL: ld1row_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1row { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1ro.nxv4f32(<vscale x 4 x i1> %pred, float* %addr)
+  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1ro.nxv4f32(<vscale x 4 x i1> %pred, ptr %addr)
   ret <vscale x 4 x float> %res
 }
 
@@ -62,31 +62,31 @@ define <vscale x 4 x float> @ld1row_float(<vscale x 4 x i1> %pred, float* %addr)
 ; LD1ROD
 ;
 
-define <vscale x 2 x i64> @ld1rod_i64(<vscale x 2 x i1> %pred, i64* %addr) nounwind {
+define <vscale x 2 x i64> @ld1rod_i64(<vscale x 2 x i1> %pred, ptr %addr) nounwind {
 ; CHECK-LABEL: ld1rod_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rod { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1> %pred, i64* %addr)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1> %pred, ptr %addr)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x double> @ld1rod_double(<vscale x 2 x i1> %pred, double* %addr) nounwind {
+define <vscale x 2 x double> @ld1rod_double(<vscale x 2 x i1> %pred, ptr %addr) nounwind {
 ; CHECK-LABEL: ld1rod_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rod { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1ro.nxv2f64(<vscale x 2 x i1> %pred, double* %addr)
+  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1ro.nxv2f64(<vscale x 2 x i1> %pred, ptr %addr)
   ret <vscale x 2 x double> %res
 }
 
-declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1ro.nxv16i8(<vscale x 16 x i1>, ptr)
 
-declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 8 x half> @llvm.aarch64.sve.ld1ro.nxv8f16(<vscale x 8 x i1>, half*)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1ro.nxv8i16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ld1ro.nxv8f16(<vscale x 8 x i1>, ptr)
 
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 4 x float> @llvm.aarch64.sve.ld1ro.nxv4f32(<vscale x 4 x i1>, float*)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1ro.nxv4i32(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1ro.nxv4f32(<vscale x 4 x i1>, ptr)
 
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ld1ro.nxv2f64(<vscale x 2 x i1>, double*)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1ro.nxv2i64(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1ro.nxv2f64(<vscale x 2 x i1>, ptr)

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+reg-addr-mode.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+reg-addr-mode.ll
index 1c54865bb50bb..443253c2b2909 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+reg-addr-mode.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+reg-addr-mode.ll
@@ -3,283 +3,283 @@
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=sme < %s | FileCheck %s
 
 ; ld2b
-define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8(<vscale x 16 x i1> %Pg, i8 *%addr, i64 %a) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8(<vscale x 16 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld2.nxv32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr i8, i8 *  %addr, i64 %a
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%addr2)
+  %addr2 = getelementptr i8, ptr  %addr, i64 %a
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %addr2)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
 ; ld2h
-define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ld2.nxv16i16(<vscale x 8 x i1> %Pg, i16 *%addr, i64 %a) {
+define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ld2.nxv16i16(<vscale x 8 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld2.nxv16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr i16, i16 *  %addr, i64 %a
-  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2.sret.nxv8i16(<vscale x 8 x i1> %Pg, i16 *%addr2)
+  %addr2 = getelementptr i16, ptr  %addr, i64 %a
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2.sret.nxv8i16(<vscale x 8 x i1> %Pg, ptr %addr2)
   ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
 }
 
-define { <vscale x 8 x half>, <vscale x 8 x half> } @ld2.nxv16f16(<vscale x 8 x i1> %Pg, half *%addr, i64 %a) {
+define { <vscale x 8 x half>, <vscale x 8 x half> } @ld2.nxv16f16(<vscale x 8 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld2.nxv16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr half, half *  %addr, i64 %a
-  %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2.sret.nxv8f16(<vscale x 8 x i1> %Pg, half *%addr2)
+  %addr2 = getelementptr half, ptr  %addr, i64 %a
+  %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2.sret.nxv8f16(<vscale x 8 x i1> %Pg, ptr %addr2)
   ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
 
-define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld2.nxv16bf16(<vscale x 8 x i1> %Pg, bfloat *%addr, i64 %a) #0 {
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld2.nxv16bf16(<vscale x 8 x i1> %Pg, ptr %addr, i64 %a) #0 {
 ; CHECK-LABEL: ld2.nxv16bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr bfloat, bfloat *  %addr, i64 %a
-  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2.sret.nxv8bf16(<vscale x 8 x i1> %Pg, bfloat *%addr2)
+  %addr2 = getelementptr bfloat, ptr  %addr, i64 %a
+  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2.sret.nxv8bf16(<vscale x 8 x i1> %Pg, ptr %addr2)
   ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
 }
 
 ; ld2w
-define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ld2.nxv8i32(<vscale x 4 x i1> %Pg, i32 *%addr, i64 %a) {
+define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ld2.nxv8i32(<vscale x 4 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld2.nxv8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr i32, i32 *  %addr, i64 %a
-  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld2.sret.nxv4i32(<vscale x 4 x i1> %Pg, i32 *%addr2)
+  %addr2 = getelementptr i32, ptr  %addr, i64 %a
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld2.sret.nxv4i32(<vscale x 4 x i1> %Pg, ptr %addr2)
   ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
 }
 
-define { <vscale x 4 x float>, <vscale x 4 x float> } @ld2.nxv8f32(<vscale x 4 x i1> %Pg, float *%addr, i64 %a) {
+define { <vscale x 4 x float>, <vscale x 4 x float> } @ld2.nxv8f32(<vscale x 4 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld2.nxv8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr float, float *  %addr, i64 %a
-  %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld2.sret.nxv4f32(<vscale x 4 x i1> %Pg, float *%addr2)
+  %addr2 = getelementptr float, ptr  %addr, i64 %a
+  %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld2.sret.nxv4f32(<vscale x 4 x i1> %Pg, ptr %addr2)
   ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
 
 ; ld2d
-define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld2.nxv4i64(<vscale x 2 x i1> %Pg, i64 *%addr, i64 %a) {
+define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld2.nxv4i64(<vscale x 2 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld2.nxv4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr i64, i64 *  %addr, i64 %a
-  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv2i64(<vscale x 2 x i1> %Pg, i64 *%addr2)
+  %addr2 = getelementptr i64, ptr  %addr, i64 %a
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv2i64(<vscale x 2 x i1> %Pg, ptr %addr2)
   ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
 }
 
-define { <vscale x 2 x double>, <vscale x 2 x double> } @ld2.nxv4f64(<vscale x 2 x i1> %Pg, double *%addr, i64 %a) {
+define { <vscale x 2 x double>, <vscale x 2 x double> } @ld2.nxv4f64(<vscale x 2 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld2.nxv4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr double, double *  %addr, i64 %a
-  %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv2f64(<vscale x 2 x i1> %Pg, double *%addr2)
+  %addr2 = getelementptr double, ptr  %addr, i64 %a
+  %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv2f64(<vscale x 2 x i1> %Pg, ptr %addr2)
   ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
 }
 
 ; ld3b
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8(<vscale x 16 x i1> %Pg, i8 *%addr, i64 %a) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8(<vscale x 16 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld3.nxv48i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3b { z0.b - z2.b }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr i8, i8 *  %addr, i64 %a
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%addr2)
+  %addr2 = getelementptr i8, ptr  %addr, i64 %a
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %addr2)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
 ; ld3h
-define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld3.nxv24i16(<vscale x 8 x i1> %Pg, i16 *%addr, i64 %a) {
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld3.nxv24i16(<vscale x 8 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld3.nxv24i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3h { z0.h - z2.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr i16, i16 *  %addr, i64 %a
-  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld3.sret.nxv8i16(<vscale x 8 x i1> %Pg, i16 *%addr2)
+  %addr2 = getelementptr i16, ptr  %addr, i64 %a
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld3.sret.nxv8i16(<vscale x 8 x i1> %Pg, ptr %addr2)
   ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
 }
 
-define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld3.nxv24f16(<vscale x 8 x i1> %Pg, half *%addr, i64 %a) {
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld3.nxv24f16(<vscale x 8 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld3.nxv24f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3h { z0.h - z2.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr half, half *  %addr, i64 %a
-  %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld3.sret.nxv8f16(<vscale x 8 x i1> %Pg, half *%addr2)
+  %addr2 = getelementptr half, ptr  %addr, i64 %a
+  %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld3.sret.nxv8f16(<vscale x 8 x i1> %Pg, ptr %addr2)
   ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
 
-define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld3.nxv24bf16(<vscale x 8 x i1> %Pg, bfloat *%addr, i64 %a) #0 {
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld3.nxv24bf16(<vscale x 8 x i1> %Pg, ptr %addr, i64 %a) #0 {
 ; CHECK-LABEL: ld3.nxv24bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3h { z0.h - z2.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr bfloat, bfloat *  %addr, i64 %a
-  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld3.sret.nxv8bf16(<vscale x 8 x i1> %Pg, bfloat *%addr2)
+  %addr2 = getelementptr bfloat, ptr  %addr, i64 %a
+  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld3.sret.nxv8bf16(<vscale x 8 x i1> %Pg, ptr %addr2)
   ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
 }
 
 ; ld3w
-define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld3.nxv12i32(<vscale x 4 x i1> %Pg, i32 *%addr, i64 %a) {
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld3.nxv12i32(<vscale x 4 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld3.nxv12i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3w { z0.s - z2.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr i32, i32 *  %addr, i64 %a
-  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3.sret.nxv4i32(<vscale x 4 x i1> %Pg, i32 *%addr2)
+  %addr2 = getelementptr i32, ptr  %addr, i64 %a
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3.sret.nxv4i32(<vscale x 4 x i1> %Pg, ptr %addr2)
   ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
 }
 
-define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld3.nxv12f32(<vscale x 4 x i1> %Pg, float *%addr, i64 %a) {
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld3.nxv12f32(<vscale x 4 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld3.nxv12f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3w { z0.s - z2.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr float, float *  %addr, i64 %a
-  %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1> %Pg, float *%addr2)
+  %addr2 = getelementptr float, ptr  %addr, i64 %a
+  %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1> %Pg, ptr %addr2)
   ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
 
 ; ld3d
-define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld3.nxv6i64(<vscale x 2 x i1> %Pg, i64 *%addr, i64 %a) {
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld3.nxv6i64(<vscale x 2 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld3.nxv6i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3d { z0.d - z2.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr i64, i64 *  %addr, i64 %a
-  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld3.sret.nxv2i64(<vscale x 2 x i1> %Pg, i64 *%addr2)
+  %addr2 = getelementptr i64, ptr  %addr, i64 %a
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld3.sret.nxv2i64(<vscale x 2 x i1> %Pg, ptr %addr2)
   ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
 }
 
-define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld3.nxv6f64(<vscale x 2 x i1> %Pg, double *%addr, i64 %a) {
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld3.nxv6f64(<vscale x 2 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld3.nxv6f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld3d { z0.d - z2.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr double, double *  %addr, i64 %a
-  %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld3.sret.nxv2f64(<vscale x 2 x i1> %Pg, double *%addr2)
+  %addr2 = getelementptr double, ptr  %addr, i64 %a
+  %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld3.sret.nxv2f64(<vscale x 2 x i1> %Pg, ptr %addr2)
   ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
 }
 
 ; ld4b
-define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8(<vscale x 16 x i1> %Pg, i8 *%addr, i64 %a) {
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8(<vscale x 16 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld4.nxv64i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4b { z0.b - z3.b }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr i8, i8 *  %addr, i64 %a
-  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8 *%addr2)
+  %addr2 = getelementptr i8, ptr  %addr, i64 %a
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %addr2)
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
 ; ld4h
-define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld4.nxv32i16(<vscale x 8 x i1> %Pg, i16 *%addr, i64 %a) {
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld4.nxv32i16(<vscale x 8 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld4.nxv32i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4h { z0.h - z3.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr i16, i16 *  %addr, i64 %a
-  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld4.sret.nxv8i16(<vscale x 8 x i1> %Pg, i16 *%addr2)
+  %addr2 = getelementptr i16, ptr  %addr, i64 %a
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld4.sret.nxv8i16(<vscale x 8 x i1> %Pg, ptr %addr2)
   ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
 }
 
-define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld4.nxv32f16(<vscale x 8 x i1> %Pg, half *%addr, i64 %a) {
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld4.nxv32f16(<vscale x 8 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld4.nxv32f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4h { z0.h - z3.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr half, half *  %addr, i64 %a
-  %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld4.sret.nxv8f16(<vscale x 8 x i1> %Pg, half *%addr2)
+  %addr2 = getelementptr half, ptr  %addr, i64 %a
+  %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld4.sret.nxv8f16(<vscale x 8 x i1> %Pg, ptr %addr2)
   ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
 
-define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld4.nxv32bf16(<vscale x 8 x i1> %Pg, bfloat *%addr, i64 %a) #0 {
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld4.nxv32bf16(<vscale x 8 x i1> %Pg, ptr %addr, i64 %a) #0 {
 ; CHECK-LABEL: ld4.nxv32bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4h { z0.h - z3.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr bfloat, bfloat *  %addr, i64 %a
-  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld4.sret.nxv8bf16(<vscale x 8 x i1> %Pg, bfloat *%addr2)
+  %addr2 = getelementptr bfloat, ptr  %addr, i64 %a
+  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld4.sret.nxv8bf16(<vscale x 8 x i1> %Pg, ptr %addr2)
   ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
 }
 
 ; ld4w
-define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld4.nxv16i32(<vscale x 4 x i1> %Pg, i32 *%addr, i64 %a) {
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld4.nxv16i32(<vscale x 4 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld4.nxv16i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4w { z0.s - z3.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr i32, i32 *  %addr, i64 %a
-  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld4.sret.nxv4i32(<vscale x 4 x i1> %Pg, i32 *%addr2)
+  %addr2 = getelementptr i32, ptr  %addr, i64 %a
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld4.sret.nxv4i32(<vscale x 4 x i1> %Pg, ptr %addr2)
   ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
 }
 
-define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld4.nxv16f32(<vscale x 4 x i1> %Pg, float *%addr, i64 %a) {
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld4.nxv16f32(<vscale x 4 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld4.nxv16f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4w { z0.s - z3.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr float, float *  %addr, i64 %a
-  %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld4.sret.nxv4f32(<vscale x 4 x i1> %Pg, float *%addr2)
+  %addr2 = getelementptr float, ptr  %addr, i64 %a
+  %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld4.sret.nxv4f32(<vscale x 4 x i1> %Pg, ptr %addr2)
   ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
 
 ; ld4d
-define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld4.nxv8i64(<vscale x 2 x i1> %Pg, i64 *%addr, i64 %a) {
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld4.nxv8i64(<vscale x 2 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld4.nxv8i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4d { z0.d - z3.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr i64, i64 *  %addr, i64 %a
-  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv2i64(<vscale x 2 x i1> %Pg, i64 *%addr2)
+  %addr2 = getelementptr i64, ptr  %addr, i64 %a
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv2i64(<vscale x 2 x i1> %Pg, ptr %addr2)
   ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
 }
 
-define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld4.nxv8f64(<vscale x 2 x i1> %Pg, double *%addr, i64 %a) {
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld4.nxv8f64(<vscale x 2 x i1> %Pg, ptr %addr, i64 %a) {
 ; CHECK-LABEL: ld4.nxv8f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld4d { z0.d - z3.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %addr2 = getelementptr double, double *  %addr, i64 %a
-  %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %Pg, double *%addr2)
+  %addr2 = getelementptr double, ptr  %addr, i64 %a
+  %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %Pg, ptr %addr2)
   ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
 }
 
-declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1>, i8*)
-declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2.sret.nxv8i16(<vscale x 8 x i1>, i16*)
-declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld2.sret.nxv4i32(<vscale x 4 x i1>, i32*)
-declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv2i64(<vscale x 2 x i1>, i64*)
-declare { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2.sret.nxv8f16(<vscale x 8 x i1>, half*)
-declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2.sret.nxv8bf16(<vscale x 8 x i1>, bfloat*)
-declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld2.sret.nxv4f32(<vscale x 4 x i1>, float*)
-declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv2f64(<vscale x 2 x i1>, double*)
+declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1>, ptr)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2.sret.nxv8i16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld2.sret.nxv4i32(<vscale x 4 x i1>, ptr)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv2i64(<vscale x 2 x i1>, ptr)
+declare { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2.sret.nxv8f16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2.sret.nxv8bf16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld2.sret.nxv4f32(<vscale x 4 x i1>, ptr)
+declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv2f64(<vscale x 2 x i1>, ptr)
 
-declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1>, i8*)
-declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld3.sret.nxv8i16(<vscale x 8 x i1>, i16*)
-declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3.sret.nxv4i32(<vscale x 4 x i1>, i32*)
-declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld3.sret.nxv2i64(<vscale x 2 x i1>, i64*)
-declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld3.sret.nxv8f16(<vscale x 8 x i1>, half*)
-declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld3.sret.nxv8bf16(<vscale x 8 x i1>, bfloat*)
-declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1>, float*)
-declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld3.sret.nxv2f64(<vscale x 2 x i1>, double*)
+declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1>, ptr)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld3.sret.nxv8i16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3.sret.nxv4i32(<vscale x 4 x i1>, ptr)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld3.sret.nxv2i64(<vscale x 2 x i1>, ptr)
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld3.sret.nxv8f16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld3.sret.nxv8bf16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1>, ptr)
+declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld3.sret.nxv2f64(<vscale x 2 x i1>, ptr)
 
-declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1>, i8*)
-declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld4.sret.nxv8i16(<vscale x 8 x i1>, i16*)
-declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld4.sret.nxv4i32(<vscale x 4 x i1>, i32*)
-declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv2i64(<vscale x 2 x i1>, i64*)
-declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld4.sret.nxv8f16(<vscale x 8 x i1>, half*)
-declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld4.sret.nxv8bf16(<vscale x 8 x i1>, bfloat*)
-declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld4.sret.nxv4f32(<vscale x 4 x i1>, float*)
-declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1>, double*)
+declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1>, ptr)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld4.sret.nxv8i16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld4.sret.nxv4i32(<vscale x 4 x i1>, ptr)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv2i64(<vscale x 2 x i1>, ptr)
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld4.sret.nxv8f16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld4.sret.nxv8bf16(<vscale x 8 x i1>, ptr)
+declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld4.sret.nxv4f32(<vscale x 4 x i1>, ptr)
+declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1>, ptr)
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-ff.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-ff.ll
index 1fcbc36f8bada..bfff3f76fd745 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-ff.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-ff.ll
@@ -5,84 +5,84 @@
 ; LDFF1B
 ;
 
-define <vscale x 16 x i8> @ldff1b(<vscale x 16 x i1> %pg, i8* %a) {
+define <vscale x 16 x i8> @ldff1b(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1b { z0.b }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldff1.nxv16i8(<vscale x 16 x i1> %pg, i8* %a)
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldff1.nxv16i8(<vscale x 16 x i1> %pg, ptr %a)
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 16 x i8> @ldff1b_reg(<vscale x 16 x i1> %pg, i8* %a, i64 %offset) {
+define <vscale x 16 x i8> @ldff1b_reg(<vscale x 16 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1b_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1b { z0.b }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %offset
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldff1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 %offset
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldff1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base)
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 8 x i16> @ldff1b_h(<vscale x 8 x i1> %pg, i8* %a) {
+define <vscale x 8 x i16> @ldff1b_h(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1b_h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1b { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, ptr %a)
   %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @ldff1b_h_reg(<vscale x 8 x i1> %pg, i8* %a, i64 %offset) {
+define <vscale x 8 x i16> @ldff1b_h_reg(<vscale x 8 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1b_h_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1b { z0.h }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %offset
-  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 %offset
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, ptr %base)
   %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @ldff1b_s(<vscale x 4 x i1> %pg, i8* %a) {
+define <vscale x 4 x i32> @ldff1b_s(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1b_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1b { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %pg, i8* %a)
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %pg, ptr %a)
   %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ldff1b_s_reg(<vscale x 4 x i1> %pg, i8* %a, i64 %offset) {
+define <vscale x 4 x i32> @ldff1b_s_reg(<vscale x 4 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1b_s_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1b { z0.s }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %offset
-  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %pg, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 %offset
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %pg, ptr %base)
   %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @ldff1b_d(<vscale x 2 x i1> %pg, i8* %a) {
+define <vscale x 2 x i64> @ldff1b_d(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1b_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1b { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %pg, i8* %a)
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %pg, ptr %a)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldff1b_d_reg(<vscale x 2 x i1> %pg, i8* %a, i64 %offset) {
+define <vscale x 2 x i64> @ldff1b_d_reg(<vscale x 2 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1b_d_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1b { z0.d }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %offset
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %pg, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 %offset
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %pg, ptr %base)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
@@ -91,65 +91,65 @@ define <vscale x 2 x i64> @ldff1b_d_reg(<vscale x 2 x i1> %pg, i8* %a, i64 %offs
 ; LDFF1SB
 ;
 
-define <vscale x 8 x i16> @ldff1sb_h(<vscale x 8 x i1> %pg, i8* %a) {
+define <vscale x 8 x i16> @ldff1sb_h(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1sb_h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sb { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, ptr %a)
   %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @ldff1sb_h_reg(<vscale x 8 x i1> %pg, i8* %a, i64 %offset) {
+define <vscale x 8 x i16> @ldff1sb_h_reg(<vscale x 8 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1sb_h_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sb { z0.h }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %offset
-  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 %offset
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1> %pg, ptr %base)
   %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 4 x i32> @ldff1sb_s(<vscale x 4 x i1> %pg, i8* %a) {
+define <vscale x 4 x i32> @ldff1sb_s(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1sb_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sb { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %pg, i8* %a)
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %pg, ptr %a)
   %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ldff1sb_s_reg(<vscale x 4 x i1> %pg, i8* %a, i64 %offset) {
+define <vscale x 4 x i32> @ldff1sb_s_reg(<vscale x 4 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1sb_s_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sb { z0.s }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %offset
-  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %pg, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 %offset
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1> %pg, ptr %base)
   %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @ldff1sb_d(<vscale x 2 x i1> %pg, i8* %a) {
+define <vscale x 2 x i64> @ldff1sb_d(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1sb_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %pg, i8* %a)
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %pg, ptr %a)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldff1sb_d_reg(<vscale x 2 x i1> %pg, i8* %a, i64 %offset) {
+define <vscale x 2 x i64> @ldff1sb_d_reg(<vscale x 2 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1sb_d_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sb { z0.d }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %offset
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %pg, i8* %base)
+  %base = getelementptr i8, ptr %a, i64 %offset
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1> %pg, ptr %base)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
@@ -158,102 +158,102 @@ define <vscale x 2 x i64> @ldff1sb_d_reg(<vscale x 2 x i1> %pg, i8* %a, i64 %off
 ; LDFF1H
 ;
 
-define <vscale x 8 x i16> @ldff1h(<vscale x 8 x i1> %pg, i16* %a) {
+define <vscale x 8 x i16> @ldff1h(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ldff1.nxv8i16(<vscale x 8 x i1> %pg, i16* %a)
+  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ldff1.nxv8i16(<vscale x 8 x i1> %pg, ptr %a)
   ret <vscale x 8 x i16> %load
 }
 
-define <vscale x 8 x i16> @ldff1h_reg(<vscale x 8 x i1> %pg, i16* %a, i64 %offset) {
+define <vscale x 8 x i16> @ldff1h_reg(<vscale x 8 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1h_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i16, i16* %a, i64 %offset
-  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ldff1.nxv8i16(<vscale x 8 x i1> %pg, i16* %base)
+  %base = getelementptr i16, ptr %a, i64 %offset
+  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ldff1.nxv8i16(<vscale x 8 x i1> %pg, ptr %base)
   ret <vscale x 8 x i16> %load
 }
 
-define <vscale x 4 x i32> @ldff1h_s(<vscale x 4 x i1> %pg, i16* %a) {
+define <vscale x 4 x i32> @ldff1h_s(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1h_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> %pg, i16* %a)
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> %pg, ptr %a)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ldff1h_s_reg(<vscale x 4 x i1> %pg, i16* %a, i64 %offset) {
+define <vscale x 4 x i32> @ldff1h_s_reg(<vscale x 4 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1h_s_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i16, i16* %a, i64 %offset
-  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> %pg, i16* %base)
+  %base = getelementptr i16, ptr %a, i64 %offset
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> %pg, ptr %base)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @ldff1h_d(<vscale x 2 x i1> %pg, i16* %a) {
+define <vscale x 2 x i64> @ldff1h_d(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1h_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> %pg, i16* %a)
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> %pg, ptr %a)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldff1h_d_reg(<vscale x 2 x i1> %pg, i16* %a, i64 %offset) {
+define <vscale x 2 x i64> @ldff1h_d_reg(<vscale x 2 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1h_d_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i16, i16* %a, i64 %offset
-  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> %pg, i16* %base)
+  %base = getelementptr i16, ptr %a, i64 %offset
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> %pg, ptr %base)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 8 x half> @ldff1h_f16(<vscale x 8 x i1> %pg, half* %a) {
+define <vscale x 8 x half> @ldff1h_f16(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1h_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ldff1.nxv8f16(<vscale x 8 x i1> %pg, half* %a)
+  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ldff1.nxv8f16(<vscale x 8 x i1> %pg, ptr %a)
   ret <vscale x 8 x half> %load
 }
 
-define <vscale x 8 x bfloat> @ldff1h_bf16(<vscale x 8 x i1> %pg, bfloat* %a) #0 {
+define <vscale x 8 x bfloat> @ldff1h_bf16(<vscale x 8 x i1> %pg, ptr %a) #0 {
 ; CHECK-LABEL: ldff1h_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ldff1.nxv8bf16(<vscale x 8 x i1> %pg, bfloat* %a)
+  %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ldff1.nxv8bf16(<vscale x 8 x i1> %pg, ptr %a)
   ret <vscale x 8 x bfloat> %load
 }
 
-define <vscale x 8 x half> @ldff1h_f16_reg(<vscale x 8 x i1> %pg, half* %a, i64 %offset) {
+define <vscale x 8 x half> @ldff1h_f16_reg(<vscale x 8 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1h_f16_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr half, half* %a, i64 %offset
-  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ldff1.nxv8f16(<vscale x 8 x i1> %pg, half* %base)
+  %base = getelementptr half, ptr %a, i64 %offset
+  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ldff1.nxv8f16(<vscale x 8 x i1> %pg, ptr %base)
   ret <vscale x 8 x half> %load
 }
 
-define <vscale x 8 x bfloat> @ldff1h_bf16_reg(<vscale x 8 x i1> %pg, bfloat* %a, i64 %offset) #0 {
+define <vscale x 8 x bfloat> @ldff1h_bf16_reg(<vscale x 8 x i1> %pg, ptr %a, i64 %offset) #0 {
 ; CHECK-LABEL: ldff1h_bf16_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1h { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr bfloat, bfloat* %a, i64 %offset
-  %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ldff1.nxv8bf16(<vscale x 8 x i1> %pg, bfloat* %base)
+  %base = getelementptr bfloat, ptr %a, i64 %offset
+  %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ldff1.nxv8bf16(<vscale x 8 x i1> %pg, ptr %base)
   ret <vscale x 8 x bfloat> %load
 }
 
@@ -261,44 +261,44 @@ define <vscale x 8 x bfloat> @ldff1h_bf16_reg(<vscale x 8 x i1> %pg, bfloat* %a,
 ; LDFF1SH
 ;
 
-define <vscale x 4 x i32> @ldff1sh_s(<vscale x 4 x i1> %pg, i16* %a) {
+define <vscale x 4 x i32> @ldff1sh_s(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1sh_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sh { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> %pg, i16* %a)
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> %pg, ptr %a)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ldff1sh_s_reg(<vscale x 4 x i1> %pg, i16* %a, i64 %offset) {
+define <vscale x 4 x i32> @ldff1sh_s_reg(<vscale x 4 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1sh_s_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sh { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i16, i16* %a, i64 %offset
-  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> %pg, i16* %base)
+  %base = getelementptr i16, ptr %a, i64 %offset
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> %pg, ptr %base)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 2 x i64> @ldff1sh_d(<vscale x 2 x i1> %pg, i16* %a) {
+define <vscale x 2 x i64> @ldff1sh_d(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1sh_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> %pg, i16* %a)
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> %pg, ptr %a)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldff1sh_d_reg(<vscale x 2 x i1> %pg, i16* %a, i64 %offset) {
+define <vscale x 2 x i64> @ldff1sh_d_reg(<vscale x 2 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1sh_d_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sh { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i16, i16* %a, i64 %offset
-  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> %pg, i16* %base)
+  %base = getelementptr i16, ptr %a, i64 %offset
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> %pg, ptr %base)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
@@ -307,81 +307,81 @@ define <vscale x 2 x i64> @ldff1sh_d_reg(<vscale x 2 x i1> %pg, i16* %a, i64 %of
 ; LDFF1W
 ;
 
-define <vscale x 4 x i32> @ldff1w(<vscale x 4 x i1> %pg, i32* %a) {
+define <vscale x 4 x i32> @ldff1w(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1w:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.nxv4i32(<vscale x 4 x i1> %pg, i32* %a)
+  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.nxv4i32(<vscale x 4 x i1> %pg, ptr %a)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 4 x i32> @ldff1w_reg(<vscale x 4 x i1> %pg, i32* %a, i64 %offset) {
+define <vscale x 4 x i32> @ldff1w_reg(<vscale x 4 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1w_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i32, i32* %a, i64 %offset
-  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.nxv4i32(<vscale x 4 x i1> %pg, i32* %base)
+  %base = getelementptr i32, ptr %a, i64 %offset
+  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.nxv4i32(<vscale x 4 x i1> %pg, ptr %base)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 2 x i64> @ldff1w_d(<vscale x 2 x i1> %pg, i32* %a) {
+define <vscale x 2 x i64> @ldff1w_d(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1w_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1> %pg, i32* %a)
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1> %pg, ptr %a)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldff1w_d_reg(<vscale x 2 x i1> %pg, i32* %a, i64 %offset) {
+define <vscale x 2 x i64> @ldff1w_d_reg(<vscale x 2 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1w_d_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i32, i32* %a, i64 %offset
-  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1> %pg, i32* %base)
+  %base = getelementptr i32, ptr %a, i64 %offset
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1> %pg, ptr %base)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 4 x float> @ldff1w_f32(<vscale x 4 x i1> %pg, float* %a) {
+define <vscale x 4 x float> @ldff1w_f32(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1w_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldff1.nxv4f32(<vscale x 4 x i1> %pg, float* %a)
+  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldff1.nxv4f32(<vscale x 4 x i1> %pg, ptr %a)
   ret <vscale x 4 x float> %load
 }
 
-define <vscale x 4 x float> @ldff1w_f32_reg(<vscale x 4 x i1> %pg, float* %a, i64 %offset) {
+define <vscale x 4 x float> @ldff1w_f32_reg(<vscale x 4 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1w_f32_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %base = getelementptr float, float* %a, i64 %offset
-  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldff1.nxv4f32(<vscale x 4 x i1> %pg, float* %base)
+  %base = getelementptr float, ptr %a, i64 %offset
+  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldff1.nxv4f32(<vscale x 4 x i1> %pg, ptr %base)
   ret <vscale x 4 x float> %load
 }
 
-define <vscale x 2 x float> @ldff1w_2f32(<vscale x 2 x i1> %pg, float* %a) {
+define <vscale x 2 x float> @ldff1w_2f32(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1w_2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x float> @llvm.aarch64.sve.ldff1.nxv2f32(<vscale x 2 x i1> %pg, float* %a)
+  %load = call <vscale x 2 x float> @llvm.aarch64.sve.ldff1.nxv2f32(<vscale x 2 x i1> %pg, ptr %a)
   ret <vscale x 2 x float> %load
 }
 
-define <vscale x 2 x float> @ldff1w_2f32_reg(<vscale x 2 x i1> %pg, float* %a, i64 %offset) {
+define <vscale x 2 x float> @ldff1w_2f32_reg(<vscale x 2 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1w_2f32_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1w { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %base = getelementptr float, float* %a, i64 %offset
-  %load = call <vscale x 2 x float> @llvm.aarch64.sve.ldff1.nxv2f32(<vscale x 2 x i1> %pg, float* %base)
+  %base = getelementptr float, ptr %a, i64 %offset
+  %load = call <vscale x 2 x float> @llvm.aarch64.sve.ldff1.nxv2f32(<vscale x 2 x i1> %pg, ptr %base)
   ret <vscale x 2 x float> %load
 }
 
@@ -389,23 +389,23 @@ define <vscale x 2 x float> @ldff1w_2f32_reg(<vscale x 2 x i1> %pg, float* %a, i
 ; LDFF1SW
 ;
 
-define <vscale x 2 x i64> @ldff1sw_d(<vscale x 2 x i1> %pg, i32* %a) {
+define <vscale x 2 x i64> @ldff1sw_d(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1sw_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sw { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1> %pg, i32* %a)
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1> %pg, ptr %a)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldff1sw_d_reg(<vscale x 2 x i1> %pg, i32* %a, i64 %offset) {
+define <vscale x 2 x i64> @ldff1sw_d_reg(<vscale x 2 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1sw_d_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1sw { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i32, i32* %a, i64 %offset
-  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1> %pg, i32* %base)
+  %base = getelementptr i32, ptr %a, i64 %offset
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1> %pg, ptr %base)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
@@ -414,63 +414,63 @@ define <vscale x 2 x i64> @ldff1sw_d_reg(<vscale x 2 x i1> %pg, i32* %a, i64 %of
 ; LDFF1D
 ;
 
-define <vscale x 2 x i64> @ldff1d(<vscale x 2 x i1> %pg, i64* %a) {
+define <vscale x 2 x i64> @ldff1d(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.nxv2i64(<vscale x 2 x i1> %pg, i64* %a)
+  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.nxv2i64(<vscale x 2 x i1> %pg, ptr %a)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x i64> @ldff1d_reg(<vscale x 2 x i1> %pg, i64* %a, i64 %offset) {
+define <vscale x 2 x i64> @ldff1d_reg(<vscale x 2 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1d_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1d { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i64, i64* %a, i64 %offset
-  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.nxv2i64(<vscale x 2 x i1> %pg, i64* %base)
+  %base = getelementptr i64, ptr %a, i64 %offset
+  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.nxv2i64(<vscale x 2 x i1> %pg, ptr %base)
   ret <vscale x 2 x i64> %load
 }
 
 
-define <vscale x 2 x double> @ldff1d_f64(<vscale x 2 x i1> %pg, double* %a) {
+define <vscale x 2 x double> @ldff1d_f64(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldff1d_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldff1.nxv2f64(<vscale x 2 x i1> %pg, double* %a)
+  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldff1.nxv2f64(<vscale x 2 x i1> %pg, ptr %a)
   ret <vscale x 2 x double> %load
 }
 
-define <vscale x 2 x double> @ldff1d_f64_reg(<vscale x 2 x i1> %pg, double* %a, i64 %offset) {
+define <vscale x 2 x double> @ldff1d_f64_reg(<vscale x 2 x i1> %pg, ptr %a, i64 %offset) {
 ; CHECK-LABEL: ldff1d_f64_reg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldff1d { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %base = getelementptr double, double* %a, i64 %offset
-  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldff1.nxv2f64(<vscale x 2 x i1> %pg, double* %base)
+  %base = getelementptr double, ptr %a, i64 %offset
+  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldff1.nxv2f64(<vscale x 2 x i1> %pg, ptr %base)
   ret <vscale x 2 x double> %load
 }
 
-declare <vscale x 16 x i8> @llvm.aarch64.sve.ldff1.nxv16i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ldff1.nxv16i8(<vscale x 16 x i1>, ptr)
 
-declare <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1>, i8*)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.ldff1.nxv8i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 8 x half> @llvm.aarch64.sve.ldff1.nxv8f16(<vscale x 8 x i1>, half*)
-declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ldff1.nxv8bf16(<vscale x 8 x i1>, bfloat*)
+declare <vscale x 8 x i8> @llvm.aarch64.sve.ldff1.nxv8i8(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ldff1.nxv8i16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ldff1.nxv8f16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ldff1.nxv8bf16(<vscale x 8 x i1>, ptr)
 
-declare <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1>, i8*)
-declare <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1>, i16*)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.nxv4i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 2 x float> @llvm.aarch64.sve.ldff1.nxv2f32(<vscale x 2 x i1>, float*)
-declare <vscale x 4 x float> @llvm.aarch64.sve.ldff1.nxv4f32(<vscale x 4 x i1>, float*)
+declare <vscale x 4 x i8> @llvm.aarch64.sve.ldff1.nxv4i8(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ldff1.nxv4i32(<vscale x 4 x i1>, ptr)
+declare <vscale x 2 x float> @llvm.aarch64.sve.ldff1.nxv2f32(<vscale x 2 x i1>, ptr)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ldff1.nxv4f32(<vscale x 4 x i1>, ptr)
 
-declare <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1>, i8*)
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1>, i16*)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1>, i32*)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.nxv2i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ldff1.nxv2f64(<vscale x 2 x i1>, double*)
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ldff1.nxv2i8(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ldff1.nxv2i32(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ldff1.nxv2i64(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ldff1.nxv2f64(<vscale x 2 x i1>, ptr)
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+sve,+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
index 183b5cec4e2a1..a774af20f2e93 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
@@ -5,517 +5,493 @@
 ; mode is done only for one instruction. The rest of the instructions
 ; test only one immediate value in bound.
 
-define <vscale x 16 x i8> @ldnf1b(<vscale x 16 x i1> %pg, i8* %a) {
+define <vscale x 16 x i8> @ldnf1b(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1b { z0.b }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, i8* %a)
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, ptr %a)
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 16 x i8> @ldnf1b_out_of_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {
+define <vscale x 16 x i8> @ldnf1b_out_of_lower_bound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1b_out_of_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    addvl x8, x0, #-9
 ; CHECK-NEXT:    ldnf1b { z0.b }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -9
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -9
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 16 x i8> @ldnf1b_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {
+define <vscale x 16 x i8> @ldnf1b_lower_bound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1b_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1b { z0.b }, p0/z, [x0, #-8, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -8
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -8
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 16 x i8> @ldnf1b_inbound(<vscale x 16 x i1> %pg, i8* %a) {
+define <vscale x 16 x i8> @ldnf1b_inbound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1b_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1b { z0.b }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 1
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 16 x i8> @ldnf1b_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
+define <vscale x 16 x i8> @ldnf1b_upper_bound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1b_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1b { z0.b }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 7
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 16 x i8> @ldnf1b_out_of_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
+define <vscale x 16 x i8> @ldnf1b_out_of_upper_bound(<vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1b_out_of_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    addvl x8, x0, #8
 ; CHECK-NEXT:    ldnf1b { z0.b }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 8
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 8
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1> %pg, ptr %base_scalar)
   ret <vscale x 16 x i8> %load
 }
 
-define <vscale x 8 x i16> @ldnf1b_h(<vscale x 8 x i1> %pg, i8* %a) {
+define <vscale x 8 x i16> @ldnf1b_h(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1b_h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1b { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, ptr %a)
   %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @ldnf1b_h_inbound(<vscale x 8 x i1> %pg, i8* %a) {
+define <vscale x 8 x i16> @ldnf1b_h_inbound(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1b_h_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1b { z0.h }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 8 x i8>*
-  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 8 x i8>* %base to i8*
-  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %a, i64 7
+  %base_scalar = bitcast <vscale x 8 x i8>* %base to ptr
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, ptr %base_scalar)
   %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @ldnf1sb_h(<vscale x 8 x i1> %pg, i8* %a) {
+define <vscale x 8 x i16> @ldnf1sb_h(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1sb_h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sb { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, i8* %a)
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, ptr %a)
   %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @ldnf1sb_h_inbound(<vscale x 8 x i1> %pg, i8* %a) {
+define <vscale x 8 x i16> @ldnf1sb_h_inbound(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1sb_h_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sb { z0.h }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 8 x i8>*
-  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 8 x i8>* %base to i8*
-  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %a, i64 7
+  %base_scalar = bitcast <vscale x 8 x i8>* %base to ptr
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1> %pg, ptr %base_scalar)
   %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x i16> @ldnf1h(<vscale x 8 x i1> %pg, i16* %a) {
+define <vscale x 8 x i16> @ldnf1h(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnf1.nxv8i16(<vscale x 8 x i1> %pg, i16* %a)
+  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnf1.nxv8i16(<vscale x 8 x i1> %pg, ptr %a)
   ret <vscale x 8 x i16> %load
 }
 
-define <vscale x 8 x i16> @ldnf1h_inbound(<vscale x 8 x i1> %pg, i16* %a) {
+define <vscale x 8 x i16> @ldnf1h_inbound(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1h_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1h { z0.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i16* %a to <vscale x 8 x i16>*
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 8 x i16>* %base to i16*
-  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnf1.nxv8i16(<vscale x 8 x i1> %pg, i16* %base_scalar)
+  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %a, i64 1
+  %base_scalar = bitcast <vscale x 8 x i16>* %base to ptr
+  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnf1.nxv8i16(<vscale x 8 x i1> %pg, ptr %base_scalar)
   ret <vscale x 8 x i16> %load
 }
 
-define <vscale x 8 x half> @ldnf1h_f16(<vscale x 8 x i1> %pg, half* %a) {
+define <vscale x 8 x half> @ldnf1h_f16(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1h_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ldnf1.nxv8f16(<vscale x 8 x i1> %pg, half* %a)
+  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ldnf1.nxv8f16(<vscale x 8 x i1> %pg, ptr %a)
   ret <vscale x 8 x half> %load
 }
 
-define <vscale x 8 x bfloat> @ldnf1h_bf16(<vscale x 8 x i1> %pg, bfloat* %a) #0 {
+define <vscale x 8 x bfloat> @ldnf1h_bf16(<vscale x 8 x i1> %pg, ptr %a) #0 {
 ; CHECK-LABEL: ldnf1h_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnf1.nxv8bf16(<vscale x 8 x i1> %pg, bfloat* %a)
+  %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnf1.nxv8bf16(<vscale x 8 x i1> %pg, ptr %a)
   ret <vscale x 8 x bfloat> %load
 }
 
-define <vscale x 8 x half> @ldnf1h_f16_inbound(<vscale x 8 x i1> %pg, half* %a) {
+define <vscale x 8 x half> @ldnf1h_f16_inbound(<vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1h_f16_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1h { z0.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast half* %a to <vscale x 8 x half>*
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 8 x half>* %base to half*
-  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ldnf1.nxv8f16(<vscale x 8 x i1> %pg, half* %base_scalar)
+  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %a, i64 1
+  %base_scalar = bitcast <vscale x 8 x half>* %base to ptr
+  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ldnf1.nxv8f16(<vscale x 8 x i1> %pg, ptr %base_scalar)
   ret <vscale x 8 x half> %load
 }
 
-define <vscale x 8 x bfloat> @ldnf1h_bf16_inbound(<vscale x 8 x i1> %pg, bfloat* %a) #0 {
+define <vscale x 8 x bfloat> @ldnf1h_bf16_inbound(<vscale x 8 x i1> %pg, ptr %a) #0 {
 ; CHECK-LABEL: ldnf1h_bf16_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1h { z0.h }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast bfloat* %a to <vscale x 8 x bfloat>*
-  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 8 x bfloat>* %base to bfloat*
-  %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnf1.nxv8bf16(<vscale x 8 x i1> %pg, bfloat* %base_scalar)
+  %base = getelementptr <vscale x 8 x bfloat>, ptr %a, i64 1
+  %load = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnf1.nxv8bf16(<vscale x 8 x i1> %pg, ptr %base)
   ret <vscale x 8 x bfloat> %load
 }
 
-define <vscale x 4 x i32> @ldnf1b_s(<vscale x 4 x i1> %pg, i8* %a) {
+define <vscale x 4 x i32> @ldnf1b_s(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1b_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1b { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %pg, i8* %a)
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %pg, ptr %a)
   %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ldnf1b_s_inbound(<vscale x 4 x i1> %pg, i8* %a) {
+define <vscale x 4 x i32> @ldnf1b_s_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1b_s_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1b { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
-  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x i8>* %base to i8*
-  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 4 x i8>, ptr %a, i64 7
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %pg, ptr %base)
   %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ldnf1sb_s(<vscale x 4 x i1> %pg, i8* %a) {
+define <vscale x 4 x i32> @ldnf1sb_s(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1sb_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sb { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %pg, i8* %a)
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %pg, ptr %a)
   %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ldnf1sb_s_inbound(<vscale x 4 x i1> %pg, i8* %a) {
+define <vscale x 4 x i32> @ldnf1sb_s_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1sb_s_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sb { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
-  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x i8>* %base to i8*
-  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 4 x i8>, ptr %a, i64 7
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1> %pg, ptr %base)
   %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ldnf1h_s(<vscale x 4 x i1> %pg, i16* %a) {
+define <vscale x 4 x i32> @ldnf1h_s(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1h_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1h { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %pg, i16* %a)
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %pg, ptr %a)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ldnf1h_s_inbound(<vscale x 4 x i1> %pg, i16* %a) {
+define <vscale x 4 x i32> @ldnf1h_s_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1h_s_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1h { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i16* %a to <vscale x 4 x i16>*
-  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x i16>* %base to i16*
-  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %pg, i16* %base_scalar)
+  %base = getelementptr <vscale x 4 x i16>, ptr %a, i64 7
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %pg, ptr %base)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ldnf1sh_s(<vscale x 4 x i1> %pg, i16* %a) {
+define <vscale x 4 x i32> @ldnf1sh_s(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1sh_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sh { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %pg, i16* %a)
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %pg, ptr %a)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ldnf1sh_s_inbound(<vscale x 4 x i1> %pg, i16* %a) {
+define <vscale x 4 x i32> @ldnf1sh_s_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1sh_s_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sh { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i16* %a to <vscale x 4 x i16>*
-  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x i16>* %base to i16*
-  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %pg, i16* %base_scalar)
+  %base = getelementptr <vscale x 4 x i16>, ptr %a, i64 7
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1> %pg, ptr %base)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x i32> @ldnf1w(<vscale x 4 x i1> %pg, i32* %a) {
+define <vscale x 4 x i32> @ldnf1w(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1w:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnf1.nxv4i32(<vscale x 4 x i1> %pg, i32* %a)
+  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnf1.nxv4i32(<vscale x 4 x i1> %pg, ptr %a)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 4 x i32> @ldnf1w_inbound(<vscale x 4 x i1> %pg, i32* %a) {
+define <vscale x 4 x i32> @ldnf1w_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1w_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1w { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i32* %a to <vscale x 4 x i32>*
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x i32>* %base to i32*
-  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnf1.nxv4i32(<vscale x 4 x i1> %pg, i32* %base_scalar)
+  %base = getelementptr <vscale x 4 x i32>, ptr %a, i64 7
+  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnf1.nxv4i32(<vscale x 4 x i1> %pg, ptr %base)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 4 x float> @ldnf1w_f32(<vscale x 4 x i1> %pg, float* %a) {
+define <vscale x 4 x float> @ldnf1w_f32(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1w_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldnf1.nxv4f32(<vscale x 4 x i1> %pg, float* %a)
+  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldnf1.nxv4f32(<vscale x 4 x i1> %pg, ptr %a)
   ret <vscale x 4 x float> %load
 }
 
-define <vscale x 4 x float> @ldnf1w_f32_inbound(<vscale x 4 x i1> %pg, float* %a) {
+define <vscale x 4 x float> @ldnf1w_f32_inbound(<vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1w_f32_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1w { z0.s }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast float* %a to <vscale x 4 x float>*
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x float>* %base to float*
-  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldnf1.nxv4f32(<vscale x 4 x i1> %pg, float* %base_scalar)
+  %base = getelementptr <vscale x 4 x float>, ptr %a, i64 7
+  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldnf1.nxv4f32(<vscale x 4 x i1> %pg, ptr %base)
   ret <vscale x 4 x float> %load
 }
 
-define <vscale x 2 x i64> @ldnf1b_d(<vscale x 2 x i1> %pg, i8* %a) {
+define <vscale x 2 x i64> @ldnf1b_d(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1b_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1b { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %pg, i8* %a)
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %pg, ptr %a)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldnf1b_d_inbound(<vscale x 2 x i1> %pg, i8* %a) {
+define <vscale x 2 x i64> @ldnf1b_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1b_d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1b { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 2 x i8>*
-  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i8>* %base to i8*
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 2 x i8>, ptr %a, i64 7
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %pg, ptr %base)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldnf1sb_d(<vscale x 2 x i1> %pg, i8* %a) {
+define <vscale x 2 x i64> @ldnf1sb_d(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1sb_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %pg, i8* %a)
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %pg, ptr %a)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldnf1sb_d_inbound(<vscale x 2 x i1> %pg, i8* %a) {
+define <vscale x 2 x i64> @ldnf1sb_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1sb_d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sb { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 2 x i8>*
-  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i8>* %base to i8*
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 2 x i8>, ptr %a, i64 7
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1> %pg, ptr %base)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldnf1h_d(<vscale x 2 x i1> %pg, i16* %a) {
+define <vscale x 2 x i64> @ldnf1h_d(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1h_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1h { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %pg, i16* %a)
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %pg, ptr %a)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldnf1h_d_inbound(<vscale x 2 x i1> %pg, i16* %a) {
+define <vscale x 2 x i64> @ldnf1h_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1h_d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1h { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i16* %a to <vscale x 2 x i16>*
-  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i16>* %base to i16*
-  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %pg, i16* %base_scalar)
+  %base = getelementptr <vscale x 2 x i16>, ptr %a, i64 7
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %pg, ptr %base)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldnf1sh_d(<vscale x 2 x i1> %pg, i16* %a) {
+define <vscale x 2 x i64> @ldnf1sh_d(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1sh_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %pg, i16* %a)
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %pg, ptr %a)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldnf1sh_d_inbound(<vscale x 2 x i1> %pg, i16* %a) {
+define <vscale x 2 x i64> @ldnf1sh_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1sh_d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sh { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i16* %a to <vscale x 2 x i16>*
-  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i16>* %base to i16*
-  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %pg, i16* %base_scalar)
+  %base = getelementptr <vscale x 2 x i16>, ptr %a, i64 7
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1> %pg, ptr %base)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldnf1w_d(<vscale x 2 x i1> %pg, i32* %a) {
+define <vscale x 2 x i64> @ldnf1w_d(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1w_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1w { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %pg, i32* %a)
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %pg, ptr %a)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldnf1w_d_inbound(<vscale x 2 x i1> %pg, i32* %a) {
+define <vscale x 2 x i64> @ldnf1w_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1w_d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1w { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i32* %a to <vscale x 2 x i32>*
-  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i32>* %base to i32*
-  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %pg, i32* %base_scalar)
+  %base = getelementptr <vscale x 2 x i32>, ptr %a, i64 7
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %pg, ptr %base)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldnf1sw_d(<vscale x 2 x i1> %pg, i32* %a) {
+define <vscale x 2 x i64> @ldnf1sw_d(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1sw_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sw { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %pg, i32* %a)
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %pg, ptr %a)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldnf1sw_d_inbound(<vscale x 2 x i1> %pg, i32* %a) {
+define <vscale x 2 x i64> @ldnf1sw_d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1sw_d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1sw { z0.d }, p0/z, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i32* %a to <vscale x 2 x i32>*
-  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i32>* %base to i32*
-  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %pg, i32* %base_scalar)
+  %base = getelementptr <vscale x 2 x i32>, ptr %a, i64 7
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1> %pg, ptr %base)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @ldnf1d(<vscale x 2 x i1> %pg, i64* %a) {
+define <vscale x 2 x i64> @ldnf1d(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnf1.nxv2i64(<vscale x 2 x i1> %pg, i64* %a)
+  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnf1.nxv2i64(<vscale x 2 x i1> %pg, ptr %a)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x i64> @ldnf1d_inbound(<vscale x 2 x i1> %pg, i64* %a) {
+define <vscale x 2 x i64> @ldnf1d_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1d { z0.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i64* %a to <vscale x 2 x i64>*
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 2 x i64>* %base to i64*
-  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnf1.nxv2i64(<vscale x 2 x i1> %pg, i64* %base_scalar)
+  %base = getelementptr <vscale x 2 x i64>, ptr %a, i64 1
+  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnf1.nxv2i64(<vscale x 2 x i1> %pg, ptr %base)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @ldnf1d_f64(<vscale x 2 x i1> %pg, double* %a) {
+define <vscale x 2 x double> @ldnf1d_f64(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1d_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldnf1.nxv2f64(<vscale x 2 x i1> %pg, double* %a)
+  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldnf1.nxv2f64(<vscale x 2 x i1> %pg, ptr %a)
   ret <vscale x 2 x double> %load
 }
 
-define <vscale x 2 x double> @ldnf1d_f64_inbound(<vscale x 2 x i1> %pg, double* %a) {
+define <vscale x 2 x double> @ldnf1d_f64_inbound(<vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: ldnf1d_f64_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnf1d { z0.d }, p0/z, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast double* %a to <vscale x 2 x double>*
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 2 x double>* %base to double*
-  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldnf1.nxv2f64(<vscale x 2 x i1> %pg, double* %base_scalar)
+  %base = getelementptr <vscale x 2 x double>, ptr %a, i64 1
+  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldnf1.nxv2f64(<vscale x 2 x i1> %pg, ptr %base)
   ret <vscale x 2 x double> %load
 }
 
-declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnf1.nxv16i8(<vscale x 16 x i1>, ptr)
 
-declare <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1>, i8*)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.ldnf1.nxv8i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 8 x half> @llvm.aarch64.sve.ldnf1.nxv8f16(<vscale x 8 x i1>, half*)
-declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnf1.nxv8bf16(<vscale x 8 x i1>, bfloat*)
+declare <vscale x 8 x i8> @llvm.aarch64.sve.ldnf1.nxv8i8(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ldnf1.nxv8i16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ldnf1.nxv8f16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnf1.nxv8bf16(<vscale x 8 x i1>, ptr)
 
-declare <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1>, i8*)
-declare <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1>, i16*)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnf1.nxv4i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 4 x float> @llvm.aarch64.sve.ldnf1.nxv4f32(<vscale x 4 x i1>, float*)
+declare <vscale x 4 x i8> @llvm.aarch64.sve.ldnf1.nxv4i8(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ldnf1.nxv4i16(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnf1.nxv4i32(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ldnf1.nxv4f32(<vscale x 4 x i1>, ptr)
 
-declare <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1>, i8*)
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1>, i16*)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1>, i32*)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ldnf1.nxv2i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ldnf1.nxv2f64(<vscale x 2 x i1>, double*)
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ldnf1.nxv2i8(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ldnf1.nxv2i16(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ldnf1.nxv2i32(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ldnf1.nxv2i64(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ldnf1.nxv2f64(<vscale x 2 x i1>, ptr)
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+sve,+bf16" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
index b3db0abc19670..309742fe8282b 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
@@ -5,99 +5,98 @@
 ; LD1RQB
 ;
 
-define <vscale x 16 x i8> @ld1rqb_i8(<vscale x 16 x i1> %pred, i8* %addr) {
+define <vscale x 16 x i8> @ld1rqb_i8(<vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqb_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr)
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, ptr %addr)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 16 x i8> @ld1rqb_i8_imm(<vscale x 16 x i1> %pred, i8* %addr) {
+define <vscale x 16 x i8> @ld1rqb_i8_imm(<vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqb_i8_imm:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, #16]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i8 16
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, i8* %ptr)
+  %ptr = getelementptr inbounds i8, ptr %addr, i8 16
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, ptr %ptr)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 16 x i8> @ld1rqb_i8_scalar(<vscale x 16 x i1> %pred, i8* %addr, i64 %idx) {
+define <vscale x 16 x i8> @ld1rqb_i8_scalar(<vscale x 16 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqb_i8_scalar:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %idx
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, i8* %ptr)
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %idx
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, ptr %ptr)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 16 x i8> @ld1rqb_i8_imm_lower_bound(<vscale x 16 x i1> %pred, i8* %addr) {
+define <vscale x 16 x i8> @ld1rqb_i8_imm_lower_bound(<vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqb_i8_imm_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, #-128]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i8 -128
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, i8* %ptr)
+  %ptr = getelementptr inbounds i8, ptr %addr, i8 -128
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, ptr %ptr)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 16 x i8> @ld1rqb_i8_imm_upper_bound(<vscale x 16 x i1> %pred, i8* %addr) {
+define <vscale x 16 x i8> @ld1rqb_i8_imm_upper_bound(<vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqb_i8_imm_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, #112]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i8 112
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, i8* %ptr)
+  %ptr = getelementptr inbounds i8, ptr %addr, i8 112
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, ptr %ptr)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 16 x i8> @ld1rqb_i8_imm_out_of_lower_bound(<vscale x 16 x i1> %pred, i8* %addr) {
+define <vscale x 16 x i8> @ld1rqb_i8_imm_out_of_lower_bound(<vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqb_i8_imm_out_of_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #-129
 ; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 -129
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, i8* %ptr)
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 -129
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, ptr %ptr)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 16 x i8> @ld1rqb_i8_imm_out_of_upper_bound(<vscale x 16 x i1> %pred, i8* %addr) {
+define <vscale x 16 x i8> @ld1rqb_i8_imm_out_of_upper_bound(<vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqb_i8_imm_out_of_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #113
 ; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, x8]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 113
-  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, i8* %ptr)
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 113
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, ptr %ptr)
   ret <vscale x 16 x i8> %res
 }
 
-define <vscale x 16 x i8> @ld1rqb_i8_imm_dupqlane(<vscale x 8 x i1> %pred, <16 x i8>* %addr) {
+define <vscale x 16 x i8> @ld1rqb_i8_imm_dupqlane(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqb_i8_imm_dupqlane:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, #-16]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds <16 x i8>, <16 x i8>* %addr, i16 -1
-  %load = load <16 x i8>, <16 x i8>* %ptr
+  %ptr = getelementptr inbounds <16 x i8>, ptr %addr, i16 -1
+  %load = load <16 x i8>, ptr %ptr
   %1 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> %load, i64 0)
   %2 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %1, i64 0)
   ret <vscale x 16 x i8> %2
 }
 
-define <vscale x 16 x i8> @ld1rqb_i8_scalar_dupqlane(<vscale x 8 x i1> %pred, i8* %addr, i64 %idx) {
+define <vscale x 16 x i8> @ld1rqb_i8_scalar_dupqlane(<vscale x 8 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqb_i8_scalar_dupqlane:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %idx
-  %ptr_bitcast = bitcast i8* %ptr to <16 x i8>*
-  %load = load <16 x i8>, <16 x i8>* %ptr_bitcast
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %idx
+  %load = load <16 x i8>, ptr %ptr
   %1 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> %load, i64 0)
   %2 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %1, i64 0)
   ret <vscale x 16 x i8> %2
@@ -107,169 +106,166 @@ define <vscale x 16 x i8> @ld1rqb_i8_scalar_dupqlane(<vscale x 8 x i1> %pred, i8
 ; LD1RQH
 ;
 
-define <vscale x 8 x i16> @ld1rqh_i16(<vscale x 8 x i1> %pred, i16* %addr) {
+define <vscale x 8 x i16> @ld1rqh_i16(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqh_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1> %pred, i16* %addr)
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1> %pred, ptr %addr)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x half> @ld1rqh_f16(<vscale x 8 x i1> %pred, half* %addr) {
+define <vscale x 8 x half> @ld1rqh_f16(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqh_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1rq.nxv8f16(<vscale x 8 x i1> %pred, half* %addr)
+  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1rq.nxv8f16(<vscale x 8 x i1> %pred, ptr %addr)
   ret <vscale x 8 x half> %res
 }
 
-define <vscale x 8 x i16> @ld1rqh_i16_imm(<vscale x 8 x i1> %pred, i16* %addr) {
+define <vscale x 8 x i16> @ld1rqh_i16_imm(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqh_i16_imm:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, #-64]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i16, i16* %addr, i16 -32
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1> %pred, i16* %ptr)
+  %ptr = getelementptr inbounds i16, ptr %addr, i16 -32
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1> %pred, ptr %ptr)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x half> @ld1rqh_f16_imm(<vscale x 8 x i1> %pred, half* %addr) {
+define <vscale x 8 x half> @ld1rqh_f16_imm(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqh_f16_imm:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, #-16]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds half, half* %addr, i16 -8
-  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1rq.nxv8f16(<vscale x 8 x i1> %pred, half* %ptr)
+  %ptr = getelementptr inbounds half, ptr %addr, i16 -8
+  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1rq.nxv8f16(<vscale x 8 x i1> %pred, ptr %ptr)
   ret <vscale x 8 x half> %res
 }
 
-define <vscale x 8 x i16> @ld1rqh_i16_scalar(<vscale x 8 x i1> %pred, i16* %addr, i64 %idx) {
+define <vscale x 8 x i16> @ld1rqh_i16_scalar(<vscale x 8 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqh_i16_scalar:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i16, i16* %addr, i64 %idx
-  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1> %pred, i16* %ptr)
+  %ptr = getelementptr inbounds i16, ptr %addr, i64 %idx
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1> %pred, ptr %ptr)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x half> @ld1rqh_f16_scalar(<vscale x 8 x i1> %pred, half* %addr, i64 %idx) {
+define <vscale x 8 x half> @ld1rqh_f16_scalar(<vscale x 8 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqh_f16_scalar:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds half, half* %addr, i64 %idx
-  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1rq.nxv8f16(<vscale x 8 x i1> %pred, half* %ptr)
+  %ptr = getelementptr inbounds half, ptr %addr, i64 %idx
+  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1rq.nxv8f16(<vscale x 8 x i1> %pred, ptr %ptr)
   ret <vscale x 8 x half> %res
 }
 
-define <vscale x 8 x bfloat> @ld1rqh_bf16(<vscale x 8 x i1> %pred, bfloat* %addr) {
+define <vscale x 8 x bfloat> @ld1rqh_bf16(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqh_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1rq.nxv8bf16(<vscale x 8 x i1> %pred, bfloat* %addr)
+  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1rq.nxv8bf16(<vscale x 8 x i1> %pred, ptr %addr)
   ret <vscale x 8 x bfloat> %res
 }
 
-define <vscale x 8 x bfloat> @ld1rqh_bf16_imm(<vscale x 8 x i1> %pred, bfloat* %addr) {
+define <vscale x 8 x bfloat> @ld1rqh_bf16_imm(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqh_bf16_imm:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, #-16]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds bfloat, bfloat* %addr, i16 -8
-  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1rq.nxv8bf16(<vscale x 8 x i1> %pred, bfloat* %ptr)
+  %ptr = getelementptr inbounds bfloat, ptr %addr, i16 -8
+  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1rq.nxv8bf16(<vscale x 8 x i1> %pred, ptr %ptr)
   ret <vscale x 8 x bfloat> %res
 }
 
-define <vscale x 8 x bfloat> @ld1rqh_bf16_scalar(<vscale x 8 x i1> %pred, bfloat* %addr, i64 %idx) {
+define <vscale x 8 x bfloat> @ld1rqh_bf16_scalar(<vscale x 8 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqh_bf16_scalar:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds bfloat, bfloat* %addr, i64 %idx
-  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1rq.nxv8bf16(<vscale x 8 x i1> %pred, bfloat* %ptr)
+  %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %idx
+  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1rq.nxv8bf16(<vscale x 8 x i1> %pred, ptr %ptr)
   ret <vscale x 8 x bfloat> %res
 }
 
-define <vscale x 8 x i16> @ld1rqh_i16_imm_dupqlane(<vscale x 8 x i1> %pred, <8 x i16>* %addr) {
+define <vscale x 8 x i16> @ld1rqh_i16_imm_dupqlane(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqh_i16_imm_dupqlane:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, #-16]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds <8 x i16>, <8 x i16>* %addr, i16 -1
-  %load = load <8 x i16>, <8 x i16>* %ptr
+  %ptr = getelementptr inbounds <8 x i16>, ptr %addr, i16 -1
+  %load = load <8 x i16>, ptr %ptr
   %1 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> %load, i64 0)
   %2 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %1, i64 0)
   ret <vscale x 8 x i16> %2
 }
 
-define <vscale x 8 x i16> @ld1rqh_i16_scalar_dupqlane(<vscale x 8 x i1> %pred, i16* %addr, i64 %idx) {
+define <vscale x 8 x i16> @ld1rqh_i16_scalar_dupqlane(<vscale x 8 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqh_i16_scalar_dupqlane:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i16, i16* %addr, i64 %idx
-  %ptr_bitcast = bitcast i16* %ptr to <8 x i16>*
-  %load = load <8 x i16>, <8 x i16>* %ptr_bitcast
+  %ptr = getelementptr inbounds i16, ptr %addr, i64 %idx
+  %load = load <8 x i16>, ptr %ptr
   %1 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> %load, i64 0)
   %2 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %1, i64 0)
   ret <vscale x 8 x i16> %2
 }
 
-define <vscale x 8 x half> @ld1rqh_f16_imm_dupqlane(<vscale x 8 x i1> %pred, <8 x half>* %addr) {
+define <vscale x 8 x half> @ld1rqh_f16_imm_dupqlane(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqh_f16_imm_dupqlane:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, #-16]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds <8 x half>, <8 x half>* %addr, i16 -1
-  %load = load <8 x half>, <8 x half>* %ptr
+  %ptr = getelementptr inbounds <8 x half>, ptr %addr, i16 -1
+  %load = load <8 x half>, ptr %ptr
   %1 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> %load, i64 0)
   %2 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %1, i64 0)
   ret <vscale x 8 x half> %2
 }
 
-define <vscale x 8 x half> @ld1rqh_f16_scalar_dupqlane(<vscale x 8 x i1> %pred, half* %addr, i64 %idx) {
+define <vscale x 8 x half> @ld1rqh_f16_scalar_dupqlane(<vscale x 8 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqh_f16_scalar_dupqlane:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds half, half* %addr, i64 %idx
-  %ptr_bitcast = bitcast half* %ptr to <8 x half>*
-  %load = load <8 x half>, <8 x half>* %ptr_bitcast
+  %ptr = getelementptr inbounds half, ptr %addr, i64 %idx
+  %load = load <8 x half>, ptr %ptr
   %1 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> %load, i64 0)
   %2 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %1, i64 0)
   ret <vscale x 8 x half> %2
 }
 
-define <vscale x 8 x bfloat> @ld1rqh_bf16_imm_dupqlane(<vscale x 8 x i1> %pred, <8 x bfloat>* %addr) {
+define <vscale x 8 x bfloat> @ld1rqh_bf16_imm_dupqlane(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqh_bf16_imm_dupqlane:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, #-16]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds <8 x bfloat>, <8 x bfloat>* %addr, i16 -1
-  %load = load <8 x bfloat>, <8 x bfloat>* %ptr
+  %ptr = getelementptr inbounds <8 x bfloat>, ptr %addr, i16 -1
+  %load = load <8 x bfloat>, ptr %ptr
   %1 = tail call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> %load, i64 0)
   %2 = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> %1, i64 0)
   ret <vscale x 8 x bfloat> %2
 }
 
-define <vscale x 8 x bfloat> @ld1rqh_bf16_scalar_dupqlane(<vscale x 8 x i1> %pred, bfloat* %addr, i64 %idx) {
+define <vscale x 8 x bfloat> @ld1rqh_bf16_scalar_dupqlane(<vscale x 8 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqh_bf16_scalar_dupqlane:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds bfloat, bfloat* %addr, i64 %idx
-  %ptr_bitcast = bitcast bfloat* %ptr to <8 x bfloat>*
-  %load = load <8 x bfloat>, <8 x bfloat>* %ptr_bitcast
+  %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %idx
+  %load = load <8 x bfloat>, ptr %ptr
   %1 = tail call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> %load, i64 0)
   %2 = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> %1, i64 0)
   ret <vscale x 8 x bfloat> %2
@@ -279,113 +275,111 @@ define <vscale x 8 x bfloat> @ld1rqh_bf16_scalar_dupqlane(<vscale x 8 x i1> %pre
 ; LD1RQW
 ;
 
-define <vscale x 4 x i32> @ld1rqw_i32(<vscale x 4 x i1> %pred, i32* %addr) {
+define <vscale x 4 x i32> @ld1rqw_i32(<vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqw_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1> %pred, i32* %addr)
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1> %pred, ptr %addr)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x float> @ld1rqw_f32(<vscale x 4 x i1> %pred, float* %addr) {
+define <vscale x 4 x float> @ld1rqw_f32(<vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqw_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1rq.nxv4f32(<vscale x 4 x i1> %pred, float* %addr)
+  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1rq.nxv4f32(<vscale x 4 x i1> %pred, ptr %addr)
   ret <vscale x 4 x float> %res
 }
 
-define <vscale x 4 x i32> @ld1rqw_i32_imm(<vscale x 4 x i1> %pred, i32* %addr) {
+define <vscale x 4 x i32> @ld1rqw_i32_imm(<vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqw_i32_imm:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, #112]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i32, i32* %addr, i32 28
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1> %pred, i32* %ptr)
+  %ptr = getelementptr inbounds i32, ptr %addr, i32 28
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1> %pred, ptr %ptr)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x float> @ld1rqw_f32_imm(<vscale x 4 x i1> %pred, float* %addr) {
+define <vscale x 4 x float> @ld1rqw_f32_imm(<vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqw_f32_imm:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, #32]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds float, float* %addr, i32 8
-  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1rq.nxv4f32(<vscale x 4 x i1> %pred, float* %ptr)
+  %ptr = getelementptr inbounds float, ptr %addr, i32 8
+  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1rq.nxv4f32(<vscale x 4 x i1> %pred, ptr %ptr)
   ret <vscale x 4 x float> %res
 }
 
-define <vscale x 4 x i32> @ld1rqw_i32_scalar(<vscale x 4 x i1> %pred, i32* %base, i64 %idx) {
+define <vscale x 4 x i32> @ld1rqw_i32_scalar(<vscale x 4 x i1> %pred, ptr %base, i64 %idx) {
 ; CHECK-LABEL: ld1rqw_i32_scalar:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i32, i32* %base, i64 %idx
-  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1> %pred, i32* %ptr)
+  %ptr = getelementptr inbounds i32, ptr %base, i64 %idx
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1> %pred, ptr %ptr)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x float> @ld1rqw_f32_scalar(<vscale x 4 x i1> %pred, float* %base, i64 %idx) {
+define <vscale x 4 x float> @ld1rqw_f32_scalar(<vscale x 4 x i1> %pred, ptr %base, i64 %idx) {
 ; CHECK-LABEL: ld1rqw_f32_scalar:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds float, float* %base, i64 %idx
-  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1rq.nxv4f32(<vscale x 4 x i1> %pred, float* %ptr)
+  %ptr = getelementptr inbounds float, ptr %base, i64 %idx
+  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1rq.nxv4f32(<vscale x 4 x i1> %pred, ptr %ptr)
   ret <vscale x 4 x float> %res
 }
 
-define <vscale x 4 x i32> @ld1rqw_i32_imm_dupqlane(<vscale x 4 x i1> %pred, <4 x i32>* %addr) {
+define <vscale x 4 x i32> @ld1rqw_i32_imm_dupqlane(<vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqw_i32_imm_dupqlane:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, #16]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds <4 x i32>, <4 x i32>* %addr, i32 1
-  %load = load <4 x i32>, <4 x i32>* %ptr
+  %ptr = getelementptr inbounds <4 x i32>, ptr %addr, i32 1
+  %load = load <4 x i32>, ptr %ptr
   %1 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> %load, i64 0)
   %2 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %1, i64 0)
   ret <vscale x 4 x i32> %2
 }
 
-define <vscale x 4 x i32> @ld1rqw_i32_scalar_dupqlane(<vscale x 4 x i1> %pred, i32* %addr, i64 %idx) {
+define <vscale x 4 x i32> @ld1rqw_i32_scalar_dupqlane(<vscale x 4 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqw_i32_scalar_dupqlane:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i32, i32* %addr, i64 %idx
-  %ptr_bitcast = bitcast i32* %ptr to <4 x i32>*
-  %load = load <4 x i32>, <4 x i32>* %ptr_bitcast
+  %ptr = getelementptr inbounds i32, ptr %addr, i64 %idx
+  %load = load <4 x i32>, ptr %ptr
   %1 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> %load, i64 0)
   %2 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %1, i64 0)
   ret <vscale x 4 x i32> %2
 }
 
-define <vscale x 4 x float> @ld1rqw_f32_imm_dupqlane(<vscale x 4 x i1> %pred, <4 x float>* %addr) {
+define <vscale x 4 x float> @ld1rqw_f32_imm_dupqlane(<vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqw_f32_imm_dupqlane:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, #16]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds <4 x float>, <4 x float>* %addr, i32 1
-  %load = load <4 x float>, <4 x float>* %ptr
+  %ptr = getelementptr inbounds <4 x float>, ptr %addr, i32 1
+  %load = load <4 x float>, ptr %ptr
   %1 = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %load, i64 0)
   %2 = tail call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %1, i64 0)
   ret <vscale x 4 x float> %2
 }
 
-define <vscale x 4 x float> @ld1rqw_f32_scalar_dupqlane(<vscale x 4 x i1> %pred, float* %addr, i64 %idx) {
+define <vscale x 4 x float> @ld1rqw_f32_scalar_dupqlane(<vscale x 4 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqw_f32_scalar_dupqlane:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds float, float* %addr, i64 %idx
-  %ptr_bitcast = bitcast float* %ptr to <4 x float>*
-  %load = load <4 x float>, <4 x float>* %ptr_bitcast
+  %ptr = getelementptr inbounds float, ptr %addr, i64 %idx
+  %load = load <4 x float>, ptr %ptr
   %1 = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %load, i64 0)
   %2 = tail call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %1, i64 0)
   ret <vscale x 4 x float> %2
@@ -395,113 +389,111 @@ define <vscale x 4 x float> @ld1rqw_f32_scalar_dupqlane(<vscale x 4 x i1> %pred,
 ; LD1RQD
 ;
 
-define <vscale x 2 x i64> @ld1rqd_i64(<vscale x 2 x i1> %pred, i64* %addr) {
+define <vscale x 2 x i64> @ld1rqd_i64(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqd_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %pred, i64* %addr)
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %pred, ptr %addr)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x double> @ld1rqd_f64(<vscale x 2 x i1> %pred, double* %addr) {
+define <vscale x 2 x double> @ld1rqd_f64(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqd_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1rq.nxv2f64(<vscale x 2 x i1> %pred, double* %addr)
+  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1rq.nxv2f64(<vscale x 2 x i1> %pred, ptr %addr)
   ret <vscale x 2 x double> %res
 }
 
-define <vscale x 2 x i64> @ld1rqd_i64_imm(<vscale x 2 x i1> %pred, i64* %addr) {
+define <vscale x 2 x i64> @ld1rqd_i64_imm(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqd_i64_imm:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, #64]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i64, i64* %addr, i64 8
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %pred, i64* %ptr)
+  %ptr = getelementptr inbounds i64, ptr %addr, i64 8
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %pred, ptr %ptr)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x double> @ld1rqd_f64_imm(<vscale x 2 x i1> %pred, double* %addr) {
+define <vscale x 2 x double> @ld1rqd_f64_imm(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqd_f64_imm:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, #-128]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds double, double* %addr, i64 -16
-  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1rq.nxv2f64(<vscale x 2 x i1> %pred, double* %ptr)
+  %ptr = getelementptr inbounds double, ptr %addr, i64 -16
+  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1rq.nxv2f64(<vscale x 2 x i1> %pred, ptr %ptr)
   ret <vscale x 2 x double> %res
 }
 
-define <vscale x 2 x i64> @ld1rqd_i64_scalar(<vscale x 2 x i1> %pred, i64* %base, i64 %idx) {
+define <vscale x 2 x i64> @ld1rqd_i64_scalar(<vscale x 2 x i1> %pred, ptr %base, i64 %idx) {
 ; CHECK-LABEL: ld1rqd_i64_scalar:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i64, i64* %base, i64 %idx
-  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %pred, i64* %ptr)
+  %ptr = getelementptr inbounds i64, ptr %base, i64 %idx
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %pred, ptr %ptr)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x double> @ld1rqd_f64_scalar(<vscale x 2 x i1> %pred, double* %base, i64 %idx) {
+define <vscale x 2 x double> @ld1rqd_f64_scalar(<vscale x 2 x i1> %pred, ptr %base, i64 %idx) {
 ; CHECK-LABEL: ld1rqd_f64_scalar:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds double, double* %base, i64 %idx
-  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1rq.nxv2f64(<vscale x 2 x i1> %pred, double* %ptr)
+  %ptr = getelementptr inbounds double, ptr %base, i64 %idx
+  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1rq.nxv2f64(<vscale x 2 x i1> %pred, ptr %ptr)
   ret <vscale x 2 x double> %res
 }
 
-define <vscale x 2 x i64> @ld1rqd_i64_imm_dupqlane(<vscale x 2 x i1> %pred, <2 x i64>* %addr) {
+define <vscale x 2 x i64> @ld1rqd_i64_imm_dupqlane(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqd_i64_imm_dupqlane:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, #16]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds <2 x i64>, <2 x i64>* %addr, i64 1
-  %load = load <2 x i64>, <2 x i64>* %ptr
+  %ptr = getelementptr inbounds <2 x i64>, ptr %addr, i64 1
+  %load = load <2 x i64>, ptr %ptr
   %1 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> %load, i64 0)
   %2 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %1, i64 0)
   ret <vscale x 2 x i64> %2
 }
 
-define <vscale x 2 x i64> @ld1rqd_i64_scalar_dupqlane(<vscale x 2 x i1> %pred, i64* %addr, i64 %idx) {
+define <vscale x 2 x i64> @ld1rqd_i64_scalar_dupqlane(<vscale x 2 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqd_i64_scalar_dupqlane:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i64, i64* %addr, i64 %idx
-  %ptr_bitcast = bitcast i64* %ptr to <2 x i64>*
-  %load = load <2 x i64>, <2 x i64>* %ptr_bitcast
+  %ptr = getelementptr inbounds i64, ptr %addr, i64 %idx
+  %load = load <2 x i64>, ptr %ptr
   %1 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> %load, i64 0)
   %2 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %1, i64 0)
   ret <vscale x 2 x i64> %2
 }
 
-define <vscale x 2 x double> @ld1rqd_f64_imm_dupqlane(<vscale x 2 x i1> %pred, <2 x double>* %addr) {
+define <vscale x 2 x double> @ld1rqd_f64_imm_dupqlane(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ld1rqd_f64_imm_dupqlane:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, #16]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds <2 x double>, <2 x double>* %addr, i64 1
-  %load = load <2 x double>, <2 x double>* %ptr
+  %ptr = getelementptr inbounds <2 x double>, ptr %addr, i64 1
+  %load = load <2 x double>, ptr %ptr
   %1 = tail call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> %load, i64 0)
   %2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %1, i64 0)
   ret <vscale x 2 x double> %2
 }
 
-define <vscale x 2 x double> @ld1rqd_f64_scalar_dupqlane(<vscale x 2 x i1> %pred, double* %addr, i64 %idx) {
+define <vscale x 2 x double> @ld1rqd_f64_scalar_dupqlane(<vscale x 2 x i1> %pred, ptr %addr, i64 %idx) {
 ; CHECK-LABEL: ld1rqd_f64_scalar_dupqlane:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds double, double* %addr, i64 %idx
-  %ptr_bitcast = bitcast double* %ptr to <2 x double>*
-  %load = load <2 x double>, <2 x double>* %ptr_bitcast
+  %ptr = getelementptr inbounds double, ptr %addr, i64 %idx
+  %load = load <2 x double>, ptr %ptr
   %1 = tail call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> %load, i64 0)
   %2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %1, i64 0)
   ret <vscale x 2 x double> %2
@@ -511,13 +503,13 @@ define <vscale x 2 x double> @ld1rqd_f64_scalar_dupqlane(<vscale x 2 x i1> %pred
 ; LDNT1B
 ;
 
-define <vscale x 16 x i8> @ldnt1b_i8(<vscale x 16 x i1> %pred, i8* %addr) {
+define <vscale x 16 x i8> @ldnt1b_i8(<vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ldnt1b_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1b { z0.b }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1> %pred,
-                                                                 i8* %addr)
+                                                                 ptr %addr)
   ret <vscale x 16 x i8> %res
 }
 
@@ -525,33 +517,33 @@ define <vscale x 16 x i8> @ldnt1b_i8(<vscale x 16 x i1> %pred, i8* %addr) {
 ; LDNT1H
 ;
 
-define <vscale x 8 x i16> @ldnt1h_i16(<vscale x 8 x i1> %pred, i16* %addr) {
+define <vscale x 8 x i16> @ldnt1h_i16(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ldnt1h_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1> %pred,
-                                                                 i16* %addr)
+                                                                 ptr %addr)
   ret <vscale x 8 x i16> %res
 }
 
-define <vscale x 8 x half> @ldnt1h_f16(<vscale x 8 x i1> %pred, half* %addr) {
+define <vscale x 8 x half> @ldnt1h_f16(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ldnt1h_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1> %pred,
-                                                                  half* %addr)
+                                                                  ptr %addr)
   ret <vscale x 8 x half> %res
 }
 
-define <vscale x 8 x bfloat> @ldnt1h_bf16(<vscale x 8 x i1> %pred, bfloat* %addr) {
+define <vscale x 8 x bfloat> @ldnt1h_bf16(<vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ldnt1h_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnt1.nxv8bf16(<vscale x 8 x i1> %pred,
-                                                                     bfloat* %addr)
+                                                                     ptr %addr)
   ret <vscale x 8 x bfloat> %res
 }
 
@@ -559,23 +551,23 @@ define <vscale x 8 x bfloat> @ldnt1h_bf16(<vscale x 8 x i1> %pred, bfloat* %addr
 ; LDNT1W
 ;
 
-define <vscale x 4 x i32> @ldnt1w_i32(<vscale x 4 x i1> %pred, i32* %addr) {
+define <vscale x 4 x i32> @ldnt1w_i32(<vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ldnt1w_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1> %pred,
-                                                                 i32* %addr)
+                                                                 ptr %addr)
   ret <vscale x 4 x i32> %res
 }
 
-define <vscale x 4 x float> @ldnt1w_f32(<vscale x 4 x i1> %pred, float* %addr) {
+define <vscale x 4 x float> @ldnt1w_f32(<vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ldnt1w_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.nxv4f32(<vscale x 4 x i1> %pred,
-                                                                   float* %addr)
+                                                                   ptr %addr)
   ret <vscale x 4 x float> %res
 }
 
@@ -583,44 +575,44 @@ define <vscale x 4 x float> @ldnt1w_f32(<vscale x 4 x i1> %pred, float* %addr) {
 ; LDNT1D
 ;
 
-define <vscale x 2 x i64> @ldnt1d_i64(<vscale x 2 x i1> %pred, i64* %addr) {
+define <vscale x 2 x i64> @ldnt1d_i64(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ldnt1d_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1> %pred,
-                                                                 i64* %addr)
+                                                                 ptr %addr)
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x double> @ldnt1d_f64(<vscale x 2 x i1> %pred, double* %addr) {
+define <vscale x 2 x double> @ldnt1d_f64(<vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: ldnt1d_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.nxv2f64(<vscale x 2 x i1> %pred,
-                                                                    double* %addr)
+                                                                    ptr %addr)
   ret <vscale x 2 x double> %res
 }
 
 
-declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1>, i8*)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 8 x half> @llvm.aarch64.sve.ld1rq.nxv8f16(<vscale x 8 x i1>, half*)
-declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1rq.nxv8bf16(<vscale x 8 x i1>, bfloat*)
-declare <vscale x 4 x float> @llvm.aarch64.sve.ld1rq.nxv4f32(<vscale x 4 x i1>, float*)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ld1rq.nxv2f64(<vscale x 2 x i1>, double*)
-
-declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1>, i8*)
-declare <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1>, half*)
-declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnt1.nxv8bf16(<vscale x 8 x i1>, bfloat*)
-declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.nxv4f32(<vscale x 4 x i1>, float*)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.nxv2f64(<vscale x 2 x i1>, double*)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1>, ptr)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1>, ptr)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1>, ptr)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1>, ptr)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ld1rq.nxv8f16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1rq.nxv8bf16(<vscale x 8 x i1>, ptr)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1rq.nxv4f32(<vscale x 4 x i1>, ptr)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1rq.nxv2f64(<vscale x 2 x i1>, ptr)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1>, ptr)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1>, ptr)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1>, ptr)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1>, ptr)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnt1.nxv8bf16(<vscale x 8 x i1>, ptr)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.nxv4f32(<vscale x 4 x i1>, ptr)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.nxv2f64(<vscale x 2 x i1>, ptr)
 
 declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)
 declare <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double>, <2 x double>, i64)

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
index 127bbf5c139ec..46675f85886c8 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-mask-ldst-ext.ll
@@ -16,7 +16,7 @@ define <vscale x 16 x i32> @masked_ld1b_i8_sext_i32(<vscale x 16 x i8> *%base, <
 ; CHECK-NEXT:    sunpklo z2.s, z3.h
 ; CHECK-NEXT:    sunpkhi z3.s, z3.h
 ; CHECK-NEXT:    ret
-  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
   %res = sext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i32>
   ret <vscale x 16 x i32> %res
 }
@@ -32,7 +32,7 @@ define <vscale x 16 x i32> @masked_ld1b_i8_zext_i32(<vscale x 16 x i8> *%base, <
 ; CHECK-NEXT:    uunpklo z2.s, z3.h
 ; CHECK-NEXT:    uunpkhi z3.s, z3.h
 ; CHECK-NEXT:    ret
-  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
   %res = zext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i32>
   ret <vscale x 16 x i32> %res
 }
@@ -56,7 +56,7 @@ define <vscale x 16 x i64> @masked_ld1b_i8_sext(<vscale x 16 x i8> *%base, <vsca
 ; CHECK-NEXT:    sunpklo z6.d, z7.s
 ; CHECK-NEXT:    sunpkhi z7.d, z7.s
 ; CHECK-NEXT:    ret
-  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
   %res = sext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i64>
   ret <vscale x 16 x i64> %res
 }
@@ -80,7 +80,7 @@ define <vscale x 16 x i64> @masked_ld1b_i8_zext(<vscale x 16 x i8> *%base, <vsca
 ; CHECK-NEXT:    uunpklo z6.d, z7.s
 ; CHECK-NEXT:    uunpkhi z7.d, z7.s
 ; CHECK-NEXT:    ret
-  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(<vscale x 16 x i8>* %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
   %res = zext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i64>
   ret <vscale x 16 x i64> %res
 }
@@ -100,7 +100,7 @@ define <vscale x 8 x i64> @masked_ld1h_i16_sext(<vscale x 8 x i16> *%base, <vsca
 ; CHECK-NEXT:    sunpklo z2.d, z3.s
 ; CHECK-NEXT:    sunpkhi z3.d, z3.s
 ; CHECK-NEXT:    ret
-  %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+  %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(<vscale x 8 x i16>* %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
   %res = sext <vscale x 8 x i16> %wide.masked.load to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %res
 }
@@ -116,7 +116,7 @@ define <vscale x 8 x i64> @masked_ld1h_i16_zext(<vscale x 8 x i16> *%base, <vsca
 ; CHECK-NEXT:    uunpklo z2.d, z3.s
 ; CHECK-NEXT:    uunpkhi z3.d, z3.s
 ; CHECK-NEXT:    ret
-  %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+  %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(<vscale x 8 x i16>* %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
   %res = zext <vscale x 8 x i16> %wide.masked.load to <vscale x 8 x i64>
   ret <vscale x 8 x i64> %res
 }
@@ -132,7 +132,7 @@ define <vscale x 4 x i64> @masked_ld1w_i32_sext(<vscale x 4 x i32> *%base, <vsca
 ; CHECK-NEXT:    sunpklo z0.d, z1.s
 ; CHECK-NEXT:    sunpkhi z1.d, z1.s
 ; CHECK-NEXT:    ret
-  %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(<vscale x 4 x i32>* %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
   %res = sext <vscale x 4 x i32> %wide.masked.load to <vscale x 4 x i64>
   ret <vscale x 4 x i64> %res
 }
@@ -144,12 +144,12 @@ define <vscale x 4 x i64> @masked_ld1w_i32_zext(<vscale x 4 x i32> *%base, <vsca
 ; CHECK-NEXT:    uunpklo z0.d, z1.s
 ; CHECK-NEXT:    uunpkhi z1.d, z1.s
 ; CHECK-NEXT:    ret
-  %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(<vscale x 4 x i32>* %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
   %res = zext <vscale x 4 x i32> %wide.masked.load to <vscale x 4 x i64>
   ret <vscale x 4 x i64> %res
 }
 
-declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>*, i32 immarg, <vscale x 16 x i1>, <vscale x 16 x i8>)
-declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>*, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>*, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(<vscale x 16 x i8>*, i32 immarg, <vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(<vscale x 8 x i16>*, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(<vscale x 4 x i32>*, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i32>)
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-32bit-scaled-offsets.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-32bit-scaled-offsets.ll
index a2d87766af36f..f7a588d87f5d5 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-32bit-scaled-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-32bit-scaled-offsets.ll
@@ -8,7 +8,7 @@
 ;
 
 ; ST1H
-define void @sst1h_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %indices) {
+define void @sst1h_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %indices) {
 ; CHECK-LABEL: sst1h_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, uxtw #1]
@@ -16,12 +16,12 @@ define void @sst1h_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i16*
   %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
   call void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i16(<vscale x 4 x i16> %data_trunc,
                                                              <vscale x 4 x i1> %pg,
-                                                             i16* %base,
+                                                             ptr %base,
                                                              <vscale x 4 x i32> %indices)
   ret void
 }
 
-define void @sst1h_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %indices) {
+define void @sst1h_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %indices) {
 ; CHECK-LABEL: sst1h_s_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, sxtw #1]
@@ -29,12 +29,12 @@ define void @sst1h_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i16*
   %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
   call void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i16(<vscale x 4 x i16> %data_trunc,
                                                              <vscale x 4 x i1> %pg,
-                                                             i16* %base,
+                                                             ptr %base,
                                                              <vscale x 4 x i32> %indices)
   ret void
 }
 
-define void @sst1h_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %indices) {
+define void @sst1h_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %indices) {
 ; CHECK-LABEL: sst1h_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, uxtw #1]
@@ -42,12 +42,12 @@ define void @sst1h_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16*
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
   call void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv2i16(<vscale x 2 x i16> %data_trunc,
                                                              <vscale x 2 x i1> %pg,
-                                                             i16* %base,
+                                                             ptr %base,
                                                              <vscale x 2 x i32> %indices)
   ret void
 }
 
-define void @sst1h_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %indices) {
+define void @sst1h_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %indices) {
 ; CHECK-LABEL: sst1h_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, sxtw #1]
@@ -55,37 +55,37 @@ define void @sst1h_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16*
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
   call void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv2i16(<vscale x 2 x i16> %data_trunc,
                                                              <vscale x 2 x i1> %pg,
-                                                             i16* %base,
+                                                             ptr %base,
                                                              <vscale x 2 x i32> %indices)
   ret void
 }
 
 ; ST1W
-define void @sst1w_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %indices) {
+define void @sst1w_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %indices) {
 ; CHECK-LABEL: sst1w_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, uxtw #2]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i32(<vscale x 4 x i32> %data,
                                                              <vscale x 4 x i1> %pg,
-                                                             i32* %base,
+                                                             ptr %base,
                                                              <vscale x 4 x i32> %indices)
   ret void
 }
 
-define void @sst1w_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %indices) {
+define void @sst1w_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %indices) {
 ; CHECK-LABEL: sst1w_s_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, sxtw #2]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i32(<vscale x 4 x i32> %data,
                                                              <vscale x 4 x i1> %pg,
-                                                             i32* %base,
+                                                             ptr %base,
                                                              <vscale x 4 x i32> %indices)
   ret void
 }
 
-define void @sst1w_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %indices) {
+define void @sst1w_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %indices) {
 ; CHECK-LABEL: sst1w_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, uxtw #2]
@@ -93,12 +93,12 @@ define void @sst1w_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32*
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
   call void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv2i32(<vscale x 2 x i32> %data_trunc,
                                                              <vscale x 2 x i1> %pg,
-                                                             i32* %base,
+                                                             ptr %base,
                                                              <vscale x 2 x i32> %indices)
   ret void
 }
 
-define void @sst1w_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %indices) {
+define void @sst1w_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %indices) {
 ; CHECK-LABEL: sst1w_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, sxtw #2]
@@ -106,103 +106,103 @@ define void @sst1w_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32*
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
   call void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv2i32(<vscale x 2 x i32> %data_trunc,
                                                              <vscale x 2 x i1> %pg,
-                                                             i32* %base,
+                                                             ptr %base,
                                                              <vscale x 2 x i32> %indices)
   ret void
 }
 
-define void @sst1w_s_uxtw_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %indices) {
+define void @sst1w_s_uxtw_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %indices) {
 ; CHECK-LABEL: sst1w_s_uxtw_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, uxtw #2]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4f32(<vscale x 4 x float> %data,
                                                              <vscale x 4 x i1> %pg,
-                                                             float* %base,
+                                                             ptr %base,
                                                              <vscale x 4 x i32> %indices)
   ret void
 }
 
-define void @sst1w_s_sxtw_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %indices) {
+define void @sst1w_s_sxtw_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %indices) {
 ; CHECK-LABEL: sst1w_s_sxtw_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, sxtw #2]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4f32(<vscale x 4 x float> %data,
                                                              <vscale x 4 x i1> %pg,
-                                                             float* %base,
+                                                             ptr %base,
                                                              <vscale x 4 x i32> %indices)
   ret void
 }
 
 ; ST1D
-define void @sst1d_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %indices) {
+define void @sst1d_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %indices) {
 ; CHECK-LABEL: sst1d_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, uxtw #3]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv2i64(<vscale x 2 x i64> %data,
                                                              <vscale x 2 x i1> %pg,
-                                                             i64* %base,
+                                                             ptr %base,
                                                              <vscale x 2 x i32> %indices)
   ret void
 }
 
-define void @sst1d_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %indices) {
+define void @sst1d_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %indices) {
 ; CHECK-LABEL: sst1d_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, sxtw #3]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv2i64(<vscale x 2 x i64> %data,
                                                              <vscale x 2 x i1> %pg,
-                                                             i64* %base,
+                                                             ptr %base,
                                                              <vscale x 2 x i32> %indices)
   ret void
 }
 
-define void @sst1d_d_uxtw_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %indices) {
+define void @sst1d_d_uxtw_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %indices) {
 ; CHECK-LABEL: sst1d_d_uxtw_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, uxtw #3]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv2f64(<vscale x 2 x double> %data,
                                                              <vscale x 2 x i1> %pg,
-                                                             double* %base,
+                                                             ptr %base,
                                                              <vscale x 2 x i32> %indices)
   ret void
 }
 
-define void @sst1d_d_sxtw_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %indices) {
+define void @sst1d_d_sxtw_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %indices) {
 ; CHECK-LABEL: sst1d_d_sxtw_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, sxtw #3]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv2f64(<vscale x 2 x double> %data,
                                                              <vscale x 2 x i1> %pg,
-                                                             double* %base,
+                                                             ptr %base,
                                                              <vscale x 2 x i32> %indices)
   ret void
 }
 
 
 ; ST1H
-declare void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*, <vscale x 2 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
 ; ST1W
-declare void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*, <vscale x 2 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
-declare void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
 
 ; ST1D
-declare void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*, <vscale x 2 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
-declare void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*, <vscale x 2 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-32bit-unscaled-offsets.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-32bit-unscaled-offsets.ll
index c7dfc62f7cd12..0b239155e4ffc 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-32bit-unscaled-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-32bit-unscaled-offsets.ll
@@ -8,7 +8,7 @@
 ;
 
 ; ST1B
-define void @sst1b_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %offsets) {
+define void @sst1b_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
 ; CHECK-LABEL: sst1b_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x0, z1.s, uxtw]
@@ -16,12 +16,12 @@ define void @sst1b_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i8* %
   %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
   call void  @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i8(<vscale x 4 x i8> %data_trunc,
                                                        <vscale x 4 x i1> %pg,
-                                                       i8* %base,
+                                                       ptr %base,
                                                        <vscale x 4 x i32> %offsets)
   ret void
 }
 
-define void @sst1b_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %offsets) {
+define void @sst1b_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
 ; CHECK-LABEL: sst1b_s_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x0, z1.s, sxtw]
@@ -29,12 +29,12 @@ define void @sst1b_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i8* %
   %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
   call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i8(<vscale x 4 x i8> %data_trunc,
                                                       <vscale x 4 x i1> %pg,
-                                                      i8* %base,
+                                                      ptr %base,
                                                       <vscale x 4 x i32> %offsets)
   ret void
 }
 
-define void @sst1b_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %offsets) {
+define void @sst1b_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %offsets) {
 ; CHECK-LABEL: sst1b_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0, z1.d, uxtw]
@@ -42,12 +42,12 @@ define void @sst1b_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i8* %
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
   call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i8(<vscale x 2 x i8> %data_trunc,
                                                       <vscale x 2 x i1> %pg,
-                                                      i8* %base,
+                                                      ptr %base,
                                                       <vscale x 2 x i32> %offsets)
   ret void
 }
 
-define void @sst1b_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %offsets) {
+define void @sst1b_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %offsets) {
 ; CHECK-LABEL: sst1b_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0, z1.d, sxtw]
@@ -55,13 +55,13 @@ define void @sst1b_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i8* %
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
   call void  @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i8(<vscale x 2 x i8> %data_trunc,
                                                        <vscale x 2 x i1> %pg,
-                                                       i8* %base,
+                                                       ptr %base,
                                                        <vscale x 2 x i32> %offsets)
   ret void
 }
 
 ; ST1H
-define void @sst1h_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %offsets) {
+define void @sst1h_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
 ; CHECK-LABEL: sst1h_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, uxtw]
@@ -69,12 +69,12 @@ define void @sst1h_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i16*
   %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
   call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i16(<vscale x 4 x i16> %data_trunc,
                                                        <vscale x 4 x i1> %pg,
-                                                       i16* %base,
+                                                       ptr %base,
                                                        <vscale x 4 x i32> %offsets)
   ret void
 }
 
-define void @sst1h_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %offsets) {
+define void @sst1h_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
 ; CHECK-LABEL: sst1h_s_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, sxtw]
@@ -82,12 +82,12 @@ define void @sst1h_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i16*
   %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
   call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i16(<vscale x 4 x i16> %data_trunc,
                                                        <vscale x 4 x i1> %pg,
-                                                       i16* %base,
+                                                       ptr %base,
                                                        <vscale x 4 x i32> %offsets)
   ret void
 }
 
-define void @sst1h_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %offsets) {
+define void @sst1h_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %offsets) {
 ; CHECK-LABEL: sst1h_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, uxtw]
@@ -95,12 +95,12 @@ define void @sst1h_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16*
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
   call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i16(<vscale x 2 x i16> %data_trunc,
                                                        <vscale x 2 x i1> %pg,
-                                                       i16* %base,
+                                                       ptr %base,
                                                        <vscale x 2 x i32> %offsets)
   ret void
 }
 
-define void @sst1h_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %offsets) {
+define void @sst1h_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %offsets) {
 ; CHECK-LABEL: sst1h_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, sxtw]
@@ -108,37 +108,37 @@ define void @sst1h_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16*
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
   call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i16(<vscale x 2 x i16> %data_trunc,
                                                        <vscale x 2 x i1> %pg,
-                                                       i16* %base,
+                                                       ptr %base,
                                                        <vscale x 2 x i32> %offsets)
   ret void
 }
 
 ; ST1W
-define void @sst1w_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %offsets) {
+define void @sst1w_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
 ; CHECK-LABEL: sst1w_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i32(<vscale x 4 x i32> %data,
                                                        <vscale x 4 x i1> %pg,
-                                                       i32* %base,
+                                                       ptr %base,
                                                        <vscale x 4 x i32> %offsets)
   ret void
 }
 
-define void @sst1w_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %offsets) {
+define void @sst1w_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
 ; CHECK-LABEL: sst1w_s_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, sxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i32(<vscale x 4 x i32> %data,
                                                        <vscale x 4 x i1> %pg,
-                                                       i32* %base,
+                                                       ptr %base,
                                                        <vscale x 4 x i32> %offsets)
   ret void
 }
 
-define void @sst1w_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %offsets) {
+define void @sst1w_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %offsets) {
 ; CHECK-LABEL: sst1w_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, uxtw]
@@ -146,12 +146,12 @@ define void @sst1w_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32*
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
   call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i32(<vscale x 2 x i32> %data_trunc,
                                                        <vscale x 2 x i1> %pg,
-                                                       i32* %base,
+                                                       ptr %base,
                                                        <vscale x 2 x i32> %offsets)
   ret void
 }
 
-define void @sst1w_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %offsets) {
+define void @sst1w_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %offsets) {
 ; CHECK-LABEL: sst1w_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, sxtw]
@@ -159,109 +159,109 @@ define void @sst1w_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32*
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
   call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i32(<vscale x 2 x i32> %data_trunc,
                                                        <vscale x 2 x i1> %pg,
-                                                       i32* %base,
+                                                       ptr %base,
                                                        <vscale x 2 x i32> %offsets)
   ret void
 }
 
-define void @sst1w_s_uxtw_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %offsets) {
+define void @sst1w_s_uxtw_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
 ; CHECK-LABEL: sst1w_s_uxtw_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4f32(<vscale x 4 x float> %data,
                                                        <vscale x 4 x i1> %pg,
-                                                       float* %base,
+                                                       ptr %base,
                                                        <vscale x 4 x i32> %offsets)
   ret void
 }
 
-define void @sst1w_s_sxtw_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %offsets) {
+define void @sst1w_s_sxtw_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
 ; CHECK-LABEL: sst1w_s_sxtw_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, sxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4f32(<vscale x 4 x float> %data,
                                                        <vscale x 4 x i1> %pg,
-                                                       float* %base,
+                                                       ptr %base,
                                                        <vscale x 4 x i32> %offsets)
   ret void
 }
 
 ; ST1D
-define void @sst1d_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %offsets) {
+define void @sst1d_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %offsets) {
 ; CHECK-LABEL: sst1d_d_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i64(<vscale x 2 x i64> %data,
                                                        <vscale x 2 x i1> %pg,
-                                                       i64* %base,
+                                                       ptr %base,
                                                        <vscale x 2 x i32> %offsets)
   ret void
 }
 
-define void @sst1d_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %offsets) {
+define void @sst1d_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %offsets) {
 ; CHECK-LABEL: sst1d_d_sxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, sxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i64(<vscale x 2 x i64> %data,
                                                        <vscale x 2 x i1> %pg,
-                                                       i64* %base,
+                                                       ptr %base,
                                                        <vscale x 2 x i32> %offsets)
   ret void
 }
 
-define void @sst1d_d_uxtw_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %offsets) {
+define void @sst1d_d_uxtw_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %offsets) {
 ; CHECK-LABEL: sst1d_d_uxtw_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2f64(<vscale x 2 x double> %data,
                                                        <vscale x 2 x i1> %pg,
-                                                       double* %base,
+                                                       ptr %base,
                                                        <vscale x 2 x i32> %offsets)
   ret void
 }
 
-define void @sst1d_d_sxtw_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %offsets) {
+define void @sst1d_d_sxtw_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %offsets) {
 ; CHECK-LABEL: sst1d_d_sxtw_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, sxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2f64(<vscale x 2 x double> %data,
                                                        <vscale x 2 x i1> %pg,
-                                                       double* %base,
+                                                       ptr %base,
                                                        <vscale x 2 x i32> %offsets)
   ret void
 }
 
 
 ; ST1B
-declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*, <vscale x 2 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
 ; ST1H
-declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*, <vscale x 2 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
 ; ST1W
-declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*, <vscale x 2 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
-declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
 
 ; ST1D
-declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*, <vscale x 2 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
-declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*, <vscale x 2 x i32>)
-declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.sxtw.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-64bit-scaled-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-64bit-scaled-offset.ll
index b9096917a9f54..f635a19f0db54 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-64bit-scaled-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-64bit-scaled-offset.ll
@@ -6,7 +6,7 @@
 ;   e.g. st1h { z0.d }, p0, [x0, z0.d, lsl #1]
 ;
 
-define void @sst1h_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %offsets) {
+define void @sst1h_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: sst1h_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, lsl #1]
@@ -14,12 +14,12 @@ define void @sst1h_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16* %
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
   call void @llvm.aarch64.sve.st1.scatter.index.nxv2i16(<vscale x 2 x i16> %data_trunc,
                                                         <vscale x 2 x i1> %pg,
-                                                        i16* %base,
+                                                        ptr %base,
                                                         <vscale x 2 x i64> %offsets)
   ret void
 }
 
-define void @sst1w_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
+define void @sst1w_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: sst1w_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, lsl #2]
@@ -27,37 +27,37 @@ define void @sst1w_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32* %
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
   call void @llvm.aarch64.sve.st1.scatter.index.nxv2i32(<vscale x 2 x i32> %data_trunc,
                                                         <vscale x 2 x i1> %pg,
-                                                        i32* %base,
+                                                        ptr %base,
                                                         <vscale x 2 x i64> %offsets)
   ret void
 }
 
-define void  @sst1d_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %offsets) {
+define void  @sst1d_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: sst1d_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, lsl #3]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.index.nxv2i64(<vscale x 2 x i64> %data,
                                                         <vscale x 2 x i1> %pg,
-                                                        i64* %base,
+                                                        ptr %base,
                                                         <vscale x 2 x i64> %offsets)
   ret void
 }
 
-define void  @sst1d_index_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %offsets) {
+define void  @sst1d_index_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: sst1d_index_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, lsl #3]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.index.nxv2f64(<vscale x 2 x double> %data,
                                                         <vscale x 2 x i1> %pg,
-                                                        double* %base,
+                                                        ptr %base,
                                                         <vscale x 2 x i64> %offsets)
   ret void
 }
 
 
-declare void @llvm.aarch64.sve.st1.scatter.index.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
-declare void @llvm.aarch64.sve.st1.scatter.index.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
-declare void @llvm.aarch64.sve.st1.scatter.index.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*, <vscale x 2 x i64>)
-declare void @llvm.aarch64.sve.st1.scatter.index.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.st1.scatter.index.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.st1.scatter.index.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.st1.scatter.index.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.st1.scatter.index.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-64bit-unscaled-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-64bit-unscaled-offset.ll
index f4c4402dbea71..71d852722b08c 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-64bit-unscaled-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-64bit-unscaled-offset.ll
@@ -6,7 +6,7 @@
 ;   e.g. st1h { z0.d }, p0, [x0, z1.d]
 ;
 
-define void @sst1b_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
+define void @sst1b_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: sst1b_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0, z1.d]
@@ -14,12 +14,12 @@ define void @sst1b_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i8* %base,
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
   call void @llvm.aarch64.sve.st1.scatter.nxv2i8(<vscale x 2 x i8> %data_trunc,
                                                  <vscale x 2 x i1> %pg,
-                                                 i8* %base,
+                                                 ptr %base,
                                                  <vscale x 2 x i64> %b)
   ret void
 }
 
-define void @sst1h_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define void @sst1h_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: sst1h_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d]
@@ -27,12 +27,12 @@ define void @sst1h_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16* %base
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
   call void @llvm.aarch64.sve.st1.scatter.nxv2i16(<vscale x 2 x i16> %data_trunc,
                                                  <vscale x 2 x i1> %pg,
-                                                 i16* %base,
+                                                 ptr %base,
                                                  <vscale x 2 x i64> %b)
   ret void
 }
 
-define void @sst1w_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
+define void @sst1w_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: sst1w_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d]
@@ -40,37 +40,37 @@ define void @sst1w_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32* %base
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
   call void @llvm.aarch64.sve.st1.scatter.nxv2i32(<vscale x 2 x i32> %data_trunc,
                                                  <vscale x 2 x i1> %pg,
-                                                 i32* %base,
+                                                 ptr %base,
                                                  <vscale x 2 x i64> %b)
   ret void
 }
 
-define void @sst1d_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
+define void @sst1d_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: sst1d_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.nxv2i64(<vscale x 2 x i64> %data,
                                                  <vscale x 2 x i1> %pg,
-                                                 i64* %base,
+                                                 ptr %base,
                                                  <vscale x 2 x i64> %b)
   ret void
 }
 
-define void @sst1d_d_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
+define void @sst1d_d_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: sst1d_d_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.nxv2f64(<vscale x 2 x double> %data,
                                                  <vscale x 2 x i1> %pg,
-                                                 double* %base,
+                                                 ptr %base,
                                                  <vscale x 2 x i64> %b)
   ret void
 }
 
-declare void @llvm.aarch64.sve.st1.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*, <vscale x 2 x i64>)
-declare void @llvm.aarch64.sve.st1.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
-declare void @llvm.aarch64.sve.st1.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
-declare void @llvm.aarch64.sve.st1.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*, <vscale x 2 x i64>)
-declare void @llvm.aarch64.sve.st1.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.st1.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.st1.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.st1.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.st1.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.st1.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-imm.ll
index 838b9ec21cdb3..d7319ed03ac9e 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-imm.ll
@@ -6,104 +6,96 @@
 ; ST1B
 ;
 
-define void @st1b_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
+define void @st1b_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: st1b_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 7
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
-define void @st1b_inbound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
+define void @st1b_inbound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: st1b_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 1
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
-define void @st1b_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
+define void @st1b_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: st1b_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, #-8, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -8
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -8
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
-define void @st1b_out_of_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
+define void @st1b_out_of_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: st1b_out_of_upper_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #8
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 8
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 8
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
-define void @st1b_out_of_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
+define void @st1b_out_of_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: st1b_out_of_lower_bound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdvl x8, #-9
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, x8]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -9
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -9
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to ptr
+  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
-define void @st1b_s_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i8* %a) {
+define void @st1b_s_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: st1b_s_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x0, #7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
-  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x i8>* %base to i8*
+  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %a, i64 7
+  %base_scalar = bitcast <vscale x 4 x i8>* %base to ptr
   %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
-  call void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8> %trunc, <vscale x 4 x i1> %pg, i8* %base_scalar)
+  call void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8> %trunc, <vscale x 4 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
-define void @st1b_h_inbound(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, i8* %a) {
+define void @st1b_h_inbound(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: st1b_h_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 8 x i8>*
-  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 8 x i8>* %base to i8*
+  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %a, i64 1
+  %base_scalar = bitcast <vscale x 8 x i8>* %base to ptr
   %trunc = trunc <vscale x 8 x i16> %data to <vscale x 8 x i8>
-  call void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8> %trunc, <vscale x 8 x i1> %pg, i8* %base_scalar)
+  call void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8> %trunc, <vscale x 8 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
-define void @st1b_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i8* %a) {
+define void @st1b_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: st1b_d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0, #-7, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i8* %a to <vscale x 2 x i8>*
-  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base_scalable, i64 -7
-  %base_scalar = bitcast <vscale x 2 x i8>* %base to i8*
+  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %a, i64 -7
+  %base_scalar = bitcast <vscale x 2 x i8>* %base to ptr
   %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
-  call void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8> %trunc, <vscale x 2 x i1> %pg, i8* %base_scalar)
+  call void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8> %trunc, <vscale x 2 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
@@ -111,65 +103,60 @@ define void @st1b_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i8*
 ; ST1H
 ;
 
-define void @st1h_inbound(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, i16* %a) {
+define void @st1h_inbound(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: st1h_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, #-1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i16* %a to <vscale x 8 x i16>*
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %base_scalable, i64 -1
-  %base_scalar = bitcast <vscale x 8 x i16>* %base to i16*
-  call void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, i16* %base_scalar)
+  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %a, i64 -1
+  %base_scalar = bitcast <vscale x 8 x i16>* %base to ptr
+  call void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
-define void @st1h_f16_inbound(<vscale x 8 x half> %data, <vscale x 8 x i1> %pg, half* %a) {
+define void @st1h_f16_inbound(<vscale x 8 x half> %data, <vscale x 8 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: st1h_f16_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, #-5, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast half* %a to <vscale x 8 x half>*
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %base_scalable, i64 -5
-  %base_scalar = bitcast <vscale x 8 x half>* %base to half*
-  call void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pg, half* %base_scalar)
+  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %a, i64 -5
+  %base_scalar = bitcast <vscale x 8 x half>* %base to ptr
+  call void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
-define void @st1h_bf16_inbound(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pg, bfloat* %a) #0 {
+define void @st1h_bf16_inbound(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pg, ptr %a) #0 {
 ; CHECK-LABEL: st1h_bf16_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, #-5, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast bfloat* %a to <vscale x 8 x bfloat>*
-  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %base_scalable, i64 -5
-  %base_scalar = bitcast <vscale x 8 x bfloat>* %base to bfloat*
-  call void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pg, bfloat* %base_scalar)
+  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %a, i64 -5
+  %base_scalar = bitcast <vscale x 8 x bfloat>* %base to ptr
+  call void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
-define void @st1h_s_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i16* %a) {
+define void @st1h_s_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: st1h_s_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i16* %a to <vscale x 4 x i16>*
-  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base_scalable, i64 2
-  %base_scalar = bitcast <vscale x 4 x i16>* %base to i16*
+  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %a, i64 2
+  %base_scalar = bitcast <vscale x 4 x i16>* %base to ptr
   %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
-  call void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16> %trunc, <vscale x 4 x i1> %pg, i16* %base_scalar)
+  call void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16> %trunc, <vscale x 4 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
-define void @st1h_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16* %a) {
+define void @st1h_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: st1h_d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, #-4, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i16* %a to <vscale x 2 x i16>*
-  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base_scalable, i64 -4
-  %base_scalar = bitcast <vscale x 2 x i16>* %base to i16*
+  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %a, i64 -4
+  %base_scalar = bitcast <vscale x 2 x i16>* %base to ptr
   %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
-  call void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16> %trunc, <vscale x 2 x i1> %pg, i16* %base_scalar)
+  call void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16> %trunc, <vscale x 2 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
@@ -177,40 +164,37 @@ define void @st1h_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16
 ; ST1W
 ;
 
-define void @st1w_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i32* %a) {
+define void @st1w_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: st1w_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, #6, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i32* %a to <vscale x 4 x i32>*
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %base_scalable, i64 6
-  %base_scalar = bitcast <vscale x 4 x i32>* %base to i32*
-  call void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i32* %base_scalar)
+  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %a, i64 6
+  %base_scalar = bitcast <vscale x 4 x i32>* %base to ptr
+  call void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
-define void @st1w_f32_inbound(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, float* %a) {
+define void @st1w_f32_inbound(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: st1w_f32_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, #-1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast float* %a to <vscale x 4 x float>*
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %base_scalable, i64 -1
-  %base_scalar = bitcast <vscale x 4 x float>* %base to float*
-  call void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, float* %base_scalar)
+  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %a, i64 -1
+  %base_scalar = bitcast <vscale x 4 x float>* %base to ptr
+  call void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
-define void @st1w_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32* %a) {
+define void @st1w_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: st1w_d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, #1, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i32* %a to <vscale x 2 x i32>*
-  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 2 x i32>* %base to i32*
+  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %a, i64 1
+  %base_scalar = bitcast <vscale x 2 x i32>* %base to ptr
   %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
-  call void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32> %trunc, <vscale x 2 x i1> %pg, i32* %base_scalar)
+  call void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32> %trunc, <vscale x 2 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
@@ -218,47 +202,45 @@ define void @st1w_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32
 ; ST1D
 ;
 
-define void @st1d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %a) {
+define void @st1d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: st1d_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, #5, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast i64* %a to <vscale x 2 x i64>*
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base_scalable, i64 5
-  %base_scalar = bitcast <vscale x 2 x i64>* %base to i64*
-  call void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %base_scalar)
+  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %a, i64 5
+  %base_scalar = bitcast <vscale x 2 x i64>* %base to ptr
+  call void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
-define void @st1d_f64_inbound(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %a) {
+define void @st1d_f64_inbound(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, ptr %a) {
 ; CHECK-LABEL: st1d_f64_inbound:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, #-8, mul vl]
 ; CHECK-NEXT:    ret
-  %base_scalable = bitcast double* %a to <vscale x 2 x double>*
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %base_scalable, i64 -8
-  %base_scalar = bitcast <vscale x 2 x double>* %base to double*
-  call void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %base_scalar)
+  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %a, i64 -8
+  %base_scalar = bitcast <vscale x 2 x double>* %base to ptr
+  call void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, ptr %base_scalar)
   ret void
 }
 
-declare void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
+declare void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, ptr)
 
-declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i8*)
-declare void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
-declare void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)
-declare void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, bfloat*)
+declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, ptr)
 
-declare void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i8*)
-declare void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*)
-declare void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
-declare void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*)
+declare void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, ptr)
 
-declare void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*)
-declare void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*)
-declare void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*)
-declare void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
-declare void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)
+declare void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, ptr)
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-reg.ll
index a29315dc2f544..e909d447f0fc9 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1-addressing-mode-reg-reg.ll
@@ -6,56 +6,56 @@
 ; ST1B
 ;
 
-define void @st1b_i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pred, i8* %a, i64 %index) {
+define void @st1b_i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: st1b_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %index
+  %base = getelementptr i8, ptr %a, i64 %index
   call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %base)
+                                          ptr %base)
   ret void
 }
 
 
 
-define void @st1b_h(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, i8* %a, i64 %index) {
+define void @st1b_h(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: st1b_h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %index
+  %base = getelementptr i8, ptr %a, i64 %index
   %trunc = trunc <vscale x 8 x i16> %data to <vscale x 8 x i8>
   call void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8> %trunc,
                                          <vscale x 8 x i1> %pred,
-                                         i8* %base)
+                                         ptr %base)
   ret void
 }
 
-define void @st1b_s(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i8* %a, i64 %index) {
+define void @st1b_s(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: st1b_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %index
+  %base = getelementptr i8, ptr %a, i64 %index
   %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
   call void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8> %trunc,
                                          <vscale x 4 x i1> %pred,
-                                         i8* %base)
+                                         ptr %base)
   ret void
 }
 
-define void @st1b_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i8* %a, i64 %index) {
+define void @st1b_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: st1b_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i8, i8* %a, i64 %index
+  %base = getelementptr i8, ptr %a, i64 %index
   %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
   call void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8> %trunc,
                                          <vscale x 2 x i1> %pred,
-                                         i8* %base)
+                                         ptr %base)
   ret void
 }
 
@@ -63,43 +63,43 @@ define void @st1b_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i8* %a, i
 ; ST1H
 ;
 
-define void @st1h_i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, i16* %a, i64 %index) {
+define void @st1h_i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: st1h_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i16, i16* %a, i64 %index
+  %base = getelementptr i16, ptr %a, i64 %index
   call void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16> %data,
                                           <vscale x 8 x i1> %pred,
-                                          i16* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st1h_f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pred, half* %a, i64 %index) {
+define void @st1h_f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: st1h_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr half, half* %a, i64 %index
+  %base = getelementptr half, ptr %a, i64 %index
   call void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half> %data,
                                           <vscale x 8 x i1> %pred,
-                                          half* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st1h_bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pred, bfloat* %a, i64 %index) #0 {
+define void @st1h_bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pred, ptr %a, i64 %index) #0 {
 ; CHECK-LABEL: st1h_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr bfloat, bfloat* %a, i64 %index
+  %base = getelementptr bfloat, ptr %a, i64 %index
   call void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat> %data,
                                            <vscale x 8 x i1> %pred,
-                                           bfloat* %base)
+                                           ptr %base)
   ret void
 }
 
-define void @st1h_s(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i16* %addr) {
+define void @st1h_s(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st1h_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0]
@@ -107,20 +107,20 @@ define void @st1h_s(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i16* %add
   %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
   call void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16> %trunc,
                                          <vscale x 4 x i1> %pred,
-                                         i16* %addr)
+                                         ptr %addr)
   ret void
 }
 
-define void @st1h_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i16* %a, i64 %index) {
+define void @st1h_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: st1h_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i16, i16* %a, i64 %index
+  %base = getelementptr i16, ptr %a, i64 %index
   %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
   call void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16> %trunc,
                                          <vscale x 2 x i1> %pred,
-                                         i16* %base)
+                                         ptr %base)
   ret void
 }
 
@@ -128,40 +128,40 @@ define void @st1h_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i16* %a,
 ; ST1W
 ;
 
-define void @st1w_i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i32* %a, i64 %index) {
+define void @st1w_i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: st1w_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i32, i32* %a, i64 %index
+  %base = getelementptr i32, ptr %a, i64 %index
   call void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32> %data,
                                           <vscale x 4 x i1> %pred,
-                                          i32* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st1w_f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pred, float* %a, i64 %index) {
+define void @st1w_f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: st1w_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %base = getelementptr float, float* %a, i64 %index
+  %base = getelementptr float, ptr %a, i64 %index
   call void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float> %data,
                                           <vscale x 4 x i1> %pred,
-                                          float* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st1w_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i32* %a, i64 %index) {
+define void @st1w_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: st1w_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i32, i32* %a, i64 %index
+  %base = getelementptr i32, ptr %a, i64 %index
   %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
   call void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32> %trunc,
                                           <vscale x 2 x i1> %pred,
-                                          i32* %base)
+                                          ptr %base)
   ret void
 }
 
@@ -169,47 +169,47 @@ define void @st1w_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i32* %a,
 ; ST1D
 ;
 
-define void @st1d_i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i64* %a, i64 %index) {
+define void @st1d_i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: st1d_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %base = getelementptr i64, i64* %a, i64 %index
+  %base = getelementptr i64, ptr %a, i64 %index
   call void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64> %data,
                                           <vscale x 2 x i1> %pred,
-                                          i64* %base)
+                                          ptr %base)
   ret void
 }
 
-define void @st1d_f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pred, double* %a, i64 %index) {
+define void @st1d_f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pred, ptr %a, i64 %index) {
 ; CHECK-LABEL: st1d_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %base = getelementptr double, double* %a, i64 %index
+  %base = getelementptr double, ptr %a, i64 %index
   call void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double> %data,
                                           <vscale x 2 x i1> %pred,
-                                          double* %base)
+                                          ptr %base)
   ret void
 }
 
-declare void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
+declare void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, ptr)
 
-declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i8*)
-declare void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
-declare void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)
-declare void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, bfloat*)
+declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, ptr)
 
-declare void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i8*)
-declare void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*)
-declare void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
-declare void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*)
+declare void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, ptr)
 
-declare void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*)
-declare void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*)
-declare void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*)
-declare void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
-declare void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)
+declare void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, ptr)
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll
index 8842337149d97..5214ccff0bfea 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-st1.ll
@@ -8,18 +8,18 @@
 ; ST1B
 ;
 
-define void @st1b_i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pred, i8* %addr) {
+define void @st1b_i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st1b_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st1b_h(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, i8* %addr) {
+define void @st1b_h(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st1b_h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x0]
@@ -27,11 +27,11 @@ define void @st1b_h(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, i8* %addr
   %trunc = trunc <vscale x 8 x i16> %data to <vscale x 8 x i8>
   call void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8> %trunc,
                                          <vscale x 8 x i1> %pred,
-                                         i8* %addr)
+                                         ptr %addr)
   ret void
 }
 
-define void @st1b_s(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i8* %addr) {
+define void @st1b_s(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st1b_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x0]
@@ -39,11 +39,11 @@ define void @st1b_s(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i8* %addr
   %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
   call void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8> %trunc,
                                          <vscale x 4 x i1> %pred,
-                                         i8* %addr)
+                                         ptr %addr)
   ret void
 }
 
-define void @st1b_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i8* %addr) {
+define void @st1b_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st1b_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0]
@@ -51,7 +51,7 @@ define void @st1b_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i8* %addr
   %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
   call void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8> %trunc,
                                          <vscale x 2 x i1> %pred,
-                                         i8* %addr)
+                                         ptr %addr)
   ret void
 }
 
@@ -59,40 +59,40 @@ define void @st1b_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i8* %addr
 ; ST1H
 ;
 
-define void @st1h_i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, i16* %addr) {
+define void @st1h_i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st1h_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16> %data,
                                           <vscale x 8 x i1> %pred,
-                                          i16* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st1h_f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pred, half* %addr) {
+define void @st1h_f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st1h_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half> %data,
                                           <vscale x 8 x i1> %pred,
-                                          half* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st1h_bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pred, bfloat* %addr) #0 {
+define void @st1h_bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pred, ptr %addr) #0 {
 ; CHECK-LABEL: st1h_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat> %data,
                                            <vscale x 8 x i1> %pred,
-                                           bfloat* %addr)
+                                           ptr %addr)
   ret void
 }
 
-define void @st1h_s(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i16* %addr) {
+define void @st1h_s(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st1h_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0]
@@ -100,11 +100,11 @@ define void @st1h_s(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i16* %add
   %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
   call void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16> %trunc,
                                          <vscale x 4 x i1> %pred,
-                                         i16* %addr)
+                                         ptr %addr)
   ret void
 }
 
-define void @st1h_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i16* %addr) {
+define void @st1h_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st1h_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0]
@@ -112,7 +112,7 @@ define void @st1h_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i16* %add
   %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
   call void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16> %trunc,
                                          <vscale x 2 x i1> %pred,
-                                         i16* %addr)
+                                         ptr %addr)
   ret void
 }
 
@@ -120,29 +120,29 @@ define void @st1h_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i16* %add
 ; ST1W
 ;
 
-define void @st1w_i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i32* %addr) {
+define void @st1w_i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st1w_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32> %data,
                                           <vscale x 4 x i1> %pred,
-                                          i32* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st1w_f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pred, float* %addr) {
+define void @st1w_f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st1w_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float> %data,
                                           <vscale x 4 x i1> %pred,
-                                          float* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st1w_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i32* %addr) {
+define void @st1w_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st1w_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0]
@@ -150,7 +150,7 @@ define void @st1w_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i32* %add
   %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
   call void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32> %trunc,
                                          <vscale x 2 x i1> %pred,
-                                         i32* %addr)
+                                         ptr %addr)
   ret void
 }
 
@@ -158,45 +158,45 @@ define void @st1w_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i32* %add
 ; ST1D
 ;
 
-define void @st1d_i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i64* %addr) {
+define void @st1d_i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st1d_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64> %data,
                                           <vscale x 2 x i1> %pred,
-                                          i64* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st1d_f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pred, double* %addr) {
+define void @st1d_f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st1d_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double> %data,
                                           <vscale x 2 x i1> %pred,
-                                          double* %addr)
+                                          ptr %addr)
   ret void
 }
 
-declare void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
+declare void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, ptr)
 
-declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i8*)
-declare void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
-declare void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)
-declare void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, bfloat*)
+declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, ptr)
 
-declare void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i8*)
-declare void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*)
-declare void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
-declare void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*)
+declare void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, ptr)
 
-declare void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*)
-declare void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*)
-declare void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*)
-declare void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
-declare void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)
+declare void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, ptr)
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-reg-addr-mode.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-reg-addr-mode.ll
index 1d5b0011c20e7..d6ee787a23f87 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-reg-addr-mode.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-stN-reg-reg-addr-mode.ll
@@ -6,18 +6,18 @@
 ; ST2B
 ;
 
-define void @st2b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, i8* %addr, i64 %offset) {
+define void @st2b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st2b_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2b { z0.b, z1.b }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr i8, i8* %addr, i64 %offset
+  %1 = getelementptr i8, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %1)
+                                          ptr %1)
   ret void
 }
 
@@ -25,33 +25,33 @@ define void @st2b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 1
 ; ST2H
 ;
 
-define void @st2h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i1> %pred, i16* %addr, i64 %offset) {
+define void @st2h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st2h_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2h { z0.h, z1.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr i16, i16* %addr, i64 %offset
+  %1 = getelementptr i16, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16> %v0,
                                           <vscale x 8 x i16> %v1,
                                           <vscale x 8 x i1> %pred,
-                                          i16* %1)
+                                          ptr %1)
   ret void
 }
 
-define void @st2h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x i1> %pred, half* %addr, i64 %offset) {
+define void @st2h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st2h_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2h { z0.h, z1.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr half, half* %addr, i64 %offset
+  %1 = getelementptr half, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half> %v0,
                                           <vscale x 8 x half> %v1,
                                           <vscale x 8 x i1> %pred,
-                                          half* %1)
+                                          ptr %1)
   ret void
 }
 
@@ -59,33 +59,33 @@ define void @st2h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale
 ; ST2W
 ;
 
-define void @st2w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i1> %pred, i32* %addr, i64 %offset) {
+define void @st2w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st2w_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2w { z0.s, z1.s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr i32, i32* %addr, i64 %offset
+  %1 = getelementptr i32, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32> %v0,
                                           <vscale x 4 x i32> %v1,
                                           <vscale x 4 x i1> %pred,
-                                          i32* %1)
+                                          ptr %1)
   ret void
 }
 
-define void @st2w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x i1> %pred, float* %addr, i64 %offset) {
+define void @st2w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st2w_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2w { z0.s, z1.s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr float, float* %addr, i64 %offset
+  %1 = getelementptr float, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st2.nxv4f32(<vscale x 4 x float> %v0,
                                           <vscale x 4 x float> %v1,
                                           <vscale x 4 x i1> %pred,
-                                          float* %1)
+                                          ptr %1)
   ret void
 }
 
@@ -93,33 +93,33 @@ define void @st2w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscal
 ; ST2D
 ;
 
-define void @st2d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i1> %pred, i64* %addr, i64 %offset) {
+define void @st2d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st2d_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2d { z0.d, z1.d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr i64, i64* %addr, i64 %offset
+  %1 = getelementptr i64, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64> %v0,
                                           <vscale x 2 x i64> %v1,
                                           <vscale x 2 x i1> %pred,
-                                          i64* %1)
+                                          ptr %1)
   ret void
 }
 
-define void @st2d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x i1> %pred, double* %addr, i64 %offset) {
+define void @st2d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st2d_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2d { z0.d, z1.d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr double, double* %addr, i64 %offset
+  %1 = getelementptr double, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st2.nxv2f64(<vscale x 2 x double> %v0,
                                           <vscale x 2 x double> %v1,
                                           <vscale x 2 x i1> %pred,
-                                          double* %1)
+                                          ptr %1)
   ret void
 }
 
@@ -127,7 +127,7 @@ define void @st2d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vsc
 ; ST3B
 ;
 
-define void @st3b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, i8* %addr, i64 %offset) {
+define void @st3b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st3b_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -135,12 +135,12 @@ define void @st3b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3b { z0.b - z2.b }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr i8, i8* %addr, i64 %offset
+  %1 = getelementptr i8, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %1)
+                                          ptr %1)
   ret void
 }
 
@@ -148,7 +148,7 @@ define void @st3b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 1
 ; ST3H
 ;
 
-define void @st3h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i1> %pred, i16* %addr, i64 %offset) {
+define void @st3h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st3h_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -156,16 +156,16 @@ define void @st3h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3h { z0.h - z2.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr i16, i16* %addr, i64 %offset
+  %1 = getelementptr i16, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st3.nxv8i16(<vscale x 8 x i16> %v0,
                                           <vscale x 8 x i16> %v1,
                                           <vscale x 8 x i16> %v2,
                                           <vscale x 8 x i1> %pred,
-                                          i16* %1)
+                                          ptr %1)
   ret void
 }
 
-define void @st3h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x i1> %pred, half* %addr, i64 %offset) {
+define void @st3h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st3h_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -173,12 +173,12 @@ define void @st3h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3h { z0.h - z2.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr half, half* %addr, i64 %offset
+  %1 = getelementptr half, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st3.nxv8f16(<vscale x 8 x half> %v0,
                                           <vscale x 8 x half> %v1,
                                           <vscale x 8 x half> %v2,
                                           <vscale x 8 x i1> %pred,
-                                          half* %1)
+                                          ptr %1)
   ret void
 }
 
@@ -186,7 +186,7 @@ define void @st3h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale
 ; ST3W
 ;
 
-define void @st3w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i1> %pred, i32* %addr, i64 %offset) {
+define void @st3w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st3w_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -194,16 +194,16 @@ define void @st3w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3w { z0.s - z2.s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr i32, i32* %addr, i64 %offset
+  %1 = getelementptr i32, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st3.nxv4i32(<vscale x 4 x i32> %v0,
                                           <vscale x 4 x i32> %v1,
                                           <vscale x 4 x i32> %v2,
                                           <vscale x 4 x i1> %pred,
-                                          i32* %1)
+                                          ptr %1)
   ret void
 }
 
-define void @st3w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x i1> %pred, float* %addr, i64 %offset) {
+define void @st3w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st3w_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -211,12 +211,12 @@ define void @st3w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscal
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3w { z0.s - z2.s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr float, float* %addr, i64 %offset
+  %1 = getelementptr float, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float> %v0,
                                           <vscale x 4 x float> %v1,
                                           <vscale x 4 x float> %v2,
                                           <vscale x 4 x i1> %pred,
-                                          float* %1)
+                                          ptr %1)
   ret void
 }
 
@@ -224,7 +224,7 @@ define void @st3w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscal
 ; ST3D
 ;
 
-define void @st3d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i1> %pred, i64* %addr, i64 %offset) {
+define void @st3d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st3d_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -232,16 +232,16 @@ define void @st3d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3d { z0.d - z2.d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr i64, i64* %addr, i64 %offset
+  %1 = getelementptr i64, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st3.nxv2i64(<vscale x 2 x i64> %v0,
                                           <vscale x 2 x i64> %v1,
                                           <vscale x 2 x i64> %v2,
                                           <vscale x 2 x i1> %pred,
-                                          i64* %1)
+                                          ptr %1)
   ret void
 }
 
-define void @st3d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x i1> %pred, double* %addr, i64 %offset) {
+define void @st3d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st3d_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -249,12 +249,12 @@ define void @st3d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vsc
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3d { z0.d - z2.d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr double, double* %addr, i64 %offset
+  %1 = getelementptr double, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st3.nxv2f64(<vscale x 2 x double> %v0,
                                           <vscale x 2 x double> %v1,
                                           <vscale x 2 x double> %v2,
                                           <vscale x 2 x i1> %pred,
-                                          double* %1)
+                                          ptr %1)
   ret void
 }
 
@@ -262,7 +262,7 @@ define void @st3d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vsc
 ; ST4B
 ;
 
-define void @st4b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, i8* %addr, i64 %offset) {
+define void @st4b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st4b_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -271,13 +271,13 @@ define void @st4b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4b { z0.b - z3.b }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr i8, i8* %addr, i64 %offset
+  %1 = getelementptr i8, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i8> %v3,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %1)
+                                          ptr %1)
   ret void
 }
 
@@ -285,7 +285,7 @@ define void @st4b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 1
 ; ST4H
 ;
 
-define void @st4h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i16> %v3, <vscale x 8 x i1> %pred, i16* %addr, i64 %offset) {
+define void @st4h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i16> %v3, <vscale x 8 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st4h_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -294,17 +294,17 @@ define void @st4h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4h { z0.h - z3.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr i16, i16* %addr, i64 %offset
+  %1 = getelementptr i16, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st4.nxv8i16(<vscale x 8 x i16> %v0,
                                           <vscale x 8 x i16> %v1,
                                           <vscale x 8 x i16> %v2,
                                           <vscale x 8 x i16> %v3,
                                           <vscale x 8 x i1> %pred,
-                                          i16* %1)
+                                          ptr %1)
   ret void
 }
 
-define void @st4h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x i1> %pred, half* %addr, i64 %offset) {
+define void @st4h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st4h_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -313,13 +313,13 @@ define void @st4h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4h { z0.h - z3.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr half, half* %addr, i64 %offset
+  %1 = getelementptr half, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st4.nxv8f16(<vscale x 8 x half> %v0,
                                           <vscale x 8 x half> %v1,
                                           <vscale x 8 x half> %v2,
                                           <vscale x 8 x half> %v3,
                                           <vscale x 8 x i1> %pred,
-                                          half* %1)
+                                          ptr %1)
   ret void
 }
 
@@ -327,7 +327,7 @@ define void @st4h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale
 ; ST4W
 ;
 
-define void @st4w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i32> %v3, <vscale x 4 x i1> %pred, i32* %addr, i64 %offset) {
+define void @st4w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i32> %v3, <vscale x 4 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st4w_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -336,17 +336,17 @@ define void @st4w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4w { z0.s - z3.s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr i32, i32* %addr, i64 %offset
+  %1 = getelementptr i32, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32> %v0,
                                           <vscale x 4 x i32> %v1,
                                           <vscale x 4 x i32> %v2,
                                           <vscale x 4 x i32> %v3,
                                           <vscale x 4 x i1> %pred,
-                                          i32* %1)
+                                          ptr %1)
   ret void
 }
 
-define void @st4w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x i1> %pred, float* %addr, i64 %offset) {
+define void @st4w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st4w_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -355,13 +355,13 @@ define void @st4w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscal
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4w { z0.s - z3.s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr float, float* %addr, i64 %offset
+  %1 = getelementptr float, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st4.nxv4f32(<vscale x 4 x float> %v0,
                                           <vscale x 4 x float> %v1,
                                           <vscale x 4 x float> %v2,
                                           <vscale x 4 x float> %v3,
                                           <vscale x 4 x i1> %pred,
-                                          float* %1)
+                                          ptr %1)
   ret void
 }
 
@@ -369,7 +369,7 @@ define void @st4w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscal
 ; ST4D
 ;
 
-define void @st4d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i64> %v3, <vscale x 2 x i1> %pred, i64* %addr, i64 %offset) {
+define void @st4d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i64> %v3, <vscale x 2 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st4d_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -378,17 +378,17 @@ define void @st4d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4d { z0.d - z3.d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr i64, i64* %addr, i64 %offset
+  %1 = getelementptr i64, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64> %v0,
                                           <vscale x 2 x i64> %v1,
                                           <vscale x 2 x i64> %v2,
                                           <vscale x 2 x i64> %v3,
                                           <vscale x 2 x i1> %pred,
-                                          i64* %1)
+                                          ptr %1)
   ret void
 }
 
-define void @st4d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x i1> %pred, double* %addr, i64 %offset) {
+define void @st4d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x i1> %pred, ptr %addr, i64 %offset) {
 ; CHECK-LABEL: st4d_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -397,36 +397,36 @@ define void @st4d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vsc
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4d { z0.d - z3.d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %1 = getelementptr double, double* %addr, i64 %offset
+  %1 = getelementptr double, ptr %addr, i64 %offset
   call void @llvm.aarch64.sve.st4.nxv2f64(<vscale x 2 x double> %v0,
                                           <vscale x 2 x double> %v1,
                                           <vscale x 2 x double> %v2,
                                           <vscale x 2 x double> %v3,
                                           <vscale x 2 x i1> %pred,
-                                          double* %1)
+                                          ptr %1)
   ret void
 }
 
-declare void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
-declare void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
-declare void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
-declare void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
-declare void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, half*)
-declare void @llvm.aarch64.sve.st2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, float*)
-declare void @llvm.aarch64.sve.st2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, double*)
-
-declare void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
-declare void @llvm.aarch64.sve.st3.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
-declare void @llvm.aarch64.sve.st3.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
-declare void @llvm.aarch64.sve.st3.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
-declare void @llvm.aarch64.sve.st3.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, half*)
-declare void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, float*)
-declare void @llvm.aarch64.sve.st3.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, double*)
-
-declare void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
-declare void @llvm.aarch64.sve.st4.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
-declare void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
-declare void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
-declare void @llvm.aarch64.sve.st4.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, half*)
-declare void @llvm.aarch64.sve.st4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, float*)
-declare void @llvm.aarch64.sve.st4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, double*)
+declare void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, ptr)
+
+declare void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, ptr)
+
+declare void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, ptr)

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-stores.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-stores.ll
index 3992ce3ff9262..d07fd8785121b 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-stores.ll
@@ -6,7 +6,7 @@
 ; ST2B
 ;
 
-define void @st2b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, i8* %addr) {
+define void @st2b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2b_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
@@ -16,7 +16,7 @@ define void @st2b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 1
   call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0,
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %addr)
+                                          ptr %addr)
   ret void
 }
 
@@ -24,7 +24,7 @@ define void @st2b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 1
 ; ST2H
 ;
 
-define void @st2h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i1> %pred, i16* %addr) {
+define void @st2h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2h_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
@@ -34,11 +34,11 @@ define void @st2h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x
   call void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16> %v0,
                                           <vscale x 8 x i16> %v1,
                                           <vscale x 8 x i1> %pred,
-                                          i16* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st2h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x i1> %pred, half* %addr) {
+define void @st2h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2h_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
@@ -48,11 +48,11 @@ define void @st2h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale
   call void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half> %v0,
                                           <vscale x 8 x half> %v1,
                                           <vscale x 8 x i1> %pred,
-                                          half* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st2h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x i1> %pred, bfloat* %addr) #0 {
+define void @st2h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x i1> %pred, ptr %addr) #0 {
 ; CHECK-LABEL: st2h_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
@@ -62,7 +62,7 @@ define void @st2h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vs
   call void @llvm.aarch64.sve.st2.nxv8bf16(<vscale x 8 x bfloat> %v0,
                                           <vscale x 8 x bfloat> %v1,
                                           <vscale x 8 x i1> %pred,
-                                          bfloat* %addr)
+                                          ptr %addr)
   ret void
 }
 
@@ -70,7 +70,7 @@ define void @st2h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vs
 ; ST2W
 ;
 
-define void @st2w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i1> %pred, i32* %addr) {
+define void @st2w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2w_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
@@ -80,11 +80,11 @@ define void @st2w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x
   call void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32> %v0,
                                           <vscale x 4 x i32> %v1,
                                           <vscale x 4 x i1> %pred,
-                                          i32* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st2w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x i1> %pred, float* %addr) {
+define void @st2w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2w_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
@@ -94,7 +94,7 @@ define void @st2w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscal
   call void @llvm.aarch64.sve.st2.nxv4f32(<vscale x 4 x float> %v0,
                                           <vscale x 4 x float> %v1,
                                           <vscale x 4 x i1> %pred,
-                                          float* %addr)
+                                          ptr %addr)
   ret void
 }
 
@@ -102,7 +102,7 @@ define void @st2w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscal
 ; ST2D
 ;
 
-define void @st2d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i1> %pred, i64* %addr) {
+define void @st2d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2d_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
@@ -112,11 +112,11 @@ define void @st2d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x
   call void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64> %v0,
                                           <vscale x 2 x i64> %v1,
                                           <vscale x 2 x i1> %pred,
-                                          i64* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st2d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x i1> %pred, double* %addr) {
+define void @st2d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2d_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
@@ -126,21 +126,21 @@ define void @st2d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vsc
   call void @llvm.aarch64.sve.st2.nxv2f64(<vscale x 2 x double> %v0,
                                           <vscale x 2 x double> %v1,
                                           <vscale x 2 x i1> %pred,
-                                          double* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st2d_ptr(<vscale x 2 x i8*> %v0, <vscale x 2 x i8*> %v1, <vscale x 2 x i1> %pred, i8** %addr) {
+define void @st2d_ptr(<vscale x 2 x ptr> %v0, <vscale x 2 x ptr> %v1, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st2d_ptr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
 ; CHECK-NEXT:    st2d { z0.d, z1.d }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.st2.nxv2p0i8(<vscale x 2 x i8*> %v0,
-                                           <vscale x 2 x i8*> %v1,
+  call void @llvm.aarch64.sve.st2.nxv2p0(<vscale x 2 x ptr> %v0,
+                                           <vscale x 2 x ptr> %v1,
                                            <vscale x 2 x i1> %pred,
-                                           i8** %addr)
+                                           ptr %addr)
   ret void
 }
 
@@ -148,7 +148,7 @@ define void @st2d_ptr(<vscale x 2 x i8*> %v0, <vscale x 2 x i8*> %v1, <vscale x
 ; ST3B
 ;
 
-define void @st3b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, i8* %addr) {
+define void @st3b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3b_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -160,7 +160,7 @@ define void @st3b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 1
                                           <vscale x 16 x i8> %v1,
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %addr)
+                                          ptr %addr)
   ret void
 }
 
@@ -168,7 +168,7 @@ define void @st3b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 1
 ; ST3H
 ;
 
-define void @st3h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i1> %pred, i16* %addr) {
+define void @st3h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3h_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -180,11 +180,11 @@ define void @st3h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x
                                           <vscale x 8 x i16> %v1,
                                           <vscale x 8 x i16> %v2,
                                           <vscale x 8 x i1> %pred,
-                                          i16* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st3h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x i1> %pred, half* %addr) {
+define void @st3h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3h_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -196,11 +196,11 @@ define void @st3h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale
                                           <vscale x 8 x half> %v1,
                                           <vscale x 8 x half> %v2,
                                           <vscale x 8 x i1> %pred,
-                                          half* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st3h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x i1> %pred, bfloat* %addr) #0 {
+define void @st3h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x i1> %pred, ptr %addr) #0 {
 ; CHECK-LABEL: st3h_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -212,7 +212,7 @@ define void @st3h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vs
                                           <vscale x 8 x bfloat> %v1,
                                           <vscale x 8 x bfloat> %v2,
                                           <vscale x 8 x i1> %pred,
-                                          bfloat* %addr)
+                                          ptr %addr)
   ret void
 }
 
@@ -220,7 +220,7 @@ define void @st3h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vs
 ; ST3W
 ;
 
-define void @st3w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i1> %pred, i32* %addr) {
+define void @st3w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3w_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -232,11 +232,11 @@ define void @st3w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x
                                           <vscale x 4 x i32> %v1,
                                           <vscale x 4 x i32> %v2,
                                           <vscale x 4 x i1> %pred,
-                                          i32* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st3w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x i1> %pred, float* %addr) {
+define void @st3w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3w_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -248,7 +248,7 @@ define void @st3w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscal
                                           <vscale x 4 x float> %v1,
                                           <vscale x 4 x float> %v2,
                                           <vscale x 4 x i1> %pred,
-                                          float* %addr)
+                                          ptr %addr)
   ret void
 }
 
@@ -256,7 +256,7 @@ define void @st3w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscal
 ; ST3D
 ;
 
-define void @st3d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i1> %pred, i64* %addr) {
+define void @st3d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3d_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -268,11 +268,11 @@ define void @st3d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x
                                           <vscale x 2 x i64> %v1,
                                           <vscale x 2 x i64> %v2,
                                           <vscale x 2 x i1> %pred,
-                                          i64* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st3d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x i1> %pred, double* %addr) {
+define void @st3d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3d_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -284,11 +284,11 @@ define void @st3d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vsc
                                           <vscale x 2 x double> %v1,
                                           <vscale x 2 x double> %v2,
                                           <vscale x 2 x i1> %pred,
-                                          double* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st3d_ptr(<vscale x 2 x i8*> %v0, <vscale x 2 x i8*> %v1, <vscale x 2 x i8*> %v2, <vscale x 2 x i1> %pred, i8** %addr) {
+define void @st3d_ptr(<vscale x 2 x ptr> %v0, <vscale x 2 x ptr> %v1, <vscale x 2 x ptr> %v2, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st3d_ptr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2 def $z0_z1_z2
@@ -296,11 +296,11 @@ define void @st3d_ptr(<vscale x 2 x i8*> %v0, <vscale x 2 x i8*> %v1, <vscale x
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2 def $z0_z1_z2
 ; CHECK-NEXT:    st3d { z0.d - z2.d }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.st3.nxv2p0i8(<vscale x 2 x i8*> %v0,
-                                           <vscale x 2 x i8*> %v1,
-                                           <vscale x 2 x i8*> %v2,
+  call void @llvm.aarch64.sve.st3.nxv2p0(<vscale x 2 x ptr> %v0,
+                                           <vscale x 2 x ptr> %v1,
+                                           <vscale x 2 x ptr> %v2,
                                            <vscale x 2 x i1> %pred,
-                                           i8** %addr)
+                                           ptr %addr)
   ret void
 }
 
@@ -308,7 +308,7 @@ define void @st3d_ptr(<vscale x 2 x i8*> %v0, <vscale x 2 x i8*> %v1, <vscale x
 ; ST4B
 ;
 
-define void @st4b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, i8* %addr) {
+define void @st4b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4b_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -322,7 +322,7 @@ define void @st4b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 1
                                           <vscale x 16 x i8> %v2,
                                           <vscale x 16 x i8> %v3,
                                           <vscale x 16 x i1> %pred,
-                                          i8* %addr)
+                                          ptr %addr)
   ret void
 }
 
@@ -330,7 +330,7 @@ define void @st4b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 1
 ; ST4H
 ;
 
-define void @st4h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i16> %v3, <vscale x 8 x i1> %pred, i16* %addr) {
+define void @st4h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i16> %v3, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4h_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -344,11 +344,11 @@ define void @st4h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x
                                           <vscale x 8 x i16> %v2,
                                           <vscale x 8 x i16> %v3,
                                           <vscale x 8 x i1> %pred,
-                                          i16* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st4h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x i1> %pred, half* %addr) {
+define void @st4h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4h_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -362,11 +362,11 @@ define void @st4h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale
                                           <vscale x 8 x half> %v2,
                                           <vscale x 8 x half> %v3,
                                           <vscale x 8 x i1> %pred,
-                                          half* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st4h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x i1> %pred, bfloat* %addr) #0 {
+define void @st4h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x i1> %pred, ptr %addr) #0 {
 ; CHECK-LABEL: st4h_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -380,7 +380,7 @@ define void @st4h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vs
                                           <vscale x 8 x bfloat> %v2,
                                           <vscale x 8 x bfloat> %v3,
                                           <vscale x 8 x i1> %pred,
-                                          bfloat* %addr)
+                                          ptr %addr)
   ret void
 }
 
@@ -388,7 +388,7 @@ define void @st4h_bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vs
 ; ST4W
 ;
 
-define void @st4w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i32> %v3, <vscale x 4 x i1> %pred, i32* %addr) {
+define void @st4w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i32> %v3, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4w_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -402,11 +402,11 @@ define void @st4w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x
                                           <vscale x 4 x i32> %v2,
                                           <vscale x 4 x i32> %v3,
                                           <vscale x 4 x i1> %pred,
-                                          i32* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st4w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x i1> %pred, float* %addr) {
+define void @st4w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4w_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -420,7 +420,7 @@ define void @st4w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscal
                                           <vscale x 4 x float> %v2,
                                           <vscale x 4 x float> %v3,
                                           <vscale x 4 x i1> %pred,
-                                          float* %addr)
+                                          ptr %addr)
   ret void
 }
 
@@ -428,7 +428,7 @@ define void @st4w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscal
 ; ST4D
 ;
 
-define void @st4d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i64> %v3, <vscale x 2 x i1> %pred, i64* %addr) {
+define void @st4d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i64> %v3, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4d_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -442,11 +442,11 @@ define void @st4d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x
                                           <vscale x 2 x i64> %v2,
                                           <vscale x 2 x i64> %v3,
                                           <vscale x 2 x i1> %pred,
-                                          i64* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st4d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x i1> %pred, double* %addr) {
+define void @st4d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4d_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -460,11 +460,11 @@ define void @st4d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vsc
                                           <vscale x 2 x double> %v2,
                                           <vscale x 2 x double> %v3,
                                           <vscale x 2 x i1> %pred,
-                                          double* %addr)
+                                          ptr %addr)
   ret void
 }
 
-define void @st4d_ptr(<vscale x 2 x i8*> %v0, <vscale x 2 x i8*> %v1, <vscale x 2 x i8*> %v2, <vscale x 2 x i8*> %v3, <vscale x 2 x i1> %pred, i8** %addr) {
+define void @st4d_ptr(<vscale x 2 x ptr> %v0, <vscale x 2 x ptr> %v1, <vscale x 2 x ptr> %v2, <vscale x 2 x ptr> %v3, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: st4d_ptr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
@@ -473,26 +473,26 @@ define void @st4d_ptr(<vscale x 2 x i8*> %v0, <vscale x 2 x i8*> %v1, <vscale x
 ; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
 ; CHECK-NEXT:    st4d { z0.d - z3.d }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.aarch64.sve.st4.nxv2p0i8(<vscale x 2 x i8*> %v0,
-                                           <vscale x 2 x i8*> %v1,
-                                           <vscale x 2 x i8*> %v2,
-                                           <vscale x 2 x i8*> %v3,
+  call void @llvm.aarch64.sve.st4.nxv2p0(<vscale x 2 x ptr> %v0,
+                                           <vscale x 2 x ptr> %v1,
+                                           <vscale x 2 x ptr> %v2,
+                                           <vscale x 2 x ptr> %v3,
                                            <vscale x 2 x i1> %pred,
-                                           i8** %addr)
+                                           ptr %addr)
   ret void
 }
 ;
 ; STNT1B
 ;
 
-define void @stnt1b_i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pred, i8* %addr) {
+define void @stnt1b_i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: stnt1b_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stnt1b { z0.b }, p0, [x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8> %data,
                                             <vscale x 16 x i1> %pred,
-                                            i8* %addr)
+                                            ptr %addr)
   ret void
 }
 
@@ -500,36 +500,36 @@ define void @stnt1b_i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pred, i8* %
 ; STNT1H
 ;
 
-define void @stnt1h_i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, i16* %addr) {
+define void @stnt1h_i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: stnt1h_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stnt1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16> %data,
                                             <vscale x 8 x i1> %pred,
-                                            i16* %addr)
+                                            ptr %addr)
   ret void
 }
 
-define void @stnt1h_f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pred, half* %addr) {
+define void @stnt1h_f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: stnt1h_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stnt1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.stnt1.nxv8f16(<vscale x 8 x half> %data,
                                             <vscale x 8 x i1> %pred,
-                                            half* %addr)
+                                            ptr %addr)
   ret void
 }
 
-define void @stnt1h_bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pred, bfloat* %addr) #0 {
+define void @stnt1h_bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pred, ptr %addr) #0 {
 ; CHECK-LABEL: stnt1h_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stnt1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.stnt1.nxv8bf16(<vscale x 8 x bfloat> %data,
                                              <vscale x 8 x i1> %pred,
-                                             bfloat* %addr)
+                                             ptr %addr)
   ret void
 }
 
@@ -537,25 +537,25 @@ define void @stnt1h_bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %pred, b
 ; STNT1W
 ;
 
-define void @stnt1w_i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i32* %addr) {
+define void @stnt1w_i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: stnt1w_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stnt1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32> %data,
                                             <vscale x 4 x i1> %pred,
-                                            i32* %addr)
+                                            ptr %addr)
   ret void
 }
 
-define void @stnt1w_f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pred, float* %addr) {
+define void @stnt1w_f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: stnt1w_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stnt1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.stnt1.nxv4f32(<vscale x 4 x float> %data,
                                             <vscale x 4 x i1> %pred,
-                                            float* %addr)
+                                            ptr %addr)
   ret void
 }
 
@@ -563,67 +563,67 @@ define void @stnt1w_f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pred, flo
 ; STNT1D
 ;
 
-define void @stnt1d_i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i64* %addr) {
+define void @stnt1d_i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: stnt1d_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stnt1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64> %data,
                                             <vscale x 2 x i1> %pred,
-                                            i64* %addr)
+                                            ptr %addr)
   ret void
 }
 
-define void @stnt1d_f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pred, double* %addr) {
+define void @stnt1d_f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pred, ptr %addr) {
 ; CHECK-LABEL: stnt1d_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stnt1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.stnt1.nxv2f64(<vscale x 2 x double> %data,
                                             <vscale x 2 x i1> %pred,
-                                            double* %addr)
-  ret void
-}
-
-
-declare void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
-declare void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
-declare void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
-declare void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
-declare void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, half*)
-declare void @llvm.aarch64.sve.st2.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x i1>, bfloat*)
-declare void @llvm.aarch64.sve.st2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, float*)
-declare void @llvm.aarch64.sve.st2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, double*)
-declare void @llvm.aarch64.sve.st2.nxv2p0i8(<vscale x 2 x i8*>, <vscale x 2 x i8*>, <vscale x 2 x i1>, i8** nocapture)
-
-declare void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
-declare void @llvm.aarch64.sve.st3.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
-declare void @llvm.aarch64.sve.st3.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
-declare void @llvm.aarch64.sve.st3.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
-declare void @llvm.aarch64.sve.st3.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, half*)
-declare void @llvm.aarch64.sve.st3.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x i1>, bfloat*)
-declare void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, float*)
-declare void @llvm.aarch64.sve.st3.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, double*)
-declare void @llvm.aarch64.sve.st3.nxv2p0i8(<vscale x 2 x i8*>, <vscale x 2 x i8*>, <vscale x 2 x i8*>, <vscale x 2 x i1>, i8** nocapture)
-
-declare void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
-declare void @llvm.aarch64.sve.st4.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
-declare void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
-declare void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
-declare void @llvm.aarch64.sve.st4.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, half*)
-declare void @llvm.aarch64.sve.st4.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x i1>, bfloat*)
-declare void @llvm.aarch64.sve.st4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, float*)
-declare void @llvm.aarch64.sve.st4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, double*)
-declare void @llvm.aarch64.sve.st4.nxv2p0i8(<vscale x 2 x i8*>, <vscale x 2 x i8*>, <vscale x 2 x i8*>, <vscale x 2 x i8*>, <vscale x 2 x i1>, i8** nocapture)
-
-declare void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
-declare void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
-declare void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
-declare void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
-declare void @llvm.aarch64.sve.stnt1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)
-declare void @llvm.aarch64.sve.stnt1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, bfloat*)
-declare void @llvm.aarch64.sve.stnt1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*)
-declare void @llvm.aarch64.sve.stnt1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)
+                                            ptr %addr)
+  ret void
+}
+
+
+declare void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st2.nxv2p0(<vscale x 2 x ptr>, <vscale x 2 x ptr>, <vscale x 2 x i1>, ptr nocapture)
+
+declare void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st3.nxv2p0(<vscale x 2 x ptr>, <vscale x 2 x ptr>, <vscale x 2 x ptr>, <vscale x 2 x i1>, ptr nocapture)
+
+declare void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.st4.nxv2p0(<vscale x 2 x ptr>, <vscale x 2 x ptr>, <vscale x 2 x ptr>, <vscale x 2 x ptr>, <vscale x 2 x i1>, ptr nocapture)
+
+declare void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, ptr)
+declare void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.stnt1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.stnt1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.stnt1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.stnt1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, ptr)
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+bf16" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll b/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll
index c93db15b0c7e1..ad1f4c9ca17c2 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll
@@ -5,7 +5,7 @@
 ; by performPostLD1Combine, which should bail out if the return
 ; type is not 128 or 64 bit vector.
 
-define <vscale x 4 x i32> @test_post_ld1_insert(i32* %a, i32** %ptr, i64 %inc) {
+define <vscale x 4 x i32> @test_post_ld1_insert(ptr %a, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_post_ld1_insert:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
@@ -13,14 +13,14 @@ define <vscale x 4 x i32> @test_post_ld1_insert(i32* %a, i32** %ptr, i64 %inc) {
 ; CHECK-NEXT:    add x8, x0, x2, lsl #2
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret
-  %load = load i32, i32* %a
+  %load = load i32, ptr %a
   %ins = insertelement <vscale x 4 x i32> undef, i32 %load, i32 0
-  %gep = getelementptr i32, i32* %a, i64 %inc
-  store i32* %gep, i32** %ptr
+  %gep = getelementptr i32, ptr %a, i64 %inc
+  store ptr %gep, ptr %ptr
   ret <vscale x 4 x i32> %ins
 }
 
-define <vscale x 2 x double> @test_post_ld1_dup(double* %a, double** %ptr, i64 %inc) {
+define <vscale x 2 x double> @test_post_ld1_dup(ptr %a, ptr %ptr, i64 %inc) {
 ; CHECK-LABEL: test_post_ld1_dup:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -28,14 +28,14 @@ define <vscale x 2 x double> @test_post_ld1_dup(double* %a, double** %ptr, i64 %
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret
-  %load = load double, double* %a
+  %load = load double, ptr %a
   %dup = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %load)
-  %gep = getelementptr double, double* %a, i64 %inc
-  store double* %gep, double** %ptr
+  %gep = getelementptr double, ptr %a, i64 %inc
+  store ptr %gep, ptr %ptr
   ret <vscale x 2 x double> %dup
 }
 
-define <4 x i64> @test_post_ld1_int_fixed(i64* %data, i64 %idx, <4 x i64>* %addr)  #1 {
+define <4 x i64> @test_post_ld1_int_fixed(ptr %data, i64 %idx, ptr %addr)  #1 {
 ; CHECK-LABEL: test_post_ld1_int_fixed:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -53,17 +53,17 @@ define <4 x i64> @test_post_ld1_int_fixed(i64* %data, i64 %idx, <4 x i64>* %addr
 ; CHECK-NEXT:    add z0.d, z1.d, z0.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
 ; CHECK-NEXT:    ret
-  %A = load <4 x i64>, <4 x i64>* %addr
-  %ld1 = load i64, i64* %data
+  %A = load <4 x i64>, ptr %addr
+  %ld1 = load i64, ptr %data
   %vec1 = insertelement <4 x i64> %A, i64 %ld1, i32 0
-  %gep = getelementptr i64, i64* %data, i64 %idx
-  %ld2 = load i64, i64* %gep
+  %gep = getelementptr i64, ptr %data, i64 %idx
+  %ld2 = load i64, ptr %gep
   %vec2 = insertelement <4 x i64> %A, i64 %ld2, i32 2
   %res = add <4 x i64> %vec1, %vec2
   ret <4 x i64> %res
 }
 
-define <4 x double> @test_post_ld1_double_fixed(double* %data, i64 %idx, <4 x double>* %addr)  #1 {
+define <4 x double> @test_post_ld1_double_fixed(ptr %data, i64 %idx, ptr %addr)  #1 {
 ; CHECK-LABEL: test_post_ld1_double_fixed:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
@@ -80,11 +80,11 @@ define <4 x double> @test_post_ld1_double_fixed(double* %data, i64 %idx, <4 x do
 ; CHECK-NEXT:    fadd z0.d, z2.d, z0.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
 ; CHECK-NEXT:    ret
-  %A = load <4 x double>, <4 x double>* %addr
-  %ld1 = load double, double* %data
+  %A = load <4 x double>, ptr %addr
+  %ld1 = load double, ptr %data
   %vec1 = insertelement <4 x double> %A, double %ld1, i32 0
-  %gep = getelementptr double, double* %data, i64 %idx
-  %ld2 = load double, double* %gep
+  %gep = getelementptr double, ptr %data, i64 %idx
+  %ld2 = load double, ptr %gep
   %vec2 = insertelement <4 x double> %A, double %ld2, i32 2
   %res = fadd <4 x double> %vec1, %vec2
   ret <4 x double> %res

diff  --git a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll
index fe1fb107ecf70..636e332df13c7 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll
@@ -3,356 +3,328 @@
 
 ; LD1B
 
-define <vscale x 16 x i8> @ld1_nxv16i8(i8* %addr, i64 %off) {
+define <vscale x 16 x i8> @ld1_nxv16i8(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %off
-  %ptrcast = bitcast i8* %ptr to <vscale x 16 x i8>*
-  %val = load volatile <vscale x 16 x i8>, <vscale x 16 x i8>* %ptrcast
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
+  %val = load volatile <vscale x 16 x i8>, ptr %ptr
   ret <vscale x 16 x i8> %val
 }
 
-define <vscale x 8 x i16> @ld1_nxv16i8_bitcast_to_i16(i8* %addr, i64 %off) {
+define <vscale x 8 x i16> @ld1_nxv16i8_bitcast_to_i16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv16i8_bitcast_to_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %off
-  %ptrcast = bitcast i8* %ptr to <vscale x 8 x i16>*
-  %val = load volatile <vscale x 8 x i16>, <vscale x 8 x i16>* %ptrcast
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
+  %val = load volatile <vscale x 8 x i16>, ptr %ptr
   ret <vscale x 8 x i16> %val
 }
 
-define <vscale x 4 x i32> @ld1_nxv16i8_bitcast_to_i32(i8* %addr, i64 %off) {
+define <vscale x 4 x i32> @ld1_nxv16i8_bitcast_to_i32(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv16i8_bitcast_to_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %off
-  %ptrcast = bitcast i8* %ptr to <vscale x 4 x i32>*
-  %val = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %ptrcast
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
+  %val = load volatile <vscale x 4 x i32>, ptr %ptr
   ret <vscale x 4 x i32> %val
 }
 
-define <vscale x 2 x i64> @ld1_nxv16i8_bitcast_to_i64(i8* %addr, i64 %off) {
+define <vscale x 2 x i64> @ld1_nxv16i8_bitcast_to_i64(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv16i8_bitcast_to_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %off
-  %ptrcast = bitcast i8* %ptr to <vscale x 2 x i64>*
-  %val = load volatile <vscale x 2 x i64>, <vscale x 2 x i64>* %ptrcast
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
+  %val = load volatile <vscale x 2 x i64>, ptr %ptr
   ret <vscale x 2 x i64> %val
 }
 
-define <vscale x 8 x i16> @ld1_nxv8i16_zext8(i8* %addr, i64 %off) {
+define <vscale x 8 x i16> @ld1_nxv8i16_zext8(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv8i16_zext8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %off
-  %ptrcast = bitcast i8* %ptr to <vscale x 8 x i8>*
-  %val = load volatile <vscale x 8 x i8>, <vscale x 8 x i8>* %ptrcast
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
+  %val = load volatile <vscale x 8 x i8>, ptr %ptr
   %zext = zext <vscale x 8 x i8> %val to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %zext
 }
 
-define <vscale x 4 x i32> @ld1_nxv4i32_zext8(i8* %addr, i64 %off) {
+define <vscale x 4 x i32> @ld1_nxv4i32_zext8(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv4i32_zext8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %off
-  %ptrcast = bitcast i8* %ptr to <vscale x 4 x i8>*
-  %val = load volatile <vscale x 4 x i8>, <vscale x 4 x i8>* %ptrcast
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
+  %val = load volatile <vscale x 4 x i8>, ptr %ptr
   %zext = zext <vscale x 4 x i8> %val to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %zext
 }
 
-define <vscale x 2 x i64> @ld1_nxv2i64_zext8(i8* %addr, i64 %off) {
+define <vscale x 2 x i64> @ld1_nxv2i64_zext8(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2i64_zext8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %off
-  %ptrcast = bitcast i8* %ptr to <vscale x 2 x i8>*
-  %val = load volatile <vscale x 2 x i8>, <vscale x 2 x i8>* %ptrcast
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
+  %val = load volatile <vscale x 2 x i8>, ptr %ptr
   %zext = zext <vscale x 2 x i8> %val to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %zext
 }
 
-define <vscale x 8 x i16> @ld1_nxv8i16_sext8(i8* %addr, i64 %off) {
+define <vscale x 8 x i16> @ld1_nxv8i16_sext8(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv8i16_sext8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1sb { z0.h }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %off
-  %ptrcast = bitcast i8* %ptr to <vscale x 8 x i8>*
-  %val = load volatile <vscale x 8 x i8>, <vscale x 8 x i8>* %ptrcast
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
+  %val = load volatile <vscale x 8 x i8>, ptr %ptr
   %sext = sext <vscale x 8 x i8> %val to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %sext
 }
 
-define <vscale x 4 x i32> @ld1_nxv4i32_sext8(i8* %addr, i64 %off) {
+define <vscale x 4 x i32> @ld1_nxv4i32_sext8(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv4i32_sext8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %off
-  %ptrcast = bitcast i8* %ptr to <vscale x 4 x i8>*
-  %val = load volatile <vscale x 4 x i8>, <vscale x 4 x i8>* %ptrcast
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
+  %val = load volatile <vscale x 4 x i8>, ptr %ptr
   %sext = sext <vscale x 4 x i8> %val to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %sext
 }
 
-define <vscale x 2 x i64> @ld1_nxv2i64_sext8(i8* %addr, i64 %off) {
+define <vscale x 2 x i64> @ld1_nxv2i64_sext8(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2i64_sext8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %off
-  %ptrcast = bitcast i8* %ptr to <vscale x 2 x i8>*
-  %val = load volatile <vscale x 2 x i8>, <vscale x 2 x i8>* %ptrcast
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
+  %val = load volatile <vscale x 2 x i8>, ptr %ptr
   %sext = sext <vscale x 2 x i8> %val to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %sext
 }
 
 ; LD1H
 
-define <vscale x 8 x i16> @ld1_nxv8i16(i16* %addr, i64 %off) {
+define <vscale x 8 x i16> @ld1_nxv8i16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i16, i16* %addr, i64 %off
-  %ptrcast = bitcast i16* %ptr to <vscale x 8 x i16>*
-  %val = load volatile <vscale x 8 x i16>, <vscale x 8 x i16>* %ptrcast
+  %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
+  %val = load volatile <vscale x 8 x i16>, ptr %ptr
   ret <vscale x 8 x i16> %val
 }
 
-define <vscale x 4 x i32> @ld1_nxv4i32_zext16(i16* %addr, i64 %off) {
+define <vscale x 4 x i32> @ld1_nxv4i32_zext16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv4i32_zext16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i16, i16* %addr, i64 %off
-  %ptrcast = bitcast i16* %ptr to <vscale x 4 x i16>*
-  %val = load volatile <vscale x 4 x i16>, <vscale x 4 x i16>* %ptrcast
+  %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
+  %val = load volatile <vscale x 4 x i16>, ptr %ptr
   %zext = zext <vscale x 4 x i16> %val to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %zext
 }
 
-define <vscale x 2 x i64> @ld1_nxv2i64_zext16(i16* %addr, i64 %off) {
+define <vscale x 2 x i64> @ld1_nxv2i64_zext16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2i64_zext16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i16, i16* %addr, i64 %off
-  %ptrcast = bitcast i16* %ptr to <vscale x 2 x i16>*
-  %val = load volatile <vscale x 2 x i16>, <vscale x 2 x i16>* %ptrcast
+  %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
+  %val = load volatile <vscale x 2 x i16>, ptr %ptr
   %zext = zext <vscale x 2 x i16> %val to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %zext
 }
 
-define <vscale x 4 x i32> @ld1_nxv4i32_sext16(i16* %addr, i64 %off) {
+define <vscale x 4 x i32> @ld1_nxv4i32_sext16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv4i32_sext16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i16, i16* %addr, i64 %off
-  %ptrcast = bitcast i16* %ptr to <vscale x 4 x i16>*
-  %val = load volatile <vscale x 4 x i16>, <vscale x 4 x i16>* %ptrcast
+  %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
+  %val = load volatile <vscale x 4 x i16>, ptr %ptr
   %sext = sext <vscale x 4 x i16> %val to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %sext
 }
 
-define <vscale x 2 x i64> @ld1_nxv2i64_sext16(i16* %addr, i64 %off) {
+define <vscale x 2 x i64> @ld1_nxv2i64_sext16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2i64_sext16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i16, i16* %addr, i64 %off
-  %ptrcast = bitcast i16* %ptr to <vscale x 2 x i16>*
-  %val = load volatile <vscale x 2 x i16>, <vscale x 2 x i16>* %ptrcast
+  %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
+  %val = load volatile <vscale x 2 x i16>, ptr %ptr
   %sext = sext <vscale x 2 x i16> %val to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %sext
 }
 
-define <vscale x 8 x half> @ld1_nxv8f16(half* %addr, i64 %off) {
+define <vscale x 8 x half> @ld1_nxv8f16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds half, half* %addr, i64 %off
-  %ptrcast = bitcast half* %ptr to <vscale x 8 x half>*
-  %val = load volatile <vscale x 8 x half>, <vscale x 8 x half>* %ptrcast
+  %ptr = getelementptr inbounds half, ptr %addr, i64 %off
+  %val = load volatile <vscale x 8 x half>, ptr %ptr
   ret <vscale x 8 x half> %val
 }
 
-define <vscale x 8 x bfloat> @ld1_nxv8bf16(bfloat* %addr, i64 %off) {
+define <vscale x 8 x bfloat> @ld1_nxv8bf16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv8bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds bfloat, bfloat* %addr, i64 %off
-  %ptrcast = bitcast bfloat* %ptr to <vscale x 8 x bfloat>*
-  %val = load volatile <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %ptrcast
+  %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
+  %val = load volatile <vscale x 8 x bfloat>, ptr %ptr
   ret <vscale x 8 x bfloat> %val
 }
 
-define <vscale x 4 x half> @ld1_nxv4f16(half* %addr, i64 %off) {
+define <vscale x 4 x half> @ld1_nxv4f16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds half, half* %addr, i64 %off
-  %ptrcast = bitcast half* %ptr to <vscale x 4 x half>*
-  %val = load volatile <vscale x 4 x half>, <vscale x 4 x half>* %ptrcast
+  %ptr = getelementptr inbounds half, ptr %addr, i64 %off
+  %val = load volatile <vscale x 4 x half>, ptr %ptr
   ret <vscale x 4 x half> %val
 }
 
-define <vscale x 4 x bfloat> @ld1_nxv4bf16(bfloat* %addr, i64 %off) {
+define <vscale x 4 x bfloat> @ld1_nxv4bf16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv4bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds bfloat, bfloat* %addr, i64 %off
-  %ptrcast = bitcast bfloat* %ptr to <vscale x 4 x bfloat>*
-  %val = load volatile <vscale x 4 x bfloat>, <vscale x 4 x bfloat>* %ptrcast
+  %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
+  %val = load volatile <vscale x 4 x bfloat>, ptr %ptr
   ret <vscale x 4 x bfloat> %val
 }
 
-define <vscale x 2 x half> @ld1_nxv2f16(half* %addr, i64 %off) {
+define <vscale x 2 x half> @ld1_nxv2f16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds half, half* %addr, i64 %off
-  %ptrcast = bitcast half* %ptr to <vscale x 2 x half>*
-  %val = load volatile <vscale x 2 x half>, <vscale x 2 x half>* %ptrcast
+  %ptr = getelementptr inbounds half, ptr %addr, i64 %off
+  %val = load volatile <vscale x 2 x half>, ptr %ptr
   ret <vscale x 2 x half> %val
 }
 
-define <vscale x 2 x bfloat> @ld1_nxv2bf16(bfloat* %addr, i64 %off) {
+define <vscale x 2 x bfloat> @ld1_nxv2bf16(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds bfloat, bfloat* %addr, i64 %off
-  %ptrcast = bitcast bfloat* %ptr to <vscale x 2 x bfloat>*
-  %val = load volatile <vscale x 2 x bfloat>, <vscale x 2 x bfloat>* %ptrcast
+  %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
+  %val = load volatile <vscale x 2 x bfloat>, ptr %ptr
   ret <vscale x 2 x bfloat> %val
 }
 
 ; LD1W
 
-define <vscale x 4 x i32> @ld1_nxv4i32(i32* %addr, i64 %off) {
+define <vscale x 4 x i32> @ld1_nxv4i32(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i32, i32* %addr, i64 %off
-  %ptrcast = bitcast i32* %ptr to <vscale x 4 x i32>*
-  %val = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %ptrcast
+  %ptr = getelementptr inbounds i32, ptr %addr, i64 %off
+  %val = load volatile <vscale x 4 x i32>, ptr %ptr
   ret <vscale x 4 x i32> %val
 }
 
-define <vscale x 2 x i64> @ld1_nxv2i64_zext32(i32* %addr, i64 %off) {
+define <vscale x 2 x i64> @ld1_nxv2i64_zext32(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2i64_zext32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i32, i32* %addr, i64 %off
-  %ptrcast = bitcast i32* %ptr to <vscale x 2 x i32>*
-  %val = load volatile <vscale x 2 x i32>, <vscale x 2 x i32>* %ptrcast
+  %ptr = getelementptr inbounds i32, ptr %addr, i64 %off
+  %val = load volatile <vscale x 2 x i32>, ptr %ptr
   %zext = zext <vscale x 2 x i32> %val to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %zext
 }
 
-define <vscale x 2 x i64> @ld1_nxv2i64_sext32(i32* %addr, i64 %off) {
+define <vscale x 2 x i64> @ld1_nxv2i64_sext32(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2i64_sext32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i32, i32* %addr, i64 %off
-  %ptrcast = bitcast i32* %ptr to <vscale x 2 x i32>*
-  %val = load volatile <vscale x 2 x i32>, <vscale x 2 x i32>* %ptrcast
+  %ptr = getelementptr inbounds i32, ptr %addr, i64 %off
+  %val = load volatile <vscale x 2 x i32>, ptr %ptr
   %sext = sext <vscale x 2 x i32> %val to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %sext
 }
 
-define <vscale x 4 x float> @ld1_nxv4f32(float* %addr, i64 %off) {
+define <vscale x 4 x float> @ld1_nxv4f32(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds float, float* %addr, i64 %off
-  %ptrcast = bitcast float* %ptr to <vscale x 4 x float>*
-  %val = load volatile <vscale x 4 x float>, <vscale x 4 x float>* %ptrcast
+  %ptr = getelementptr inbounds float, ptr %addr, i64 %off
+  %val = load volatile <vscale x 4 x float>, ptr %ptr
   ret <vscale x 4 x float> %val
 }
 
-define <vscale x 2 x float> @ld1_nxv2f32(float* %addr, i64 %off) {
+define <vscale x 2 x float> @ld1_nxv2f32(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds float, float* %addr, i64 %off
-  %ptrcast = bitcast float* %ptr to <vscale x 2 x float>*
-  %val = load volatile <vscale x 2 x float>, <vscale x 2 x float>* %ptrcast
+  %ptr = getelementptr inbounds float, ptr %addr, i64 %off
+  %val = load volatile <vscale x 2 x float>, ptr %ptr
   ret <vscale x 2 x float> %val
 }
 
 ; LD1D
 
-define <vscale x 2 x i64> @ld1_nxv2i64(i64* %addr, i64 %off) {
+define <vscale x 2 x i64> @ld1_nxv2i64(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i64, i64* %addr, i64 %off
-  %ptrcast = bitcast i64* %ptr to <vscale x 2 x i64>*
-  %val = load volatile <vscale x 2 x i64>, <vscale x 2 x i64>* %ptrcast
+  %ptr = getelementptr inbounds i64, ptr %addr, i64 %off
+  %val = load volatile <vscale x 2 x i64>, ptr %ptr
   ret <vscale x 2 x i64> %val
 }
 
-define <vscale x 2 x double> @ld1_nxv2f64(double* %addr, i64 %off) {
+define <vscale x 2 x double> @ld1_nxv2f64(ptr %addr, i64 %off) {
 ; CHECK-LABEL: ld1_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds double, double* %addr, i64 %off
-  %ptrcast = bitcast double* %ptr to <vscale x 2 x double>*
-  %val = load volatile <vscale x 2 x double>, <vscale x 2 x double>* %ptrcast
+  %ptr = getelementptr inbounds double, ptr %addr, i64 %off
+  %val = load volatile <vscale x 2 x double>, ptr %ptr
   ret <vscale x 2 x double> %val
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve-ld1r.ll b/llvm/test/CodeGen/AArch64/sve-ld1r.ll
index 57aeb230f3bd8..d7a920960156d 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld1r.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld1r.ll
@@ -28,792 +28,792 @@ define <vscale x 16 x i8> @ld1r_stack() {
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %valp = alloca i8
-  %valp2  = load volatile i8, i8* @g8
-  store volatile i8 %valp2, i8* %valp
-  %valp3 = getelementptr i8, i8* %valp, i32 2
-  %val = load i8, i8* %valp3
+  %valp2  = load volatile i8, ptr @g8
+  store volatile i8 %valp2, ptr %valp
+  %valp3 = getelementptr i8, ptr %valp, i32 2
+  %val = load i8, ptr %valp3
   %1 = insertelement <vscale x 16 x i8> undef, i8 %val, i32 0
   %2 = shufflevector <vscale x 16 x i8> %1, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   ret <vscale x 16 x i8> %2
 }
 
-define <vscale x 16 x i8> @ld1rb(i8* %valp) {
+define <vscale x 16 x i8> @ld1rb(ptr %valp) {
 ; CHECK-LABEL: ld1rb:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rb { z0.b }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i8, i8* %valp
+  %val = load i8, ptr %valp
   %ins = insertelement <vscale x 16 x i8> undef, i8 %val, i32 0
   %shf = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   ret <vscale x 16 x i8> %shf
 }
 
-define <vscale x 16 x i8> @ld1rb_gep(i8* %valp) {
+define <vscale x 16 x i8> @ld1rb_gep(ptr %valp) {
 ; CHECK-LABEL: ld1rb_gep:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rb { z0.b }, p0/z, [x0, #63]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr i8, i8* %valp, i32 63
-  %val = load i8, i8* %valp2
+  %valp2 = getelementptr i8, ptr %valp, i32 63
+  %val = load i8, ptr %valp2
   %ins = insertelement <vscale x 16 x i8> undef, i8 %val, i32 0
   %shf = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   ret <vscale x 16 x i8> %shf
 }
 
-define <vscale x 16 x i8> @ld1rb_gep_out_of_range_up(i8* %valp) {
+define <vscale x 16 x i8> @ld1rb_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LABEL: ld1rb_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, #64
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rb { z0.b }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr i8, i8* %valp, i32 64
-  %val = load i8, i8* %valp2
+  %valp2 = getelementptr i8, ptr %valp, i32 64
+  %val = load i8, ptr %valp2
   %ins = insertelement <vscale x 16 x i8> undef, i8 %val, i32 0
   %shf = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   ret <vscale x 16 x i8> %shf
 }
 
-define <vscale x 16 x i8> @ld1rb_gep_out_of_range_down(i8* %valp) {
+define <vscale x 16 x i8> @ld1rb_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LABEL: ld1rb_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub x8, x0, #1
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rb { z0.b }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr i8, i8* %valp, i32 -1
-  %val = load i8, i8* %valp2
+  %valp2 = getelementptr i8, ptr %valp, i32 -1
+  %val = load i8, ptr %valp2
   %ins = insertelement <vscale x 16 x i8> undef, i8 %val, i32 0
   %shf = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   ret <vscale x 16 x i8> %shf
 }
 
-define <vscale x 8 x i16> @ld1rb_i8_i16_zext(i8* %valp) {
+define <vscale x 8 x i16> @ld1rb_i8_i16_zext(ptr %valp) {
 ; CHECK-LABEL: ld1rb_i8_i16_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rb { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i8, i8* %valp
+  %val = load i8, ptr %valp
   %ext = zext i8 %val to i16
   %ins = insertelement <vscale x 8 x i16> undef, i16 %ext, i32 0
   %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x i16> %shf
 }
 
-define <vscale x 8 x i16> @ld1rb_i8_i16_sext(i8* %valp) {
+define <vscale x 8 x i16> @ld1rb_i8_i16_sext(ptr %valp) {
 ; CHECK-LABEL: ld1rb_i8_i16_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rsb { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i8, i8* %valp
+  %val = load i8, ptr %valp
   %ext = sext i8 %val to i16
   %ins = insertelement <vscale x 8 x i16> undef, i16 %ext, i32 0
   %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x i16> %shf
 }
 
-define <vscale x 4 x i32> @ld1rb_i8_i32_zext(i8* %valp) {
+define <vscale x 4 x i32> @ld1rb_i8_i32_zext(ptr %valp) {
 ; CHECK-LABEL: ld1rb_i8_i32_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rb { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i8, i8* %valp
+  %val = load i8, ptr %valp
   %ext = zext i8 %val to i32
   %ins = insertelement <vscale x 4 x i32> undef, i32 %ext, i32 0
   %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x i32> %shf
 }
 
-define <vscale x 4 x i32> @ld1rb_i8_i32_sext(i8* %valp) {
+define <vscale x 4 x i32> @ld1rb_i8_i32_sext(ptr %valp) {
 ; CHECK-LABEL: ld1rb_i8_i32_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rsb { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i8, i8* %valp
+  %val = load i8, ptr %valp
   %ext = sext i8 %val to i32
   %ins = insertelement <vscale x 4 x i32> undef, i32 %ext, i32 0
   %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x i32> %shf
 }
 
-define <vscale x 2 x i64> @ld1rb_i8_i64_zext(i8* %valp) {
+define <vscale x 2 x i64> @ld1rb_i8_i64_zext(ptr %valp) {
 ; CHECK-LABEL: ld1rb_i8_i64_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i8, i8* %valp
+  %val = load i8, ptr %valp
   %ext = zext i8 %val to i64
   %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
   %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x i64> %shf
 }
 
-define <vscale x 2 x i64> @ld1rb_i8_i64_sext(i8* %valp) {
+define <vscale x 2 x i64> @ld1rb_i8_i64_sext(ptr %valp) {
 ; CHECK-LABEL: ld1rb_i8_i64_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rsb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i8, i8* %valp
+  %val = load i8, ptr %valp
   %ext = sext i8 %val to i64
   %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
   %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x i64> %shf
 }
 
-define <vscale x 8 x i16> @ld1rh(i16* %valp) {
+define <vscale x 8 x i16> @ld1rh(ptr %valp) {
 ; CHECK-LABEL: ld1rh:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i16, i16* %valp
+  %val = load i16, ptr %valp
   %ins = insertelement <vscale x 8 x i16> undef, i16 %val, i32 0
   %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x i16> %shf
 }
 
-define <vscale x 8 x i16> @ld1rh_gep(i16* %valp) {
+define <vscale x 8 x i16> @ld1rh_gep(ptr %valp) {
 ; CHECK-LABEL: ld1rh_gep:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0, #126]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr i16, i16* %valp, i32 63
-  %val = load i16, i16* %valp2
+  %valp2 = getelementptr i16, ptr %valp, i32 63
+  %val = load i16, ptr %valp2
   %ins = insertelement <vscale x 8 x i16> undef, i16 %val, i32 0
   %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x i16> %shf
 }
 
-define <vscale x 8 x i16> @ld1rh_gep_out_of_range_up(i16* %valp) {
+define <vscale x 8 x i16> @ld1rh_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LABEL: ld1rh_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, #128
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr i16, i16* %valp, i32 64
-  %val = load i16, i16* %valp2
+  %valp2 = getelementptr i16, ptr %valp, i32 64
+  %val = load i16, ptr %valp2
   %ins = insertelement <vscale x 8 x i16> undef, i16 %val, i32 0
   %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x i16> %shf
 }
 
-define <vscale x 8 x i16> @ld1rh_gep_out_of_range_down(i16* %valp) {
+define <vscale x 8 x i16> @ld1rh_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LABEL: ld1rh_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub x8, x0, #2
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr i16, i16* %valp, i32 -1
-  %val = load i16, i16* %valp2
+  %valp2 = getelementptr i16, ptr %valp, i32 -1
+  %val = load i16, ptr %valp2
   %ins = insertelement <vscale x 8 x i16> undef, i16 %val, i32 0
   %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x i16> %shf
 }
 
-define <vscale x 4 x i32> @ld1rh_i16_i32_zext(i16* %valp) {
+define <vscale x 4 x i32> @ld1rh_i16_i32_zext(ptr %valp) {
 ; CHECK-LABEL: ld1rh_i16_i32_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i16, i16* %valp
+  %val = load i16, ptr %valp
   %ext = zext i16 %val to i32
   %ins = insertelement <vscale x 4 x i32> undef, i32 %ext, i32 0
   %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x i32> %shf
 }
 
-define <vscale x 4 x i32> @ld1rh_i16_i32_sext(i16* %valp) {
+define <vscale x 4 x i32> @ld1rh_i16_i32_sext(ptr %valp) {
 ; CHECK-LABEL: ld1rh_i16_i32_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rsh { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i16, i16* %valp
+  %val = load i16, ptr %valp
   %ext = sext i16 %val to i32
   %ins = insertelement <vscale x 4 x i32> undef, i32 %ext, i32 0
   %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x i32> %shf
 }
 
-define <vscale x 2 x i64> @ld1rh_i16_i64_zext(i16* %valp) {
+define <vscale x 2 x i64> @ld1rh_i16_i64_zext(ptr %valp) {
 ; CHECK-LABEL: ld1rh_i16_i64_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i16, i16* %valp
+  %val = load i16, ptr %valp
   %ext = zext i16 %val to i64
   %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
   %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x i64> %shf
 }
 
-define <vscale x 2 x i64> @ld1rh_i16_i64_sext(i16* %valp) {
+define <vscale x 2 x i64> @ld1rh_i16_i64_sext(ptr %valp) {
 ; CHECK-LABEL: ld1rh_i16_i64_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rsh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i16, i16* %valp
+  %val = load i16, ptr %valp
   %ext = sext i16 %val to i64
   %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
   %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x i64> %shf
 }
 
-define <vscale x 4 x i32> @ld1rw(i32* %valp) {
+define <vscale x 4 x i32> @ld1rw(ptr %valp) {
 ; CHECK-LABEL: ld1rw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valp
+  %val = load i32, ptr %valp
   %ins = insertelement <vscale x 4 x i32> undef, i32 %val, i32 0
   %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x i32> %shf
 }
 
-define <vscale x 4 x i32> @ld1rw_gep(i32* %valp) {
+define <vscale x 4 x i32> @ld1rw_gep(ptr %valp) {
 ; CHECK-LABEL: ld1rw_gep:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0, #252]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr i32, i32* %valp, i32 63
-  %val = load i32, i32* %valp2
+  %valp2 = getelementptr i32, ptr %valp, i32 63
+  %val = load i32, ptr %valp2
   %ins = insertelement <vscale x 4 x i32> undef, i32 %val, i32 0
   %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x i32> %shf
 }
 
-define <vscale x 4 x i32> @ld1rw_gep_out_of_range_up(i32* %valp) {
+define <vscale x 4 x i32> @ld1rw_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LABEL: ld1rw_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, #256
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr i32, i32* %valp, i32 64
-  %val = load i32, i32* %valp2
+  %valp2 = getelementptr i32, ptr %valp, i32 64
+  %val = load i32, ptr %valp2
   %ins = insertelement <vscale x 4 x i32> undef, i32 %val, i32 0
   %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x i32> %shf
 }
 
-define <vscale x 4 x i32> @ld1rw_gep_out_of_range_down(i32* %valp) {
+define <vscale x 4 x i32> @ld1rw_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LABEL: ld1rw_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub x8, x0, #4
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr i32, i32* %valp, i32 -1
-  %val = load i32, i32* %valp2
+  %valp2 = getelementptr i32, ptr %valp, i32 -1
+  %val = load i32, ptr %valp2
   %ins = insertelement <vscale x 4 x i32> undef, i32 %val, i32 0
   %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x i32> %shf
 }
 
-define <vscale x 2 x i64> @ld1rw_i32_i64_zext(i32* %valp) {
+define <vscale x 2 x i64> @ld1rw_i32_i64_zext(ptr %valp) {
 ; CHECK-LABEL: ld1rw_i32_i64_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rw { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valp
+  %val = load i32, ptr %valp
   %ext = zext i32 %val to i64
   %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
   %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x i64> %shf
 }
 
-define <vscale x 2 x i64> @ld1rw_i32_i64_sext(i32* %valp) {
+define <vscale x 2 x i64> @ld1rw_i32_i64_sext(ptr %valp) {
 ; CHECK-LABEL: ld1rw_i32_i64_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rsw { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i32, i32* %valp
+  %val = load i32, ptr %valp
   %ext = sext i32 %val to i64
   %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
   %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x i64> %shf
 }
 
-define <vscale x 2 x i64> @ld1rd(i64* %valp) {
+define <vscale x 2 x i64> @ld1rd(ptr %valp) {
 ; CHECK-LABEL: ld1rd:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load i64, i64* %valp
+  %val = load i64, ptr %valp
   %ins = insertelement <vscale x 2 x i64> undef, i64 %val, i32 0
   %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x i64> %shf
 }
 
-define <vscale x 2 x i64> @ld1rd_gep(i64* %valp) {
+define <vscale x 2 x i64> @ld1rd_gep(ptr %valp) {
 ; CHECK-LABEL: ld1rd_gep:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0, #504]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr i64, i64* %valp, i32 63
-  %val = load i64, i64* %valp2
+  %valp2 = getelementptr i64, ptr %valp, i32 63
+  %val = load i64, ptr %valp2
   %ins = insertelement <vscale x 2 x i64> undef, i64 %val, i32 0
   %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x i64> %shf
 }
 
-define <vscale x 2 x i64> @ld1rd_gep_out_of_range_up(i64* %valp) {
+define <vscale x 2 x i64> @ld1rd_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LABEL: ld1rd_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, #512
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr i64, i64* %valp, i32 64
-  %val = load i64, i64* %valp2
+  %valp2 = getelementptr i64, ptr %valp, i32 64
+  %val = load i64, ptr %valp2
   %ins = insertelement <vscale x 2 x i64> undef, i64 %val, i32 0
   %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x i64> %shf
 }
 
-define <vscale x 2 x i64> @ld1rd_gep_out_of_range_down(i64* %valp) {
+define <vscale x 2 x i64> @ld1rd_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LABEL: ld1rd_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub x8, x0, #8
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr i64, i64* %valp, i32 -1
-  %val = load i64, i64* %valp2
+  %valp2 = getelementptr i64, ptr %valp, i32 -1
+  %val = load i64, ptr %valp2
   %ins = insertelement <vscale x 2 x i64> undef, i64 %val, i32 0
   %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x i64> %shf
 }
 
-define <vscale x 8 x half> @ld1rh_half(half* %valp) {
+define <vscale x 8 x half> @ld1rh_half(ptr %valp) {
 ; CHECK-LABEL: ld1rh_half:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load half, half* %valp
+  %val = load half, ptr %valp
   %ins = insertelement <vscale x 8 x half> undef, half %val, i32 0
   %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x half> %shf
 }
 
-define <vscale x 8 x half> @ld1rh_half_gep(half* %valp) {
+define <vscale x 8 x half> @ld1rh_half_gep(ptr %valp) {
 ; CHECK-LABEL: ld1rh_half_gep:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0, #126]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr half, half* %valp, i32 63
-  %val = load half, half* %valp2
+  %valp2 = getelementptr half, ptr %valp, i32 63
+  %val = load half, ptr %valp2
   %ins = insertelement <vscale x 8 x half> undef, half %val, i32 0
   %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x half> %shf
 }
 
-define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_up(half* %valp) {
+define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LABEL: ld1rh_half_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, #128
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr half, half* %valp, i32 64
-  %val = load half, half* %valp2
+  %valp2 = getelementptr half, ptr %valp, i32 64
+  %val = load half, ptr %valp2
   %ins = insertelement <vscale x 8 x half> undef, half %val, i32 0
   %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x half> %shf
 }
 
-define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_down(half* %valp) {
+define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LABEL: ld1rh_half_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub x8, x0, #2
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr half, half* %valp, i32 -1
-  %val = load half, half* %valp2
+  %valp2 = getelementptr half, ptr %valp, i32 -1
+  %val = load half, ptr %valp2
   %ins = insertelement <vscale x 8 x half> undef, half %val, i32 0
   %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x half> %shf
 }
 
-define <vscale x 4 x half> @ld1rh_half_unpacked4(half* %valp) {
+define <vscale x 4 x half> @ld1rh_half_unpacked4(ptr %valp) {
 ; CHECK-LABEL: ld1rh_half_unpacked4:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load half, half* %valp
+  %val = load half, ptr %valp
   %ins = insertelement <vscale x 4 x half> undef, half %val, i32 0
   %shf = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x half> %shf
 }
 
-define <vscale x 4 x half> @ld1rh_half_unpacked4_gep(half* %valp) {
+define <vscale x 4 x half> @ld1rh_half_unpacked4_gep(ptr %valp) {
 ; CHECK-LABEL: ld1rh_half_unpacked4_gep:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x0, #126]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr half, half* %valp, i32 63
-  %val = load half, half* %valp2
+  %valp2 = getelementptr half, ptr %valp, i32 63
+  %val = load half, ptr %valp2
   %ins = insertelement <vscale x 4 x half> undef, half %val, i32 0
   %shf = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x half> %shf
 }
 
-define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_up(half* %valp) {
+define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LABEL: ld1rh_half_unpacked4_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, #128
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr half, half* %valp, i32 64
-  %val = load half, half* %valp2
+  %valp2 = getelementptr half, ptr %valp, i32 64
+  %val = load half, ptr %valp2
   %ins = insertelement <vscale x 4 x half> undef, half %val, i32 0
   %shf = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x half> %shf
 }
 
-define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_down(half* %valp) {
+define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LABEL: ld1rh_half_unpacked4_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub x8, x0, #2
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr half, half* %valp, i32 -1
-  %val = load half, half* %valp2
+  %valp2 = getelementptr half, ptr %valp, i32 -1
+  %val = load half, ptr %valp2
   %ins = insertelement <vscale x 4 x half> undef, half %val, i32 0
   %shf = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x half> %shf
 }
 
-define <vscale x 2 x half> @ld1rh_half_unpacked2(half* %valp) {
+define <vscale x 2 x half> @ld1rh_half_unpacked2(ptr %valp) {
 ; CHECK-LABEL: ld1rh_half_unpacked2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load half, half* %valp
+  %val = load half, ptr %valp
   %ins = insertelement <vscale x 2 x half> undef, half %val, i32 0
   %shf = shufflevector <vscale x 2 x half> %ins, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x half> %shf
 }
 
-define <vscale x 2 x half> @ld1rh_half_unpacked2_gep(half* %valp) {
+define <vscale x 2 x half> @ld1rh_half_unpacked2_gep(ptr %valp) {
 ; CHECK-LABEL: ld1rh_half_unpacked2_gep:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rh { z0.d }, p0/z, [x0, #126]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr half, half* %valp, i32 63
-  %val = load half, half* %valp2
+  %valp2 = getelementptr half, ptr %valp, i32 63
+  %val = load half, ptr %valp2
   %ins = insertelement <vscale x 2 x half> undef, half %val, i32 0
   %shf = shufflevector <vscale x 2 x half> %ins, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x half> %shf
 }
 
-define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_up(half* %valp) {
+define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LABEL: ld1rh_half_unpacked2_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, #128
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rh { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr half, half* %valp, i32 64
-  %val = load half, half* %valp2
+  %valp2 = getelementptr half, ptr %valp, i32 64
+  %val = load half, ptr %valp2
   %ins = insertelement <vscale x 2 x half> undef, half %val, i32 0
   %shf = shufflevector <vscale x 2 x half> %ins, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x half> %shf
 }
 
-define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_down(half* %valp) {
+define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LABEL: ld1rh_half_unpacked2_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub x8, x0, #2
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rh { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr half, half* %valp, i32 -1
-  %val = load half, half* %valp2
+  %valp2 = getelementptr half, ptr %valp, i32 -1
+  %val = load half, ptr %valp2
   %ins = insertelement <vscale x 2 x half> undef, half %val, i32 0
   %shf = shufflevector <vscale x 2 x half> %ins, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x half> %shf
 }
 
-define <vscale x 4 x float> @ld1rw_float(float* %valp) {
+define <vscale x 4 x float> @ld1rw_float(ptr %valp) {
 ; CHECK-LABEL: ld1rw_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load float, float* %valp
+  %val = load float, ptr %valp
   %ins = insertelement <vscale x 4 x float> undef, float %val, i32 0
   %shf = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x float> %shf
 }
 
-define <vscale x 4 x float> @ld1rw_float_gep(float* %valp) {
+define <vscale x 4 x float> @ld1rw_float_gep(ptr %valp) {
 ; CHECK-LABEL: ld1rw_float_gep:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0, #252]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr float, float* %valp, i32 63
-  %val = load float, float* %valp2
+  %valp2 = getelementptr float, ptr %valp, i32 63
+  %val = load float, ptr %valp2
   %ins = insertelement <vscale x 4 x float> undef, float %val, i32 0
   %shf = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x float> %shf
 }
 
-define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_up(float* %valp) {
+define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LABEL: ld1rw_float_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, #256
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr float, float* %valp, i32 64
-  %val = load float, float* %valp2
+  %valp2 = getelementptr float, ptr %valp, i32 64
+  %val = load float, ptr %valp2
   %ins = insertelement <vscale x 4 x float> undef, float %val, i32 0
   %shf = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x float> %shf
 }
 
-define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_down(float* %valp) {
+define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LABEL: ld1rw_float_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub x8, x0, #4
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr float, float* %valp, i32 -1
-  %val = load float, float* %valp2
+  %valp2 = getelementptr float, ptr %valp, i32 -1
+  %val = load float, ptr %valp2
   %ins = insertelement <vscale x 4 x float> undef, float %val, i32 0
   %shf = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
   ret <vscale x 4 x float> %shf
 }
 
-define <vscale x 2 x float> @ld1rw_float_unpacked2(float* %valp) {
+define <vscale x 2 x float> @ld1rw_float_unpacked2(ptr %valp) {
 ; CHECK-LABEL: ld1rw_float_unpacked2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rw { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load float, float* %valp
+  %val = load float, ptr %valp
   %ins = insertelement <vscale x 2 x float> undef, float %val, i32 0
   %shf = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x float> %shf
 }
 
-define <vscale x 2 x float> @ld1rw_float_unpacked2_gep(float* %valp) {
+define <vscale x 2 x float> @ld1rw_float_unpacked2_gep(ptr %valp) {
 ; CHECK-LABEL: ld1rw_float_unpacked2_gep:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rw { z0.d }, p0/z, [x0, #252]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr float, float* %valp, i32 63
-  %val = load float, float* %valp2
+  %valp2 = getelementptr float, ptr %valp, i32 63
+  %val = load float, ptr %valp2
   %ins = insertelement <vscale x 2 x float> undef, float %val, i32 0
   %shf = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x float> %shf
 }
 
-define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_up(float* %valp) {
+define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LABEL: ld1rw_float_unpacked2_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, #256
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rw { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr float, float* %valp, i32 64
-  %val = load float, float* %valp2
+  %valp2 = getelementptr float, ptr %valp, i32 64
+  %val = load float, ptr %valp2
   %ins = insertelement <vscale x 2 x float> undef, float %val, i32 0
   %shf = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x float> %shf
 }
 
-define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_down(float* %valp) {
+define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LABEL: ld1rw_float_unpacked2_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub x8, x0, #4
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rw { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr float, float* %valp, i32 -1
-  %val = load float, float* %valp2
+  %valp2 = getelementptr float, ptr %valp, i32 -1
+  %val = load float, ptr %valp2
   %ins = insertelement <vscale x 2 x float> undef, float %val, i32 0
   %shf = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x float> %shf
 }
 
-define <vscale x 2 x double> @ld1rd_double(double* %valp) {
+define <vscale x 2 x double> @ld1rd_double(ptr %valp) {
 ; CHECK-LABEL: ld1rd_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %val = load double, double* %valp
+  %val = load double, ptr %valp
   %ins = insertelement <vscale x 2 x double> undef, double %val, i32 0
   %shf = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x double> %shf
 }
 
-define <vscale x 2 x double> @ld1rd_double_gep(double* %valp) {
+define <vscale x 2 x double> @ld1rd_double_gep(ptr %valp) {
 ; CHECK-LABEL: ld1rd_double_gep:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0, #504]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr double, double* %valp, i32 63
-  %val = load double, double* %valp2
+  %valp2 = getelementptr double, ptr %valp, i32 63
+  %val = load double, ptr %valp2
   %ins = insertelement <vscale x 2 x double> undef, double %val, i32 0
   %shf = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x double> %shf
 }
 
-define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_up(double* %valp) {
+define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_up(ptr %valp) {
 ; CHECK-LABEL: ld1rd_double_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, #512
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr double, double* %valp, i32 64
-  %val = load double, double* %valp2
+  %valp2 = getelementptr double, ptr %valp, i32 64
+  %val = load double, ptr %valp2
   %ins = insertelement <vscale x 2 x double> undef, double %val, i32 0
   %shf = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x double> %shf
 }
 
-define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_down(double* %valp) {
+define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_down(ptr %valp) {
 ; CHECK-LABEL: ld1rd_double_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub x8, x0, #8
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
-  %valp2 = getelementptr double, double* %valp, i32 -1
-  %val = load double, double* %valp2
+  %valp2 = getelementptr double, ptr %valp, i32 -1
+  %val = load double, ptr %valp2
   %ins = insertelement <vscale x 2 x double> undef, double %val, i32 0
   %shf = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
   ret <vscale x 2 x double> %shf
 }
 
-define <vscale x 2 x double> @dupq_ld1rqd_f64(<2 x double>* %a) {
+define <vscale x 2 x double> @dupq_ld1rqd_f64(ptr %a) {
 ; CHECK-LABEL: dupq_ld1rqd_f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   %2 = tail call fast <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> %1, i64 0)
   %3 = tail call fast <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %2, i64 0)
   ret <vscale x 2 x double> %3
 }
 
-define <vscale x 4 x float> @dupq_ld1rqw_f32(<4 x float>* %a) {
+define <vscale x 4 x float> @dupq_ld1rqw_f32(ptr %a) {
 ; CHECK-LABEL: dupq_ld1rqw_f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   %2 = tail call fast <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %1, i64 0)
   %3 = tail call fast <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %2, i64 0)
   ret <vscale x 4 x float> %3
 }
 
-define <vscale x 8 x half> @dupq_ld1rqh_f16(<8 x half>* %a) {
+define <vscale x 8 x half> @dupq_ld1rqh_f16(ptr %a) {
 ; CHECK-LABEL: dupq_ld1rqh_f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %1 = load <8 x half>, <8 x half>* %a
+  %1 = load <8 x half>, ptr %a
   %2 = tail call fast <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> %1, i64 0)
   %3 = tail call fast <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %2, i64 0)
   ret <vscale x 8 x half> %3
 }
 
-define <vscale x 8 x bfloat> @dupq_ld1rqh_bf16(<8 x bfloat>* %a) #0 {
+define <vscale x 8 x bfloat> @dupq_ld1rqh_bf16(ptr %a) #0 {
 ; CHECK-LABEL: dupq_ld1rqh_bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %1 = load <8 x bfloat>, <8 x bfloat>* %a
+  %1 = load <8 x bfloat>, ptr %a
   %2 = tail call fast <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> %1, i64 0)
   %3 = tail call fast <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> %2, i64 0)
   ret <vscale x 8 x bfloat> %3
 }
 
-define <vscale x 2 x i64> @dupq_ld1rqd_i64(<2 x i64>* %a) #0 {
+define <vscale x 2 x i64> @dupq_ld1rqd_i64(ptr %a) #0 {
 ; CHECK-LABEL: dupq_ld1rqd_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   %2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> %1, i64 0)
   %3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2, i64 0)
   ret <vscale x 2 x i64> %3
 }
 
-define <vscale x 4 x i32> @dupq_ld1rqw_i32(<4 x i32>* %a) #0 {
+define <vscale x 4 x i32> @dupq_ld1rqw_i32(ptr %a) #0 {
 ; CHECK-LABEL: dupq_ld1rqw_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   %2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> %1, i64 0)
   %3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2, i64 0)
   ret <vscale x 4 x i32> %3
 }
 
-define <vscale x 8 x i16> @dupq_ld1rqw_i16(<8 x i16>* %a) #0 {
+define <vscale x 8 x i16> @dupq_ld1rqw_i16(ptr %a) #0 {
 ; CHECK-LABEL: dupq_ld1rqw_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   %2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> %1, i64 0)
   %3 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %2, i64 0)
   ret <vscale x 8 x i16> %3
 }
 
-define <vscale x 16 x i8> @dupq_ld1rqw_i8(<16 x i8>* %a) #0 {
+define <vscale x 16 x i8> @dupq_ld1rqw_i8(ptr %a) #0 {
 ; CHECK-LABEL: dupq_ld1rqw_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   %2 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> %1, i64 0)
   %3 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %2, i64 0)
   ret <vscale x 16 x i8> %3
@@ -833,355 +833,355 @@ define <vscale x 16 x i8> @dupq_ld1rqw_i8(<16 x i8>* %a) #0 {
 ;
 ;
 
-define <vscale x 16 x i8> @dup_ld1rb_i8_passthruundef_nxv16i8(<vscale x 16 x i1> %pg, i8* %addr) {
+define <vscale x 16 x i8> @dup_ld1rb_i8_passthruundef_nxv16i8(<vscale x 16 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rb_i8_passthruundef_nxv16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rb { z0.b }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i8, i8* %addr
+    %ld = load i8, ptr %addr
     %res = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, i8 %ld)
     ret <vscale x 16 x i8> %res
 }
-define <vscale x 8 x i16> @dup_ld1rh_i16_passthruundef_nxv8i16(<vscale x 8 x i1> %pg, i16* %addr) {
+define <vscale x 8 x i16> @dup_ld1rh_i16_passthruundef_nxv8i16(<vscale x 8 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rh_i16_passthruundef_nxv8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i16, i16* %addr
+    %ld = load i16, ptr %addr
     %res = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, i16 %ld)
     ret <vscale x 8 x i16> %res
 }
-define <vscale x 8 x i16> @dup_ld1rh_i8_passthruundef_nxv8i16_sext(<vscale x 8 x i1> %pg, i8* %addr) {
+define <vscale x 8 x i16> @dup_ld1rh_i8_passthruundef_nxv8i16_sext(<vscale x 8 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rh_i8_passthruundef_nxv8i16_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rsb { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i8, i8* %addr
+    %ld = load i8, ptr %addr
     %ext = sext i8 %ld to i16
     %res = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, i16 %ext)
     ret <vscale x 8 x i16> %res
 }
-define <vscale x 8 x i16> @dup_ld1rh_i8_passthruundef_nxv8i16_zext(<vscale x 8 x i1> %pg, i8* %addr) {
+define <vscale x 8 x i16> @dup_ld1rh_i8_passthruundef_nxv8i16_zext(<vscale x 8 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rh_i8_passthruundef_nxv8i16_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rb { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i8, i8* %addr
+    %ld = load i8, ptr %addr
     %ext = zext i8 %ld to i16
     %res = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, i16 %ext)
     ret <vscale x 8 x i16> %res
 }
-define <vscale x 4 x i32> @dup_ld1rs_i32_passthruundef_nxv4i32(<vscale x 4 x i1> %pg, i32* %addr) {
+define <vscale x 4 x i32> @dup_ld1rs_i32_passthruundef_nxv4i32(<vscale x 4 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rs_i32_passthruundef_nxv4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i32, i32* %addr
+    %ld = load i32, ptr %addr
     %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, i32 %ld)
     ret <vscale x 4 x i32> %res
 }
-define <vscale x 4 x i32> @dup_ld1rs_i8_passthruundef_nxv4i32_sext(<vscale x 4 x i1> %pg, i8* %addr) {
+define <vscale x 4 x i32> @dup_ld1rs_i8_passthruundef_nxv4i32_sext(<vscale x 4 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rs_i8_passthruundef_nxv4i32_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rsb { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i8, i8* %addr
+    %ld = load i8, ptr %addr
     %ext = sext i8 %ld to i32
     %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, i32 %ext)
     ret <vscale x 4 x i32> %res
 }
-define <vscale x 4 x i32> @dup_ld1rs_i8_passthruundef_nxv4i32_zext(<vscale x 4 x i1> %pg, i8* %addr) {
+define <vscale x 4 x i32> @dup_ld1rs_i8_passthruundef_nxv4i32_zext(<vscale x 4 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rs_i8_passthruundef_nxv4i32_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rb { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i8, i8* %addr
+    %ld = load i8, ptr %addr
     %ext = zext i8 %ld to i32
     %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, i32 %ext)
     ret <vscale x 4 x i32> %res
 }
-define <vscale x 4 x i32> @dup_ld1rs_i16_passthruundef_nxv4i32_sext(<vscale x 4 x i1> %pg, i16* %addr) {
+define <vscale x 4 x i32> @dup_ld1rs_i16_passthruundef_nxv4i32_sext(<vscale x 4 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rs_i16_passthruundef_nxv4i32_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rsh { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i16, i16* %addr
+    %ld = load i16, ptr %addr
     %ext = sext i16 %ld to i32
     %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, i32 %ext)
     ret <vscale x 4 x i32> %res
 }
-define <vscale x 4 x i32> @dup_ld1rs_i16_passthruundef_nxv4i32_zext(<vscale x 4 x i1> %pg, i16* %addr) {
+define <vscale x 4 x i32> @dup_ld1rs_i16_passthruundef_nxv4i32_zext(<vscale x 4 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rs_i16_passthruundef_nxv4i32_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i16, i16* %addr
+    %ld = load i16, ptr %addr
     %ext = zext i16 %ld to i32
     %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, i32 %ext)
     ret <vscale x 4 x i32> %res
 }
-define <vscale x 2 x i64> @dup_ld1rd_i64_passthruundef_nxv2i64(<vscale x 2 x i1> %pg, i64* %addr) {
+define <vscale x 2 x i64> @dup_ld1rd_i64_passthruundef_nxv2i64(<vscale x 2 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rd_i64_passthruundef_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i64, i64* %addr
+    %ld = load i64, ptr %addr
     %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ld)
     ret <vscale x 2 x i64> %res
 }
-define <vscale x 2 x i64> @dup_ld1rs_i8_passthruundef_nxv2i64_sext(<vscale x 2 x i1> %pg, i8* %addr) {
+define <vscale x 2 x i64> @dup_ld1rs_i8_passthruundef_nxv2i64_sext(<vscale x 2 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rs_i8_passthruundef_nxv2i64_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rsb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i8, i8* %addr
+    %ld = load i8, ptr %addr
     %ext = sext i8 %ld to i64
     %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
     ret <vscale x 2 x i64> %res
 }
-define <vscale x 2 x i64> @dup_ld1rs_i8_passthruundef_nxv2i64_zext(<vscale x 2 x i1> %pg, i8* %addr) {
+define <vscale x 2 x i64> @dup_ld1rs_i8_passthruundef_nxv2i64_zext(<vscale x 2 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rs_i8_passthruundef_nxv2i64_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rb { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i8, i8* %addr
+    %ld = load i8, ptr %addr
     %ext = zext i8 %ld to i64
     %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
     ret <vscale x 2 x i64> %res
 }
-define <vscale x 2 x i64> @dup_ld1rs_i16_passthruundef_nxv2i64_sext(<vscale x 2 x i1> %pg, i16* %addr) {
+define <vscale x 2 x i64> @dup_ld1rs_i16_passthruundef_nxv2i64_sext(<vscale x 2 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rs_i16_passthruundef_nxv2i64_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rsh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i16, i16* %addr
+    %ld = load i16, ptr %addr
     %ext = sext i16 %ld to i64
     %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
     ret <vscale x 2 x i64> %res
 }
-define <vscale x 2 x i64> @dup_ld1rs_i16_passthruundef_nxv2i64_zext(<vscale x 2 x i1> %pg, i16* %addr) {
+define <vscale x 2 x i64> @dup_ld1rs_i16_passthruundef_nxv2i64_zext(<vscale x 2 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rs_i16_passthruundef_nxv2i64_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i16, i16* %addr
+    %ld = load i16, ptr %addr
     %ext = zext i16 %ld to i64
     %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
     ret <vscale x 2 x i64> %res
 }
-define <vscale x 2 x i64> @dup_ld1rs_i32_passthruundef_nxv2i64_sext(<vscale x 2 x i1> %pg, i32* %addr) {
+define <vscale x 2 x i64> @dup_ld1rs_i32_passthruundef_nxv2i64_sext(<vscale x 2 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rs_i32_passthruundef_nxv2i64_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rsw { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i32, i32* %addr
+    %ld = load i32, ptr %addr
     %ext = sext i32 %ld to i64
     %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
     ret <vscale x 2 x i64> %res
 }
-define <vscale x 2 x i64> @dup_ld1rs_i32_passthruundef_nxv2i64_zext(<vscale x 2 x i1> %pg, i32* %addr) {
+define <vscale x 2 x i64> @dup_ld1rs_i32_passthruundef_nxv2i64_zext(<vscale x 2 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rs_i32_passthruundef_nxv2i64_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rw { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i32, i32* %addr
+    %ld = load i32, ptr %addr
     %ext = zext i32 %ld to i64
     %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
     ret <vscale x 2 x i64> %res
 }
-define <vscale x 8 x half> @dup_ld1rh_half_passthruundef_nxv8f16(<vscale x 8 x i1> %pg, half* %addr) {
+define <vscale x 8 x half> @dup_ld1rh_half_passthruundef_nxv8f16(<vscale x 8 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rh_half_passthruundef_nxv8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load half, half* %addr
+    %ld = load half, ptr %addr
     %res = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> %pg, half %ld)
     ret <vscale x 8 x half> %res
 }
-define <vscale x 4 x float> @dup_ld1rs_float_passthruundef_nxv4f32(<vscale x 4 x i1> %pg, float* %addr) {
+define <vscale x 4 x float> @dup_ld1rs_float_passthruundef_nxv4f32(<vscale x 4 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rs_float_passthruundef_nxv4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load float, float* %addr
+    %ld = load float, ptr %addr
     %res = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x i1> %pg, float %ld)
     ret <vscale x 4 x float> %res
 }
-define <vscale x 2 x double> @dup_ld1rd_double_passthruundef_nxv2f64(<vscale x 2 x i1> %pg, double* %addr) {
+define <vscale x 2 x double> @dup_ld1rd_double_passthruundef_nxv2f64(<vscale x 2 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rd_double_passthruundef_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load double, double* %addr
+    %ld = load double, ptr %addr
     %res = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> %pg, double %ld)
     ret <vscale x 2 x double> %res
 }
-define <vscale x 4 x half> @dup_ld1rh_half_passthruundef_nxv4f16(<vscale x 4 x i1> %pg, half* %addr) {
+define <vscale x 4 x half> @dup_ld1rh_half_passthruundef_nxv4f16(<vscale x 4 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rh_half_passthruundef_nxv4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load half, half* %addr
+    %ld = load half, ptr %addr
     %res = call <vscale x 4 x half> @llvm.aarch64.sve.dup.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> %pg, half %ld)
     ret <vscale x 4 x half> %res
 }
-define <vscale x 16 x i8> @dup_ld1rb_i8_passthruzero_nxv16i8(<vscale x 16 x i1> %pg, i8* %addr) {
+define <vscale x 16 x i8> @dup_ld1rb_i8_passthruzero_nxv16i8(<vscale x 16 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rb_i8_passthruzero_nxv16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rb { z0.b }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i8, i8* %addr
+    %ld = load i8, ptr %addr
     %res = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, i8 %ld)
     ret <vscale x 16 x i8> %res
 }
-define <vscale x 8 x i16> @dup_ld1rh_i16_passthruzero_nxv8i16(<vscale x 8 x i1> %pg, i16* %addr) {
+define <vscale x 8 x i16> @dup_ld1rh_i16_passthruzero_nxv8i16(<vscale x 8 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rh_i16_passthruzero_nxv8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i16, i16* %addr
+    %ld = load i16, ptr %addr
     %res = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, i16 %ld)
     ret <vscale x 8 x i16> %res
 }
-define <vscale x 4 x i32> @dup_ld1rs_i32_passthruzero_nxv4i32(<vscale x 4 x i1> %pg, i32* %addr) {
+define <vscale x 4 x i32> @dup_ld1rs_i32_passthruzero_nxv4i32(<vscale x 4 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rs_i32_passthruzero_nxv4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i32, i32* %addr
+    %ld = load i32, ptr %addr
     %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, i32 %ld)
     ret <vscale x 4 x i32> %res
 }
-define <vscale x 2 x i64> @dup_ld1rd_i64_passthruzero_nxv2i64(<vscale x 2 x i1> %pg, i64* %addr) {
+define <vscale x 2 x i64> @dup_ld1rd_i64_passthruzero_nxv2i64(<vscale x 2 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rd_i64_passthruzero_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load i64, i64* %addr
+    %ld = load i64, ptr %addr
     %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, i64 %ld)
     ret <vscale x 2 x i64> %res
 }
-define <vscale x 8 x half> @dup_ld1rh_half_passthruzero_nxv8f16(<vscale x 8 x i1> %pg, half* %addr) {
+define <vscale x 8 x half> @dup_ld1rh_half_passthruzero_nxv8f16(<vscale x 8 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rh_half_passthruzero_nxv8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load half, half* %addr
+    %ld = load half, ptr %addr
     %res = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %pg, half %ld)
     ret <vscale x 8 x half> %res
 }
-define <vscale x 4 x float> @dup_ld1rs_float_passthruzero_nxv4f32(<vscale x 4 x i1> %pg, float* %addr) {
+define <vscale x 4 x float> @dup_ld1rs_float_passthruzero_nxv4f32(<vscale x 4 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rs_float_passthruzero_nxv4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load float, float* %addr
+    %ld = load float, ptr %addr
     %res = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, float %ld)
     ret <vscale x 4 x float> %res
 }
-define <vscale x 2 x double> @dup_ld1rd_double_passthruzero_nxv2f64(<vscale x 2 x i1> %pg, double* %addr) {
+define <vscale x 2 x double> @dup_ld1rd_double_passthruzero_nxv2f64(<vscale x 2 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rd_double_passthruzero_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load double, double* %addr
+    %ld = load double, ptr %addr
     %res = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, double %ld)
     ret <vscale x 2 x double> %res
 }
-define <vscale x 4 x half> @dup_ld1rh_half_passthruzero_nxv4f16(<vscale x 4 x i1> %pg, half* %addr) {
+define <vscale x 4 x half> @dup_ld1rh_half_passthruzero_nxv4f16(<vscale x 4 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rh_half_passthruzero_nxv4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load half, half* %addr
+    %ld = load half, ptr %addr
     %res = call <vscale x 4 x half> @llvm.aarch64.sve.dup.nxv4f16(<vscale x 4 x half> zeroinitializer, <vscale x 4 x i1> %pg, half %ld)
     ret <vscale x 4 x half> %res
 }
-define <vscale x 2 x half> @dup_ld1rh_half_passthruzero_nxv2f16(<vscale x 2 x i1> %pg, half* %addr) {
+define <vscale x 2 x half> @dup_ld1rh_half_passthruzero_nxv2f16(<vscale x 2 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rh_half_passthruzero_nxv2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rh { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load half, half* %addr
+    %ld = load half, ptr %addr
     %res = call <vscale x 2 x half> @llvm.aarch64.sve.dup.nxv2f16(<vscale x 2 x half> zeroinitializer, <vscale x 2 x i1> %pg, half %ld)
     ret <vscale x 2 x half> %res
 }
-define <vscale x 2 x float> @dup_ld1rs_float_passthruzero_nxv2f32(<vscale x 2 x i1> %pg, float* %addr) {
+define <vscale x 2 x float> @dup_ld1rs_float_passthruzero_nxv2f32(<vscale x 2 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: dup_ld1rs_float_passthruzero_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1rw { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ret
-    %ld = load float, float* %addr
+    %ld = load float, ptr %addr
     %res = call <vscale x 2 x float> @llvm.aarch64.sve.dup.nxv2f32(<vscale x 2 x float> zeroinitializer, <vscale x 2 x i1> %pg, float %ld)
     ret <vscale x 2 x float> %res
 }
-define <vscale x 16 x i8> @negtest_dup_ld1rb_i8_passthru_nxv16i8(<vscale x 16 x i8> %pt, <vscale x 16 x i1> %pg, i8* %addr) {
+define <vscale x 16 x i8> @negtest_dup_ld1rb_i8_passthru_nxv16i8(<vscale x 16 x i8> %pt, <vscale x 16 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: negtest_dup_ld1rb_i8_passthru_nxv16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x0]
 ; CHECK-NEXT:    mov z0.b, p0/m, w8
 ; CHECK-NEXT:    ret
-    %ld = load i8, i8* %addr
+    %ld = load i8, ptr %addr
     %res = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %pt, <vscale x 16 x i1> %pg, i8 %ld)
     ret <vscale x 16 x i8> %res
 }
-define <vscale x 8 x i16> @negtest_dup_ld1rh_i16_passthru_nxv8i16(<vscale x 8 x i16> %pt, <vscale x 8 x i1> %pg, i16* %addr) {
+define <vscale x 8 x i16> @negtest_dup_ld1rh_i16_passthru_nxv8i16(<vscale x 8 x i16> %pt, <vscale x 8 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: negtest_dup_ld1rh_i16_passthru_nxv8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x0]
 ; CHECK-NEXT:    mov z0.h, p0/m, w8
 ; CHECK-NEXT:    ret
-    %ld = load i16, i16* %addr
+    %ld = load i16, ptr %addr
     %res = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> %pt, <vscale x 8 x i1> %pg, i16 %ld)
     ret <vscale x 8 x i16> %res
 }
-define <vscale x 4 x i32> @negtest_dup_ld1rs_i32_passthru_nxv4i32(<vscale x 4 x i32> %pt, <vscale x 4 x i1> %pg, i32* %addr) {
+define <vscale x 4 x i32> @negtest_dup_ld1rs_i32_passthru_nxv4i32(<vscale x 4 x i32> %pt, <vscale x 4 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: negtest_dup_ld1rs_i32_passthru_nxv4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    mov z0.s, p0/m, w8
 ; CHECK-NEXT:    ret
-    %ld = load i32, i32* %addr
+    %ld = load i32, ptr %addr
     %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> %pt, <vscale x 4 x i1> %pg, i32 %ld)
     ret <vscale x 4 x i32> %res
 }
-define <vscale x 2 x i64> @negtest_dup_ld1rd_i64_passthru_nxv2i64(<vscale x 2 x i64> %pt, <vscale x 2 x i1> %pg, i64* %addr) {
+define <vscale x 2 x i64> @negtest_dup_ld1rd_i64_passthru_nxv2i64(<vscale x 2 x i64> %pt, <vscale x 2 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: negtest_dup_ld1rd_i64_passthru_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr x8, [x0]
 ; CHECK-NEXT:    mov z0.d, p0/m, x8
 ; CHECK-NEXT:    ret
-    %ld = load i64, i64* %addr
+    %ld = load i64, ptr %addr
     %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> %pt, <vscale x 2 x i1> %pg, i64 %ld)
     ret <vscale x 2 x i64> %res
 }
-define <vscale x 8 x half> @negtest_dup_ld1rh_half_passthru_nxv8f16(<vscale x 8 x half> %pt, <vscale x 8 x i1> %pg, half* %addr) {
+define <vscale x 8 x half> @negtest_dup_ld1rh_half_passthru_nxv8f16(<vscale x 8 x half> %pt, <vscale x 8 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: negtest_dup_ld1rh_half_passthru_nxv8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr h1, [x0]
 ; CHECK-NEXT:    mov z0.h, p0/m, h1
 ; CHECK-NEXT:    ret
-    %ld = load half, half* %addr
+    %ld = load half, ptr %addr
     %res = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> %pt, <vscale x 8 x i1> %pg, half %ld)
     ret <vscale x 8 x half> %res
 }
-define <vscale x 4 x float> @negtest_dup_ld1rs_float_passthru_nxv4f32(<vscale x 4 x float> %pt, <vscale x 4 x i1> %pg, float* %addr) {
+define <vscale x 4 x float> @negtest_dup_ld1rs_float_passthru_nxv4f32(<vscale x 4 x float> %pt, <vscale x 4 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: negtest_dup_ld1rs_float_passthru_nxv4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s1, [x0]
 ; CHECK-NEXT:    mov z0.s, p0/m, s1
 ; CHECK-NEXT:    ret
-    %ld = load float, float* %addr
+    %ld = load float, ptr %addr
     %res = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> %pt, <vscale x 4 x i1> %pg, float %ld)
     ret <vscale x 4 x float> %res
 }
-define <vscale x 2 x double> @negtest_dup_ld1rd_double_passthru_nxv2f64(<vscale x 2 x double> %pt, <vscale x 2 x i1> %pg, double* %addr) {
+define <vscale x 2 x double> @negtest_dup_ld1rd_double_passthru_nxv2f64(<vscale x 2 x double> %pt, <vscale x 2 x i1> %pg, ptr %addr) {
 ; CHECK-LABEL: negtest_dup_ld1rd_double_passthru_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d1, [x0]
 ; CHECK-NEXT:    mov z0.d, p0/m, d1
 ; CHECK-NEXT:    ret
-    %ld = load double, double* %addr
+    %ld = load double, ptr %addr
     %res = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> %pt, <vscale x 2 x i1> %pg, double %ld)
     ret <vscale x 2 x double> %res
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
index 25d0a471c29a0..ef77a62aa0e8d 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-scaled.ll
@@ -6,96 +6,96 @@
 ; unscaled unpacked 32-bit offsets
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define <vscale x 2 x i64> @masked_gather_nxv2i16(i16* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i16, i16* %base, <vscale x 2 x i32> %offsets
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %ptrs = getelementptr i16, ptr %base, <vscale x 2 x i32> %offsets
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i32(i32* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, sxtw #2]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i32, i32* %base, <vscale x 2 x i32> %offsets
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 2 x i32> %offsets
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i64(i64* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, sxtw #3]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i64, i64* %base, <vscale x 2 x i32> %offsets
-  %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+  %ptrs = getelementptr i64, ptr %base, <vscale x 2 x i32> %offsets
+  %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
   ret <vscale x 2 x i64> %vals
 }
 
-define <vscale x 2 x half> @masked_gather_nxv2f16(half* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @masked_gather_nxv2f16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr half, half* %base, <vscale x 2 x i32> %offsets
-  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+  %ptrs = getelementptr half, ptr %base, <vscale x 2 x i32> %offsets
+  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
   ret <vscale x 2 x half> %vals
 }
 
-define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(bfloat* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 2 x i32> %offsets
-  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  %ptrs = getelementptr bfloat, ptr %base, <vscale x 2 x i32> %offsets
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
   ret <vscale x 2 x bfloat> %vals
 }
 
-define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @masked_gather_nxv2f32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, sxtw #2]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr float, float* %base, <vscale x 2 x i32> %offsets
-  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+  %ptrs = getelementptr float, ptr %base, <vscale x 2 x i32> %offsets
+  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
   ret <vscale x 2 x float> %vals
 }
 
-define <vscale x 2 x double> @masked_gather_nxv2f64(double* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @masked_gather_nxv2f64(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, sxtw #3]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr double, double* %base, <vscale x 2 x i32> %offsets
-  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+  %ptrs = getelementptr double, ptr %base, <vscale x 2 x i32> %offsets
+  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
   ret <vscale x 2 x double> %vals
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i16(i16* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i16, i16* %base, <vscale x 2 x i32> %offsets
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %ptrs = getelementptr i16, ptr %base, <vscale x 2 x i32> %offsets
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i32(i32* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw #2]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i32, i32* %base, <vscale x 2 x i32> %offsets
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 2 x i32> %offsets
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
@@ -104,79 +104,79 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i32(i32* %base, <vscale x 2 x i32>
 ; unscaled packed 32-bit offsets
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define <vscale x 4 x i32> @masked_gather_nxv4i16(i16* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_gather_nxv4i16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i16, i16* %base, <vscale x 4 x i32> %offsets
-  %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+  %ptrs = getelementptr i16, ptr %base, <vscale x 4 x i32> %offsets
+  %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
   %vals.zext = zext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vals.zext
 }
 
-define <vscale x 4 x i32> @masked_gather_nxv4i32(i32* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_gather_nxv4i32(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, sxtw #2]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i32, i32* %base, <vscale x 4 x i32> %offsets
-  %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i32> %offsets
+  %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
   ret <vscale x 4 x i32> %vals
 }
 
-define <vscale x 4 x half> @masked_gather_nxv4f16(half* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @masked_gather_nxv4f16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr half, half* %base, <vscale x 4 x i32> %offsets
-  %vals = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
+  %ptrs = getelementptr half, ptr %base, <vscale x 4 x i32> %offsets
+  %vals = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
   ret <vscale x 4 x half> %vals
 }
 
-define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(bfloat* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
+define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv4bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 4 x i32> %offsets
-  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+  %ptrs = getelementptr bfloat, ptr %base, <vscale x 4 x i32> %offsets
+  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
   ret <vscale x 4 x bfloat> %vals
 }
 
-define <vscale x 4 x float> @masked_gather_nxv4f32(float* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x float> @masked_gather_nxv4f32(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, sxtw #2]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr float, float* %base, <vscale x 4 x i32> %offsets
-  %vals = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
+  %ptrs = getelementptr float, ptr %base, <vscale x 4 x i32> %offsets
+  %vals = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
   ret <vscale x 4 x float> %vals
 }
 
-define <vscale x 4 x i32> @masked_sgather_nxv4i16(i16* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_sgather_nxv4i16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0, z0.s, sxtw #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i16, i16* %base, <vscale x 4 x i32> %offsets
-  %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+  %ptrs = getelementptr i16, ptr %base, <vscale x 4 x i32> %offsets
+  %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
   %vals.sext = sext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vals.sext
 }
 
-declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
-declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
-declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
-declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
-declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
-declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
-declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
-
-declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
-declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
-declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
-declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
-declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
+declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
+declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+
+declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
+declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
+declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
 attributes #0 = { "target-features"="+sve,+bf16" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
index b9bf9049d46f8..5e7c79b92dabc 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-signed-unscaled.ll
@@ -6,127 +6,127 @@
 ; unscaled unpacked 32-bit offsets
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define <vscale x 2 x i64> @masked_gather_nxv2i8(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i8(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, i8* %base, <vscale x 2 x i32> %offsets
-  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
+  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %vals.zext = zext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i16(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i32> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i16*>
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i32(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i32> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i32*>
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i64(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i32> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i64*>
-  %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
   ret <vscale x 2 x i64> %vals
 }
 
-define <vscale x 2 x half> @masked_gather_nxv2f16(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @masked_gather_nxv2f16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i32> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x half*>
-  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
   ret <vscale x 2 x half> %vals
 }
 
-define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i32> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x bfloat*>
-  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
   ret <vscale x 2 x bfloat> %vals
 }
 
-define <vscale x 2 x float> @masked_gather_nxv2f32(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @masked_gather_nxv2f32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i32> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x float*>
-  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
   ret <vscale x 2 x float> %vals
 }
 
-define <vscale x 2 x double> @masked_gather_nxv2f64(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @masked_gather_nxv2f64(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i32> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x double*>
-  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
   ret <vscale x 2 x double> %vals
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i8(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i8(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, i8* %base, <vscale x 2 x i32> %offsets
-  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
+  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %vals.sext = sext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i16(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i32> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i16*>
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i32(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i32> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i32*>
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
@@ -135,109 +135,109 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i32(i8* %base, <vscale x 2 x i32>
 ; unscaled packed 32-bit offsets
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define <vscale x 4 x i32> @masked_gather_nxv4i8(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_gather_nxv4i8(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, i8* %base, <vscale x 4 x i32> %offsets
-  %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x i8*> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+  %ptrs = getelementptr i8, ptr %base, <vscale x 4 x i32> %offsets
+  %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
   %vals.zext = zext <vscale x 4 x i8> %vals to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vals.zext
 }
 
-define <vscale x 4 x i32> @masked_gather_nxv4i16(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_gather_nxv4i16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i32> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x i16*>
-  %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i32> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
   %vals.zext = zext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vals.zext
 }
 
-define <vscale x 4 x i32> @masked_gather_nxv4i32(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_gather_nxv4i32(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i32> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x i32*>
-  %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i32> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
   ret <vscale x 4 x i32> %vals
 }
 
-define <vscale x 4 x half> @masked_gather_nxv4f16(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @masked_gather_nxv4f16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i32> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x half*>
-  %vals = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i32> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  %vals = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
   ret <vscale x 4 x half> %vals
 }
 
-define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
+define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv4bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i32> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x bfloat*>
-  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i32> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
   ret <vscale x 4 x bfloat> %vals
 }
 
-define <vscale x 4 x float> @masked_gather_nxv4f32(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x float> @masked_gather_nxv4f32(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i32> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x float*>
-  %vals = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i32> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  %vals = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
   ret <vscale x 4 x float> %vals
 }
 
-define <vscale x 4 x i32> @masked_sgather_nxv4i8(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_sgather_nxv4i8(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, i8* %base, <vscale x 4 x i32> %offsets
-  %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x i8*> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+  %ptrs = getelementptr i8, ptr %base, <vscale x 4 x i32> %offsets
+  %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
   %vals.sext = sext <vscale x 4 x i8> %vals to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vals.sext
 }
 
-define <vscale x 4 x i32> @masked_sgather_nxv4i16(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_sgather_nxv4i16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0, z0.s, sxtw]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i32> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x i16*>
-  %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i32> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
   %vals.sext = sext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vals.sext
 }
 
-declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
-declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
-declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
-declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
-declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
-declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
-declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
-declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
-
-declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x i8*>, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
-declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
-declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
-declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
-declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
-declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
+declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
+declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
+declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+
+declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
+declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
+declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
+declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
 attributes #0 = { "target-features"="+sve,+bf16" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
index c7f8a76775278..895fda758748b 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-scaled.ll
@@ -6,105 +6,105 @@
 ; unscaled unpacked 32-bit offsets
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define <vscale x 2 x i64> @masked_gather_nxv2i16(i16* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %ptrs = getelementptr i16, i16* %base, <vscale x 2 x i64> %offsets.zext
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %ptrs = getelementptr i16, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i32(i32* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, uxtw #2]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %ptrs = getelementptr i32, i32* %base, <vscale x 2 x i64> %offsets.zext
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i64(i64* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, uxtw #3]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %ptrs = getelementptr i64, i64* %base, <vscale x 2 x i64> %offsets.zext
-  %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+  %ptrs = getelementptr i64, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
   ret <vscale x 2 x i64> %vals
 }
 
-define <vscale x 2 x half> @masked_gather_nxv2f16(half* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @masked_gather_nxv2f16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %ptrs = getelementptr half, half* %base, <vscale x 2 x i64> %offsets.zext
-  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+  %ptrs = getelementptr half, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
   ret <vscale x 2 x half> %vals
 }
 
-define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(bfloat* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 2 x i64> %offsets.zext
-  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  %ptrs = getelementptr bfloat, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
   ret <vscale x 2 x bfloat> %vals
 }
 
-define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @masked_gather_nxv2f32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, uxtw #2]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %ptrs = getelementptr float, float* %base, <vscale x 2 x i64> %offsets.zext
-  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+  %ptrs = getelementptr float, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
   ret <vscale x 2 x float> %vals
 }
 
-define <vscale x 2 x double> @masked_gather_nxv2f64(double* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @masked_gather_nxv2f64(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, uxtw #3]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %ptrs = getelementptr double, double* %base, <vscale x 2 x i64> %offsets.zext
-  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+  %ptrs = getelementptr double, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
   ret <vscale x 2 x double> %vals
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i16(i16* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw #1]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %ptrs = getelementptr i16, i16* %base, <vscale x 2 x i64> %offsets.zext
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %ptrs = getelementptr i16, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i32(i32* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw #2]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %ptrs = getelementptr i32, i32* %base, <vscale x 2 x i64> %offsets.zext
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
@@ -113,85 +113,85 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i32(i32* %base, <vscale x 2 x i32>
 ; unscaled packed 32-bit offsets
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define <vscale x 4 x i32> @masked_gather_nxv4i16(i16* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_gather_nxv4i16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
-  %ptrs = getelementptr i16, i16* %base, <vscale x 4 x i64> %offsets.zext
-  %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+  %ptrs = getelementptr i16, ptr %base, <vscale x 4 x i64> %offsets.zext
+  %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
   %vals.zext = zext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vals.zext
 }
 
-define <vscale x 4 x i32> @masked_gather_nxv4i32(i32* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_gather_nxv4i32(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
-  %ptrs = getelementptr i32, i32* %base, <vscale x 4 x i64> %offsets.zext
-  %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i64> %offsets.zext
+  %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
   ret <vscale x 4 x i32> %vals
 }
 
-define <vscale x 4 x half> @masked_gather_nxv4f16(half* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @masked_gather_nxv4f16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
-  %ptrs = getelementptr half, half* %base, <vscale x 4 x i64> %offsets.zext
-  %vals = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
+  %ptrs = getelementptr half, ptr %base, <vscale x 4 x i64> %offsets.zext
+  %vals = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
   ret <vscale x 4 x half> %vals
 }
 
-define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(bfloat* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
+define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv4bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
-  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 4 x i64> %offsets.zext
-  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+  %ptrs = getelementptr bfloat, ptr %base, <vscale x 4 x i64> %offsets.zext
+  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
   ret <vscale x 4 x bfloat> %vals
 }
 
-define <vscale x 4 x float> @masked_gather_nxv4f32(float* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x float> @masked_gather_nxv4f32(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
-  %ptrs = getelementptr float, float* %base, <vscale x 4 x i64> %offsets.zext
-  %vals = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
+  %ptrs = getelementptr float, ptr %base, <vscale x 4 x i64> %offsets.zext
+  %vals = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
   ret <vscale x 4 x float> %vals
 }
 
-define <vscale x 4 x i32> @masked_sgather_nxv4i16(i16* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_sgather_nxv4i16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0, z0.s, uxtw #1]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
-  %ptrs = getelementptr i16, i16* %base, <vscale x 4 x i64> %offsets.zext
-  %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+  %ptrs = getelementptr i16, ptr %base, <vscale x 4 x i64> %offsets.zext
+  %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
   %vals.sext = sext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vals.sext
 }
 
-declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
-declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
-declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
-declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
-declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
-declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
-declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
-
-declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
-declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
-declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
-declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
-declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
+declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
+declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+
+declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
+declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
+declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
 attributes #0 = { "target-features"="+sve,+bf16" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
index fe7290fb1fe82..f5e31c0cedd1e 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-32b-unsigned-unscaled.ll
@@ -6,138 +6,138 @@
 ; unscaled unpacked 32-bit offsets
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define <vscale x 2 x i64> @masked_gather_nxv2i8(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i8(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets.zext
-  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %vals.zext = zext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i16(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets.zext
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i16*>
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i32(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets.zext
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i32*>
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i64(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets.zext
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i64*>
-  %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
   ret <vscale x 2 x i64> %vals
 }
 
-define <vscale x 2 x half> @masked_gather_nxv2f16(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @masked_gather_nxv2f16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets.zext
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x half*>
-  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
   ret <vscale x 2 x half> %vals
 }
 
-define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets.zext
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x bfloat*>
-  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
   ret <vscale x 2 x bfloat> %vals
 }
 
-define <vscale x 2 x float> @masked_gather_nxv2f32(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @masked_gather_nxv2f32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets.zext
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x float*>
-  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
   ret <vscale x 2 x float> %vals
 }
 
-define <vscale x 2 x double> @masked_gather_nxv2f64(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @masked_gather_nxv2f64(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets.zext
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x double*>
-  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
   ret <vscale x 2 x double> %vals
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i8(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i8(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets.zext
-  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %vals.sext = sext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i16(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets.zext
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i16*>
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i32(i8* %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets.zext
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i32*>
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
@@ -146,117 +146,117 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i32(i8* %base, <vscale x 2 x i32>
 ; unscaled packed 32-bit offsets
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define <vscale x 4 x i32> @masked_gather_nxv4i8(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_gather_nxv4i8(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
-  %ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets.zext
-  %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x i8*> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+  %ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets.zext
+  %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
   %vals.zext = zext <vscale x 4 x i8> %vals to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vals.zext
 }
 
-define <vscale x 4 x i32> @masked_gather_nxv4i16(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_gather_nxv4i16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets.zext
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x i16*>
-  %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
   %vals.zext = zext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vals.zext
 }
 
-define <vscale x 4 x i32> @masked_gather_nxv4i32(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_gather_nxv4i32(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets.zext
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x i32*>
-  %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  %vals = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
   ret <vscale x 4 x i32> %vals
 }
 
-define <vscale x 4 x half> @masked_gather_nxv4f16(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @masked_gather_nxv4f16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets.zext
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x half*>
-  %vals = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  %vals = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
   ret <vscale x 4 x half> %vals
 }
 
-define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
+define <vscale x 4 x bfloat> @masked_gather_nxv4bf16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv4bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets.zext
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x bfloat*>
-  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  %vals = call <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x bfloat> undef)
   ret <vscale x 4 x bfloat> %vals
 }
 
-define <vscale x 4 x float> @masked_gather_nxv4f32(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x float> @masked_gather_nxv4f32(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets.zext
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x float*>
-  %vals = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  %vals = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr> %ptrs, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
   ret <vscale x 4 x float> %vals
 }
 
-define <vscale x 4 x i32> @masked_sgather_nxv4i8(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_sgather_nxv4i8(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
-  %ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets.zext
-  %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x i8*> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+  %ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets.zext
+  %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
   %vals.sext = sext <vscale x 4 x i8> %vals to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vals.sext
 }
 
-define <vscale x 4 x i32> @masked_sgather_nxv4i16(i8* %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @masked_sgather_nxv4i16(ptr %base, <vscale x 4 x i32> %offsets, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x0, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 4 x i32> %offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets.zext
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x i16*>
-  %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets.zext
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  %vals = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr> %ptrs, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
   %vals.sext = sext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %vals.sext
 }
 
-declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
-declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
-declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
-declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
-declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
-declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
-declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
-declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
+declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
+declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
+declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 
-declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x i8*>, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
-declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x i16*>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
-declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x i32*>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
-declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
-declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
-declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x float*>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
+declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
+declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 4 x bfloat> @llvm.masked.gather.nxv4bf16(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x bfloat>)
+declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
 attributes #0 = { "target-features"="+sve,+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
index c594f2c488e31..ce25b689c8d0e 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-scaled.ll
@@ -2,105 +2,105 @@
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve -aarch64-enable-mgather-combine=0 < %s | FileCheck %s
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve -aarch64-enable-mgather-combine=1 < %s | FileCheck %s
 
-define <vscale x 2 x i64> @masked_gather_nxv2i16(i16* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i16, i16* %base, <vscale x 2 x i64> %offsets
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %ptrs = getelementptr i16, ptr %base, <vscale x 2 x i64> %offsets
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i32(i32* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i32, i32* %base, <vscale x 2 x i64> %offsets
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 2 x i64> %offsets
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i64(i64* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, lsl #3]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i64, i64* %base, <vscale x 2 x i64> %offsets
-  %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+  %ptrs = getelementptr i64, ptr %base, <vscale x 2 x i64> %offsets
+  %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
   ret <vscale x 2 x i64> %vals
 }
 
-define <vscale x 2 x half> @masked_gather_nxv2f16(half* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @masked_gather_nxv2f16(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr half, half* %base, <vscale x 2 x i64> %offsets
-  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+  %ptrs = getelementptr half, ptr %base, <vscale x 2 x i64> %offsets
+  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
   ret <vscale x 2 x half> %vals
 }
 
-define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(bfloat* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) #0 {
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 2 x i64> %offsets
-  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  %ptrs = getelementptr bfloat, ptr %base, <vscale x 2 x i64> %offsets
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
   ret <vscale x 2 x bfloat> %vals
 }
 
-define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @masked_gather_nxv2f32(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr float, float* %base, <vscale x 2 x i64> %offsets
-  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+  %ptrs = getelementptr float, ptr %base, <vscale x 2 x i64> %offsets
+  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
   ret <vscale x 2 x float> %vals
 }
 
-define <vscale x 2 x double> @masked_gather_nxv2f64(double* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @masked_gather_nxv2f64(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d, lsl #3]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr double, double* %base, <vscale x 2 x i64> %offsets
-  %vals.sext = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+  %ptrs = getelementptr double, ptr %base, <vscale x 2 x i64> %offsets
+  %vals.sext = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
   ret <vscale x 2 x double> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i16(i16* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i16(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i16, i16* %base, <vscale x 2 x i64> %offsets
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %ptrs = getelementptr i16, ptr %base, <vscale x 2 x i64> %offsets
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i32(i32* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i32(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i32, i32* %base, <vscale x 2 x i64> %offsets
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 2 x i64> %offsets
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
-declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
-declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
-declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
-declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
-declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
-declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
+declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
+declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 attributes #0 = { "target-features"="+sve,+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
index beb5bf3d28baf..ba9be548660d4 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-64b-unscaled.ll
@@ -2,137 +2,137 @@
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve -aarch64-enable-mgather-combine=0 < %s | FileCheck %s
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve -aarch64-enable-mgather-combine=1 < %s | FileCheck %s
 
-define <vscale x 2 x i64> @masked_gather_nxv2i8(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i8(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %vals.zext = zext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i16(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i16*>
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i32(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i32*>
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i64(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i64*>
-  %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
   ret <vscale x 2 x i64> %vals
 }
 
-define <vscale x 2 x half> @masked_gather_nxv2f16(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @masked_gather_nxv2f16(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x half*>
-  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
   ret <vscale x 2 x half> %vals
 }
 
-define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) #0 {
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x bfloat*>
-  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
   ret <vscale x 2 x bfloat> %vals
 }
 
-define <vscale x 2 x float> @masked_gather_nxv2f32(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @masked_gather_nxv2f32(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x float*>
-  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
   ret <vscale x 2 x float> %vals
 }
 
-define <vscale x 2 x double> @masked_gather_nxv2f64(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @masked_gather_nxv2f64(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x double*>
-  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
   ret <vscale x 2 x double> %vals
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i8(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i8(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %vals.sext = sext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i16(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i16(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i16*>
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i32(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i32(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i32*>
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
-declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
-declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
-declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
-declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
-declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
-declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
-declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
+declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
+declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
+declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 attributes #0 = { "target-features"="+sve,+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
index ed3f784160c24..767789866a0bf 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
@@ -6,7 +6,7 @@ target triple = "aarch64-linux-gnu"
 
 ; Test for multiple uses of the mgather where the s/zext should not be combined
 
-define <vscale x 2 x i64> @masked_sgather_sext(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask, <vscale x 2 x i8> %vals) #0 {
+define <vscale x 2 x i64> @masked_sgather_sext(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask, <vscale x 2 x i8> %vals) #0 {
 ; CHECK-LABEL: masked_sgather_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, z0.d]
@@ -17,8 +17,8 @@ define <vscale x 2 x i64> @masked_sgather_sext(i8* %base, <vscale x 2 x i64> %of
 ; CHECK-NEXT:    sxtb z0.d, p0/m, z0.d
 ; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z2.d
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %data.sext = sext <vscale x 2 x i8> %data to <vscale x 2 x i64>
   %add = add <vscale x 2 x i8> %data, %vals
   %add.sext = sext <vscale x 2 x i8> %add to <vscale x 2 x i64>
@@ -26,7 +26,7 @@ define <vscale x 2 x i64> @masked_sgather_sext(i8* %base, <vscale x 2 x i64> %of
   ret <vscale x 2 x i64> %mul
 }
 
-define <vscale x 2 x i64> @masked_sgather_zext(i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask, <vscale x 2 x i8> %vals) #0 {
+define <vscale x 2 x i64> @masked_sgather_zext(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask, <vscale x 2 x i8> %vals) #0 {
 ; CHECK-LABEL: masked_sgather_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, z0.d]
@@ -36,8 +36,8 @@ define <vscale x 2 x i64> @masked_sgather_zext(i8* %base, <vscale x 2 x i64> %of
 ; CHECK-NEXT:    and z1.d, z1.d, #0xff
 ; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %data.zext = zext <vscale x 2 x i8> %data to <vscale x 2 x i64>
   %add = add <vscale x 2 x i8> %data, %vals
   %add.zext = zext <vscale x 2 x i8> %add to <vscale x 2 x i64>
@@ -48,36 +48,36 @@ define <vscale x 2 x i64> @masked_sgather_zext(i8* %base, <vscale x 2 x i64> %of
 ; Tests that exercise various type legalisation scenarios for ISD::MGATHER.
 
 ; Code generate load of an illegal datatype via promotion.
-define <vscale x 2 x i8> @masked_gather_nxv2i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %mask) #0 {
+define <vscale x 2 x i8> @masked_gather_nxv2i8(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [z0.d]
 ; CHECK-NEXT:    ret
-  %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %data = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   ret <vscale x 2 x i8> %data
 }
 
 ; Code generate load of an illegal datatype via promotion.
-define <vscale x 2 x i16> @masked_gather_nxv2i16(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %mask) #0 {
+define <vscale x 2 x i16> @masked_gather_nxv2i16(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [z0.d]
 ; CHECK-NEXT:    ret
-  %data = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %data = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   ret <vscale x 2 x i16> %data
 }
 
 ; Code generate load of an illegal datatype via promotion.
-define <vscale x 2 x i32> @masked_gather_nxv2i32(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %mask) #0 {
+define <vscale x 2 x i32> @masked_gather_nxv2i32(<vscale x 2 x ptr> %ptrs, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [z0.d]
 ; CHECK-NEXT:    ret
-  %data = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %data = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   ret <vscale x 2 x i32> %data
 }
 
-define <vscale x 4 x half> @masked_gather_nxv4f16(<vscale x 4 x half*> %ptrs, <vscale x 4 x i1> %mask) #0 {
+define <vscale x 4 x half> @masked_gather_nxv4f16(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    punpkhi p1.h, p0.b
@@ -86,23 +86,23 @@ define <vscale x 4 x half> @masked_gather_nxv4f16(<vscale x 4 x half*> %ptrs, <v
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [z0.d]
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
-  %data = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*> %ptrs, i32 0, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
+  %data = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
   ret <vscale x 4 x half> %data
 }
 
-define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i16> %indices, <vscale x 2 x i1> %mask) #0 {
+define <vscale x 2 x float> @masked_gather_nxv2f32(ptr %base, <vscale x 2 x i16> %indices, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    sxth z0.d, p1/m, z0.d
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr float, float* %base, <vscale x 2 x i16> %indices
-  %data = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+  %ptrs = getelementptr float, ptr %base, <vscale x 2 x i16> %indices
+  %data = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
   ret <vscale x 2 x float> %data
 }
 
-define <vscale x 8 x half> @masked_gather_nxv8f16(<vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %mask) #0 {
+define <vscale x 8 x half> @masked_gather_nxv8f16(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    punpkhi p1.h, p0.b
@@ -119,11 +119,11 @@ define <vscale x 8 x half> @masked_gather_nxv8f16(<vscale x 8 x half*> %ptrs, <v
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z2.h
 ; CHECK-NEXT:    ret
-  %data = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16(<vscale x 8 x half*> %ptrs, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x half> undef)
+  %data = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16(<vscale x 8 x ptr> %ptrs, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x half> undef)
   ret <vscale x 8 x half> %data
 }
 
-define <vscale x 8 x bfloat> @masked_gather_nxv8bf16(bfloat* %base, <vscale x 8 x i16> %indices, <vscale x 8 x i1> %mask) #0 {
+define <vscale x 8 x bfloat> @masked_gather_nxv8bf16(ptr %base, <vscale x 8 x i16> %indices, <vscale x 8 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv8bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sunpkhi z1.s, z0.h
@@ -134,12 +134,12 @@ define <vscale x 8 x bfloat> @masked_gather_nxv8bf16(bfloat* %base, <vscale x 8
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 8 x i16> %indices
-  %data = call <vscale x 8 x bfloat> @llvm.masked.gather.nxv8bf16(<vscale x 8 x bfloat*> %ptrs, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x bfloat> undef)
+  %ptrs = getelementptr bfloat, ptr %base, <vscale x 8 x i16> %indices
+  %data = call <vscale x 8 x bfloat> @llvm.masked.gather.nxv8bf16(<vscale x 8 x ptr> %ptrs, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x bfloat> undef)
   ret <vscale x 8 x bfloat> %data
 }
 
-define <vscale x 4 x double> @masked_gather_nxv4f64(double* %base, <vscale x 4 x i16> %indices, <vscale x 4 x i1> %mask) #0 {
+define <vscale x 4 x double> @masked_gather_nxv4f64(ptr %base, <vscale x 4 x i16> %indices, <vscale x 4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p1.s
@@ -152,12 +152,12 @@ define <vscale x 4 x double> @masked_gather_nxv4f64(double* %base, <vscale x 4 x
 ; CHECK-NEXT:    ld1d { z0.d }, p1/z, [x0, z0.d, lsl #3]
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0, z1.d, lsl #3]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr double, double* %base, <vscale x 4 x i16> %indices
-  %data = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64(<vscale x 4 x double*> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x double> undef)
+  %ptrs = getelementptr double, ptr %base, <vscale x 4 x i16> %indices
+  %data = call <vscale x 4 x double> @llvm.masked.gather.nxv4f64(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x double> undef)
   ret <vscale x 4 x double> %data
 }
 
-define <vscale x 8 x float> @masked_gather_nxv8f32(float* %base, <vscale x 8 x i32> %offsets, <vscale x 8 x i1> %mask) #0 {
+define <vscale x 8 x float> @masked_gather_nxv8f32(ptr %base, <vscale x 8 x i32> %offsets, <vscale x 8 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    punpklo p1.h, p0.b
@@ -166,13 +166,13 @@ define <vscale x 8 x float> @masked_gather_nxv8f32(float* %base, <vscale x 8 x i
 ; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0, z1.s, uxtw #2]
 ; CHECK-NEXT:    ret
   %offsets.zext = zext <vscale x 8 x i32> %offsets to <vscale x 8 x i64>
-  %ptrs = getelementptr float, float* %base, <vscale x 8 x i64> %offsets.zext
-  %vals = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32(<vscale x 8 x float*> %ptrs, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x float> undef)
+  %ptrs = getelementptr float, ptr %base, <vscale x 8 x i64> %offsets.zext
+  %vals = call <vscale x 8 x float> @llvm.masked.gather.nxv8f32(<vscale x 8 x ptr> %ptrs, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x float> undef)
   ret <vscale x 8 x float> %vals
 }
 
 ; Code generate the worst case scenario when all vector types are legal.
-define <vscale x 16 x i8> @masked_gather_nxv16i8(i8* %base, <vscale x 16 x i8> %indices, <vscale x 16 x i1> %mask) #0 {
+define <vscale x 16 x i8> @masked_gather_nxv16i8(ptr %base, <vscale x 16 x i8> %indices, <vscale x 16 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sunpkhi z1.h, z0.b
@@ -195,13 +195,13 @@ define <vscale x 16 x i8> @masked_gather_nxv16i8(i8* %base, <vscale x 16 x i8> %
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z2.h
 ; CHECK-NEXT:    uzp1 z0.b, z0.b, z1.b
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, i8* %base, <vscale x 16 x i8> %indices
-  %data = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8(<vscale x 16 x i8*> %ptrs, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  %ptrs = getelementptr i8, ptr %base, <vscale x 16 x i8> %indices
+  %data = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8(<vscale x 16 x ptr> %ptrs, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
   ret <vscale x 16 x i8> %data
 }
 
 ; Code generate the worst case scenario when all vector types are illegal.
-define <vscale x 32 x i32> @masked_gather_nxv32i32(i32* %base, <vscale x 32 x i32> %indices, <vscale x 32 x i1> %mask) #0 {
+define <vscale x 32 x i32> @masked_gather_nxv32i32(ptr %base, <vscale x 32 x i32> %indices, <vscale x 32 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv32i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    punpklo p2.h, p0.b
@@ -225,16 +225,16 @@ define <vscale x 32 x i32> @masked_gather_nxv32i32(i32* %base, <vscale x 32 x i3
 ; CHECK-NEXT:    ld1w { z6.s }, p1/z, [x0, z6.s, sxtw #2]
 ; CHECK-NEXT:    ld1w { z7.s }, p0/z, [x0, z7.s, sxtw #2]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i32, i32* %base, <vscale x 32 x i32> %indices
-  %data = call <vscale x 32 x i32> @llvm.masked.gather.nxv32i32(<vscale x 32 x i32*> %ptrs, i32 4, <vscale x 32 x i1> %mask, <vscale x 32 x i32> undef)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 32 x i32> %indices
+  %data = call <vscale x 32 x i32> @llvm.masked.gather.nxv32i32(<vscale x 32 x ptr> %ptrs, i32 4, <vscale x 32 x i1> %mask, <vscale x 32 x i32> undef)
   ret <vscale x 32 x i32> %data
 }
 
 ; TODO: Currently, the sign extend gets applied to the values after a 'uzp1' of two
 ; registers, so it doesn't get folded away. Same for any other vector-of-pointers
-; style gathers which don't fit in an <vscale x 2 x type*> single register. Better folding
+; style gathers which don't fit in an <vscale x 2 x ptr> single register. Better folding
 ; is required before we can check those off.
-define <vscale x 4 x i32> @masked_sgather_nxv4i8(<vscale x 4 x i8*> %ptrs, <vscale x 4 x i1> %mask) #0 {
+define <vscale x 4 x i32> @masked_sgather_nxv4i8(<vscale x 4 x ptr> %ptrs, <vscale x 4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_sgather_nxv4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    punpkhi p1.h, p0.b
@@ -245,23 +245,23 @@ define <vscale x 4 x i32> @masked_sgather_nxv4i8(<vscale x 4 x i8*> %ptrs, <vsca
 ; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
 ; CHECK-NEXT:    sxtb z0.s, p0/m, z0.s
 ; CHECK-NEXT:    ret
-  %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x i8*> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+  %vals = call <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
   %svals = sext <vscale x 4 x i8> %vals to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %svals
 }
 
 attributes #0 = { nounwind "target-features"="+sve,+bf16" }
 
-declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
-declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
-declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
-declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x i8*>, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
-declare <vscale x 16 x i8> @llvm.masked.gather.nxv16i8(<vscale x 16 x i8*>, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
-declare <vscale x 32 x i32> @llvm.masked.gather.nxv32i32(<vscale x 32 x i32*>, i32, <vscale x 32 x i1>, <vscale x 32 x i32>)
+declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
+declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
+declare <vscale x 16 x i8> @llvm.masked.gather.nxv16i8(<vscale x 16 x ptr>, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <vscale x 32 x i32> @llvm.masked.gather.nxv32i32(<vscale x 32 x ptr>, i32, <vscale x 32 x i1>, <vscale x 32 x i32>)
 
-declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x half*>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
-declare <vscale x 8 x half> @llvm.masked.gather.nxv8f16(<vscale x 8 x half*>, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
-declare <vscale x 8 x bfloat> @llvm.masked.gather.nxv8bf16(<vscale x 8 x bfloat*>, i32, <vscale x 8 x i1>, <vscale x 8 x bfloat>)
-declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
-declare <vscale x 8 x float> @llvm.masked.gather.nxv8f32(<vscale x 8 x float*>, i32, <vscale x 8 x i1>, <vscale x 8 x float>)
-declare <vscale x 4 x double> @llvm.masked.gather.nxv4f64(<vscale x 4 x double*>, i32, <vscale x 4 x i1>, <vscale x 4 x double>)
+declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+declare <vscale x 8 x half> @llvm.masked.gather.nxv8f16(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
+declare <vscale x 8 x bfloat> @llvm.masked.gather.nxv8bf16(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x bfloat>)
+declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
+declare <vscale x 8 x float> @llvm.masked.gather.nxv8f32(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x float>)
+declare <vscale x 4 x double> @llvm.masked.gather.nxv4f64(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x double>)

diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-imm.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-imm.ll
index d2f595ebef760..9e34beedf5458 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-imm.ll
@@ -1,186 +1,186 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
 
-define <vscale x 2 x i64> @masked_gather_nxv2i8(<vscale x 2 x i8*> %bases, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i8(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [z0.d, #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i32 1
-  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i32 1
+  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %vals.zext = zext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i16(<vscale x 2 x i16*> %bases, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i16(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [z0.d, #2]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i16, <vscale x 2 x i16*> %bases, i32 1
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %ptrs = getelementptr i16, <vscale x 2 x ptr> %bases, i32 1
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i32(<vscale x 2 x i32*> %bases, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i32(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [z0.d, #4]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i32, <vscale x 2 x i32*> %bases, i32 1
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %ptrs = getelementptr i32, <vscale x 2 x ptr> %bases, i32 1
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i64(<vscale x 2 x i64*> %bases, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i64(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [z0.d, #8]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i64, <vscale x 2 x i64*> %bases, i32 1
-  %vals.zext = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+  %ptrs = getelementptr i64, <vscale x 2 x ptr> %bases, i32 1
+  %vals.zext = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x half> @masked_gather_nxv2f16(<vscale x 2 x half*> %bases, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @masked_gather_nxv2f16(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [z0.d, #4]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr half, <vscale x 2 x half*> %bases, i32 2
-  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+  %ptrs = getelementptr half, <vscale x 2 x ptr> %bases, i32 2
+  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
   ret <vscale x 2 x half> %vals
 }
 
-define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(<vscale x 2 x bfloat*> %bases, <vscale x 2 x i1> %mask) #0 {
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [z0.d, #4]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr bfloat, <vscale x 2 x bfloat*> %bases, i32 2
-  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  %ptrs = getelementptr bfloat, <vscale x 2 x ptr> %bases, i32 2
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
   ret <vscale x 2 x bfloat> %vals
 }
 
-define <vscale x 2 x float> @masked_gather_nxv2f32(<vscale x 2 x float*> %bases, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @masked_gather_nxv2f32(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [z0.d, #12]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr float, <vscale x 2 x float*> %bases, i32 3
-  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+  %ptrs = getelementptr float, <vscale x 2 x ptr> %bases, i32 3
+  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
   ret <vscale x 2 x float> %vals
 }
 
-define <vscale x 2 x double> @masked_gather_nxv2f64(<vscale x 2 x double*> %bases, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @masked_gather_nxv2f64(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [z0.d, #32]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr double, <vscale x 2 x double*> %bases, i32 4
-  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+  %ptrs = getelementptr double, <vscale x 2 x ptr> %bases, i32 4
+  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
   ret <vscale x 2 x double> %vals
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i8(<vscale x 2 x i8*> %bases, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i8(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [z0.d, #5]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i32 5
-  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i32 5
+  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %vals.sext = sext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i16(<vscale x 2 x i16*> %bases, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i16(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [z0.d, #12]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i16, <vscale x 2 x i16*> %bases, i32 6
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %ptrs = getelementptr i16, <vscale x 2 x ptr> %bases, i32 6
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i32(<vscale x 2 x i32*> %bases, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i32(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [z0.d, #28]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i32, <vscale x 2 x i32*> %bases, i32 7
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %ptrs = getelementptr i32, <vscale x 2 x ptr> %bases, i32 7
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
 ; Tests where the immediate is out of range
 
-define <vscale x 2 x i64> @masked_gather_nxv2i8_range(<vscale x 2 x i8*> %bases, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i8_range(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i8_range:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #32
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x8, z0.d]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i32 32
-  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i32 32
+  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %vals.zext = zext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x half> @masked_gather_nxv2f16_range(<vscale x 2 x half*> %bases, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @masked_gather_nxv2f16_range(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f16_range:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #64
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x8, z0.d]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr half, <vscale x 2 x half*> %bases, i32 32
-  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+  %ptrs = getelementptr half, <vscale x 2 x ptr> %bases, i32 32
+  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
   ret <vscale x 2 x half> %vals
 }
 
-define <vscale x 2 x bfloat> @masked_gather_nxv2bf16_range(<vscale x 2 x bfloat*> %bases, <vscale x 2 x i1> %mask) #0 {
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16_range(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2bf16_range:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #64
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x8, z0.d]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr bfloat, <vscale x 2 x bfloat*> %bases, i32 32
-  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  %ptrs = getelementptr bfloat, <vscale x 2 x ptr> %bases, i32 32
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
   ret <vscale x 2 x bfloat> %vals
 }
 
-define <vscale x 2 x float> @masked_gather_nxv2f32_range(<vscale x 2 x float*> %bases, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @masked_gather_nxv2f32_range(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32_range:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #128
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x8, z0.d]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr float, <vscale x 2 x float*> %bases, i32 32
-  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+  %ptrs = getelementptr float, <vscale x 2 x ptr> %bases, i32 32
+  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
   ret <vscale x 2 x float> %vals
 }
 
-define <vscale x 2 x double> @masked_gather_nxv2f64_range(<vscale x 2 x double*> %bases, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @masked_gather_nxv2f64_range(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f64_range:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #256
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8, z0.d]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr double, <vscale x 2 x double*> %bases, i32 32
-  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+  %ptrs = getelementptr double, <vscale x 2 x ptr> %bases, i32 32
+  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
   ret <vscale x 2 x double> %vals
 }
 
-declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
-declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
-declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
-declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
-declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
-declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
-declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
-declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
+declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
+declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
+declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 attributes #0 = { "target-features"="+sve,+bf16" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-reg.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-reg.ll
index 212606ca24ae3..88afc09d6f75b 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-reg.ll
@@ -1,137 +1,137 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
 
-define <vscale x 2 x i64> @masked_gather_nxv2i8(<vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i8(<vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %vals.zext = zext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i16(<vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i16(<vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i16*>
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i32(<vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i32(<vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i32*>
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.zext
 }
 
-define <vscale x 2 x i64> @masked_gather_nxv2i64(<vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_gather_nxv2i64(<vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i64*>
-  %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+  %byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
   ret <vscale x 2 x i64> %vals
 }
 
-define <vscale x 2 x half> @masked_gather_nxv2f16(<vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @masked_gather_nxv2f16(<vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x half*>
-  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+  %byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
   ret <vscale x 2 x half> %vals
 }
 
-define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(<vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) #0 {
+define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(<vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x bfloat*>
-  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
+  %byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x bfloat> undef)
   ret <vscale x 2 x bfloat> %vals
 }
 
-define <vscale x 2 x float> @masked_gather_nxv2f32(<vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @masked_gather_nxv2f32(<vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x float*>
-  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+  %byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
   ret <vscale x 2 x float> %vals
 }
 
-define <vscale x 2 x double> @masked_gather_nxv2f64(<vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @masked_gather_nxv2f64(<vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x double*>
-  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+  %byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
   ret <vscale x 2 x double> %vals
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i8(<vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i8(<vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  %ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
   %vals.sext = sext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i16(<vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i16(<vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i16*>
-  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  %byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
   %vals.sext = sext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-define <vscale x 2 x i64> @masked_sgather_nxv2i32(<vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i64> @masked_sgather_nxv2i32(<vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_sgather_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x0, z0.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i32*>
-  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  %byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
   %vals.sext = sext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %vals.sext
 }
 
-declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x i8*>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
-declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x i16*>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
-declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x i32*>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
-declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x i64*>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
-declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x half*>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
-declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
-declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
-declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x double*>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
+declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+declare <vscale x 2 x bfloat> @llvm.masked.gather.nxv2bf16(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x bfloat>)
+declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
+declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
 attributes #0 = { "target-features"="+sve,+bf16" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-masked-scatter-32b-scaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-scatter-32b-scaled.ll
index 459fd9ab96b8d..8c83172496fcb 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-scatter-32b-scaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-scatter-32b-scaled.ll
@@ -5,157 +5,157 @@
 ; scaled unpacked 32-bit offsets
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define void @masked_scatter_nxv2i16_sext(<vscale x 2 x i16> %data, i16* %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i16_sext(<vscale x 2 x i16> %data, ptr %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i16_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, sxtw #1]
 ; CHECK-NEXT:    ret
   %ext = sext <vscale x 2 x i32> %indexes to <vscale x 2 x i64>
-  %ptrs = getelementptr i16, i16* %base, <vscale x 2 x i64> %ext
-  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i16*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %ptrs = getelementptr i16, ptr %base, <vscale x 2 x i64> %ext
+  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i32_sext(<vscale x 2 x i32> %data, i32* %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i32_sext(<vscale x 2 x i32> %data, ptr %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i32_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, sxtw #2]
 ; CHECK-NEXT:    ret
   %ext = sext <vscale x 2 x i32> %indexes to <vscale x 2 x i64>
-  %ptrs = getelementptr i32, i32* %base, <vscale x 2 x i64> %ext
-  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i32*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 2 x i64> %ext
+  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i64_sext(<vscale x 2 x i64> %data, i64* %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i64_sext(<vscale x 2 x i64> %data, ptr %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i64_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, sxtw #3]
 ; CHECK-NEXT:    ret
   %ext = sext <vscale x 2 x i32> %indexes to <vscale x 2 x i64>
-  %ptrs = getelementptr i64, i64* %base, <vscale x 2 x i64> %ext
-  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %ptrs = getelementptr i64, ptr %base, <vscale x 2 x i64> %ext
+  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f16_sext(<vscale x 2 x half> %data, half* %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2f16_sext(<vscale x 2 x half> %data, ptr %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2f16_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, sxtw #1]
 ; CHECK-NEXT:    ret
   %ext = sext <vscale x 2 x i32> %indexes to <vscale x 2 x i64>
-  %ptrs = getelementptr half, half* %base, <vscale x 2 x i64> %ext
-  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %ptrs = getelementptr half, ptr %base, <vscale x 2 x i64> %ext
+  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2bf16_sext(<vscale x 2 x bfloat> %data, bfloat* %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind #0 {
+define void @masked_scatter_nxv2bf16_sext(<vscale x 2 x bfloat> %data, ptr %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind #0 {
 ; CHECK-LABEL: masked_scatter_nxv2bf16_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, sxtw #1]
 ; CHECK-NEXT:    ret
   %ext = sext <vscale x 2 x i32> %indexes to <vscale x 2 x i64>
-  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 2 x i64> %ext
-  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x bfloat*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %ptrs = getelementptr bfloat, ptr %base, <vscale x 2 x i64> %ext
+  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f32_sext(<vscale x 2 x float> %data, float* %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2f32_sext(<vscale x 2 x float> %data, ptr %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2f32_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, sxtw #2]
 ; CHECK-NEXT:    ret
   %ext = sext <vscale x 2 x i32> %indexes to <vscale x 2 x i64>
-  %ptrs = getelementptr float, float* %base, <vscale x 2 x i64> %ext
-  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x float*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %ptrs = getelementptr float, ptr %base, <vscale x 2 x i64> %ext
+  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f64_sext(<vscale x 2 x double> %data, double* %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2f64_sext(<vscale x 2 x double> %data, ptr %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2f64_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, sxtw #3]
 ; CHECK-NEXT:    ret
   %ext = sext <vscale x 2 x i32> %indexes to <vscale x 2 x i64>
-  %ptrs = getelementptr double, double* %base, <vscale x 2 x i64> %ext
-  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %ptrs = getelementptr double, ptr %base, <vscale x 2 x i64> %ext
+  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i16_zext(<vscale x 2 x i16> %data, i16* %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i16_zext(<vscale x 2 x i16> %data, ptr %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i16_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, uxtw #1]
 ; CHECK-NEXT:    ret
   %ext = zext <vscale x 2 x i32> %indexes to <vscale x 2 x i64>
-  %ptrs = getelementptr i16, i16* %base, <vscale x 2 x i64> %ext
-  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i16*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %ptrs = getelementptr i16, ptr %base, <vscale x 2 x i64> %ext
+  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i32_zext(<vscale x 2 x i32> %data, i32* %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i32_zext(<vscale x 2 x i32> %data, ptr %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i32_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, uxtw #2]
 ; CHECK-NEXT:    ret
   %ext = zext <vscale x 2 x i32> %indexes to <vscale x 2 x i64>
-  %ptrs = getelementptr i32, i32* %base, <vscale x 2 x i64> %ext
-  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i32*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 2 x i64> %ext
+  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i64_zext(<vscale x 2 x i64> %data, i64* %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i64_zext(<vscale x 2 x i64> %data, ptr %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i64_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, uxtw #3]
 ; CHECK-NEXT:    ret
   %ext = zext <vscale x 2 x i32> %indexes to <vscale x 2 x i64>
-  %ptrs = getelementptr i64, i64* %base, <vscale x 2 x i64> %ext
-  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %ptrs = getelementptr i64, ptr %base, <vscale x 2 x i64> %ext
+  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f16_zext(<vscale x 2 x half> %data, half* %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2f16_zext(<vscale x 2 x half> %data, ptr %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2f16_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, uxtw #1]
 ; CHECK-NEXT:    ret
   %ext = zext <vscale x 2 x i32> %indexes to <vscale x 2 x i64>
-  %ptrs = getelementptr half, half* %base, <vscale x 2 x i64> %ext
-  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %ptrs = getelementptr half, ptr %base, <vscale x 2 x i64> %ext
+  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2bf16_zext(<vscale x 2 x bfloat> %data, bfloat* %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind #0 {
+define void @masked_scatter_nxv2bf16_zext(<vscale x 2 x bfloat> %data, ptr %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind #0 {
 ; CHECK-LABEL: masked_scatter_nxv2bf16_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, uxtw #1]
 ; CHECK-NEXT:    ret
   %ext = zext <vscale x 2 x i32> %indexes to <vscale x 2 x i64>
-  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 2 x i64> %ext
-  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x bfloat*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %ptrs = getelementptr bfloat, ptr %base, <vscale x 2 x i64> %ext
+  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f32_zext(<vscale x 2 x float> %data, float* %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2f32_zext(<vscale x 2 x float> %data, ptr %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2f32_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, uxtw #2]
 ; CHECK-NEXT:    ret
   %ext = zext <vscale x 2 x i32> %indexes to <vscale x 2 x i64>
-  %ptrs = getelementptr float, float* %base, <vscale x 2 x i64> %ext
-  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x float*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %ptrs = getelementptr float, ptr %base, <vscale x 2 x i64> %ext
+  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f64_zext(<vscale x 2 x double> %data, double* %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2f64_zext(<vscale x 2 x double> %data, ptr %base, <vscale x 2 x i32> %indexes, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2f64_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, uxtw #3]
 ; CHECK-NEXT:    ret
   %ext = zext <vscale x 2 x i32> %indexes to <vscale x 2 x i64>
-  %ptrs = getelementptr double, double* %base, <vscale x 2 x i64> %ext
-  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %ptrs = getelementptr double, ptr %base, <vscale x 2 x i64> %ext
+  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
@@ -163,128 +163,128 @@ define void @masked_scatter_nxv2f64_zext(<vscale x 2 x double> %data, double* %b
 ; scaled packed 32-bit offset
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define void @masked_scatter_nxv4i16_sext(<vscale x 4 x i16> %data, i16* %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind {
+define void @masked_scatter_nxv4i16_sext(<vscale x 4 x i16> %data, ptr %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv4i16_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, sxtw #1]
 ; CHECK-NEXT:    ret
   %ext = sext <vscale x 4 x i32> %indexes to <vscale x 4 x i64>
-  %ptrs = getelementptr i16, i16* %base, <vscale x 4 x i64> %ext
-  call void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x i16*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %ptrs = getelementptr i16, ptr %base, <vscale x 4 x i64> %ext
+  call void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4i32_sext(<vscale x 4 x i32> %data, i32* %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind {
+define void @masked_scatter_nxv4i32_sext(<vscale x 4 x i32> %data, ptr %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv4i32_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, sxtw #2]
 ; CHECK-NEXT:    ret
   %ext = sext <vscale x 4 x i32> %indexes to <vscale x 4 x i64>
-  %ptrs = getelementptr i32, i32* %base, <vscale x 4 x i64> %ext
-  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i64> %ext
+  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4f16_sext(<vscale x 4 x half> %data, half* %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind {
+define void @masked_scatter_nxv4f16_sext(<vscale x 4 x half> %data, ptr %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv4f16_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, sxtw #1]
 ; CHECK-NEXT:    ret
   %ext = sext <vscale x 4 x i32> %indexes to <vscale x 4 x i64>
-  %ptrs = getelementptr half, half* %base, <vscale x 4 x i64> %ext
-  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x half*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %ptrs = getelementptr half, ptr %base, <vscale x 4 x i64> %ext
+  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4bf16_sext(<vscale x 4 x bfloat> %data, bfloat* %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind #0 {
+define void @masked_scatter_nxv4bf16_sext(<vscale x 4 x bfloat> %data, ptr %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind #0 {
 ; CHECK-LABEL: masked_scatter_nxv4bf16_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, sxtw #1]
 ; CHECK-NEXT:    ret
   %ext = sext <vscale x 4 x i32> %indexes to <vscale x 4 x i64>
-  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 4 x i64> %ext
-  call void @llvm.masked.scatter.nxv4bf16(<vscale x 4 x bfloat> %data, <vscale x 4 x bfloat*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %ptrs = getelementptr bfloat, ptr %base, <vscale x 4 x i64> %ext
+  call void @llvm.masked.scatter.nxv4bf16(<vscale x 4 x bfloat> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4f32_sext(<vscale x 4 x float> %data, float* %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind #0 {
+define void @masked_scatter_nxv4f32_sext(<vscale x 4 x float> %data, ptr %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind #0 {
 ; CHECK-LABEL: masked_scatter_nxv4f32_sext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, sxtw #2]
 ; CHECK-NEXT:    ret
   %ext = sext <vscale x 4 x i32> %indexes to <vscale x 4 x i64>
-  %ptrs = getelementptr float, float* %base, <vscale x 4 x i64> %ext
-  call void @llvm.masked.scatter.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x float*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %ptrs = getelementptr float, ptr %base, <vscale x 4 x i64> %ext
+  call void @llvm.masked.scatter.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4i16_zext(<vscale x 4 x i16> %data, i16* %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind {
+define void @masked_scatter_nxv4i16_zext(<vscale x 4 x i16> %data, ptr %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv4i16_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, uxtw #1]
 ; CHECK-NEXT:    ret
   %ext = zext <vscale x 4 x i32> %indexes to <vscale x 4 x i64>
-  %ptrs = getelementptr i16, i16* %base, <vscale x 4 x i64> %ext
-  call void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x i16*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %ptrs = getelementptr i16, ptr %base, <vscale x 4 x i64> %ext
+  call void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4i32_zext(<vscale x 4 x i32> %data, i32* %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind {
+define void @masked_scatter_nxv4i32_zext(<vscale x 4 x i32> %data, ptr %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv4i32_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, uxtw #2]
 ; CHECK-NEXT:    ret
   %ext = zext <vscale x 4 x i32> %indexes to <vscale x 4 x i64>
-  %ptrs = getelementptr i32, i32* %base, <vscale x 4 x i64> %ext
-  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 4 x i64> %ext
+  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4f16_zext(<vscale x 4 x half> %data, half* %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind {
+define void @masked_scatter_nxv4f16_zext(<vscale x 4 x half> %data, ptr %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv4f16_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, uxtw #1]
 ; CHECK-NEXT:    ret
   %ext = zext <vscale x 4 x i32> %indexes to <vscale x 4 x i64>
-  %ptrs = getelementptr half, half* %base, <vscale x 4 x i64> %ext
-  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x half*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %ptrs = getelementptr half, ptr %base, <vscale x 4 x i64> %ext
+  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4bf16_zext(<vscale x 4 x bfloat> %data, bfloat* %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind #0 {
+define void @masked_scatter_nxv4bf16_zext(<vscale x 4 x bfloat> %data, ptr %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind #0 {
 ; CHECK-LABEL: masked_scatter_nxv4bf16_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, uxtw #1]
 ; CHECK-NEXT:    ret
   %ext = zext <vscale x 4 x i32> %indexes to <vscale x 4 x i64>
-  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 4 x i64> %ext
-  call void @llvm.masked.scatter.nxv4bf16(<vscale x 4 x bfloat> %data, <vscale x 4 x bfloat*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %ptrs = getelementptr bfloat, ptr %base, <vscale x 4 x i64> %ext
+  call void @llvm.masked.scatter.nxv4bf16(<vscale x 4 x bfloat> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4f32_zext(<vscale x 4 x float> %data, float* %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind #0 {
+define void @masked_scatter_nxv4f32_zext(<vscale x 4 x float> %data, ptr %base, <vscale x 4 x i32> %indexes, <vscale x 4 x i1> %masks) nounwind #0 {
 ; CHECK-LABEL: masked_scatter_nxv4f32_zext:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, uxtw #2]
 ; CHECK-NEXT:    ret
   %ext = zext <vscale x 4 x i32> %indexes to <vscale x 4 x i64>
-  %ptrs = getelementptr float, float* %base, <vscale x 4 x i64> %ext
-  call void @llvm.masked.scatter.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x float*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %ptrs = getelementptr float, ptr %base, <vscale x 4 x i64> %ext
+  call void @llvm.masked.scatter.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-declare void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half*>, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.scatter.nxv4bf16(<vscale x 4 x bfloat>, <vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.scatter.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float*>, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16*>, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32*>, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8*>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv4bf16(<vscale x 4 x bfloat>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv4f32(<vscale x 4 x float>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
 attributes #0 = { "target-features"="+sve,+bf16" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-masked-scatter-32b-unscaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-scatter-32b-unscaled.ll
index ff9dd40416bfa..950200dff58cc 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-scatter-32b-unscaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-scatter-32b-unscaled.ll
@@ -5,357 +5,357 @@
 ; unscaled unpacked 32-bit offsets
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define void @masked_scatter_nxv2i8_sext_offsets(<vscale x 2 x i8> %data, i8* %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i8_sext_offsets(<vscale x 2 x i8> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i8_sext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0, z1.d, sxtw]
 ; CHECK-NEXT:    ret
   %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i8*>
-  call void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i16_sext_offsets(<vscale x 2 x i16> %data, i8* %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i16_sext_offsets(<vscale x 2 x i16> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i16_sext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, sxtw]
 ; CHECK-NEXT:    ret
   %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i16*>
-  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i16*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i32_sext_offsets(<vscale x 2 x i32> %data, i8* %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i32_sext_offsets(<vscale x 2 x i32> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i32_sext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, sxtw]
 ; CHECK-NEXT:    ret
   %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i32*>
-  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i32*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i64_sext_offsets(<vscale x 2 x i64> %data, i8* %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i64_sext_offsets(<vscale x 2 x i64> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i64_sext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, sxtw]
 ; CHECK-NEXT:    ret
   %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i64*>
-  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f16_sext_offsets(<vscale x 2 x half> %data, i8* %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2f16_sext_offsets(<vscale x 2 x half> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2f16_sext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, sxtw]
 ; CHECK-NEXT:    ret
   %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x half*>
-  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2bf16_sext_offsets(<vscale x 2 x bfloat> %data, i8* %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind #0 {
+define void @masked_scatter_nxv2bf16_sext_offsets(<vscale x 2 x bfloat> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind #0 {
 ; CHECK-LABEL: masked_scatter_nxv2bf16_sext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, sxtw]
 ; CHECK-NEXT:    ret
   %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x bfloat*>
-  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x bfloat*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f32_sext_offsets(<vscale x 2 x float> %data, i8* %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2f32_sext_offsets(<vscale x 2 x float> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2f32_sext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, sxtw]
 ; CHECK-NEXT:    ret
   %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x float*>
-  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x float*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f64_sext_offsets(<vscale x 2 x double> %data, i8* %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2f64_sext_offsets(<vscale x 2 x double> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2f64_sext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, sxtw]
 ; CHECK-NEXT:    ret
   %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x double*>
-  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i8_zext_offsets(<vscale x 2 x i8> %data, i8* %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i8_zext_offsets(<vscale x 2 x i8> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i8_zext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0, z1.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets = zext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i8*>
-  call void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i16_zext_offsets(<vscale x 2 x i16> %data, i8* %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i16_zext_offsets(<vscale x 2 x i16> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i16_zext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets = zext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i16*>
-  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i16*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i32_zext_offsets(<vscale x 2 x i32> %data, i8* %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i32_zext_offsets(<vscale x 2 x i32> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i32_zext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets = zext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i32*>
-  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i32*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i64_zext_offsets(<vscale x 2 x i64> %data, i8* %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i64_zext_offsets(<vscale x 2 x i64> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i64_zext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets = zext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i64*>
-  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f16_zext_offsets(<vscale x 2 x half> %data, i8* %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2f16_zext_offsets(<vscale x 2 x half> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2f16_zext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets = zext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x half*>
-  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2bf16_zext_offsets(<vscale x 2 x bfloat> %data, i8* %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind #0 {
+define void @masked_scatter_nxv2bf16_zext_offsets(<vscale x 2 x bfloat> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind #0 {
 ; CHECK-LABEL: masked_scatter_nxv2bf16_zext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets = zext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x bfloat*>
-  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x bfloat*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f32_zext_offsets(<vscale x 2 x float> %data, i8* %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2f32_zext_offsets(<vscale x 2 x float> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2f32_zext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets = zext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x float*>
-  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x float*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f64_zext_offsets(<vscale x 2 x double> %data, i8* %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2f64_zext_offsets(<vscale x 2 x double> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2f64_zext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, uxtw]
 ; CHECK-NEXT:    ret
   %offsets = zext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x double*>
-  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; unscaled packed 32-bit offsets
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-define void @masked_scatter_nxv4i8_sext_offsets(<vscale x 4 x i8> %data, i8* %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
+define void @masked_scatter_nxv4i8_sext_offsets(<vscale x 4 x i8> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv4i8_sext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x0, z1.s, sxtw]
 ; CHECK-NEXT:    ret
   %offsets = sext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x i8*>
-  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x i8*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4i16_sext_offsets(<vscale x 4 x i16> %data, i8* %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
+define void @masked_scatter_nxv4i16_sext_offsets(<vscale x 4 x i16> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv4i16_sext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, sxtw]
 ; CHECK-NEXT:    ret
   %offsets = sext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x i16*>
-  call void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x i16*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  call void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4i32_sext_offsets(<vscale x 4 x i32> %data, i8* %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
+define void @masked_scatter_nxv4i32_sext_offsets(<vscale x 4 x i32> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv4i32_sext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, sxtw]
 ; CHECK-NEXT:    ret
   %offsets = sext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x i32*>
-  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4f16_sext_offsets(<vscale x 4 x half> %data, i8* %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
+define void @masked_scatter_nxv4f16_sext_offsets(<vscale x 4 x half> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv4f16_sext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, sxtw]
 ; CHECK-NEXT:    ret
   %offsets = sext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x half*>
-  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x half*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4bf16_sext_offsets(<vscale x 4 x bfloat> %data, i8* %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind #0 {
+define void @masked_scatter_nxv4bf16_sext_offsets(<vscale x 4 x bfloat> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind #0 {
 ; CHECK-LABEL: masked_scatter_nxv4bf16_sext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, sxtw]
 ; CHECK-NEXT:    ret
   %offsets = sext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x bfloat*>
-  call void @llvm.masked.scatter.nxv4bf16(<vscale x 4 x bfloat> %data, <vscale x 4 x bfloat*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  call void @llvm.masked.scatter.nxv4bf16(<vscale x 4 x bfloat> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4f32_sext_offsets(<vscale x 4 x float> %data, i8* %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind #0 {
+define void @masked_scatter_nxv4f32_sext_offsets(<vscale x 4 x float> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind #0 {
 ; CHECK-LABEL: masked_scatter_nxv4f32_sext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, sxtw]
 ; CHECK-NEXT:    ret
   %offsets = sext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x float*>
-  call void @llvm.masked.scatter.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x float*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  call void @llvm.masked.scatter.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4i8_zext_offsets(<vscale x 4 x i8> %data, i8* %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
+define void @masked_scatter_nxv4i8_zext_offsets(<vscale x 4 x i8> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv4i8_zext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x0, z1.s, uxtw]
 ; CHECK-NEXT:    ret
   %offsets = zext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x i8*>
-  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x i8*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  call void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4i16_zext_offsets(<vscale x 4 x i16> %data, i8* %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
+define void @masked_scatter_nxv4i16_zext_offsets(<vscale x 4 x i16> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv4i16_zext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, uxtw]
 ; CHECK-NEXT:    ret
   %offsets = zext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x i16*>
-  call void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x i16*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  call void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4i32_zext_offsets(<vscale x 4 x i32> %data, i8* %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
+define void @masked_scatter_nxv4i32_zext_offsets(<vscale x 4 x i32> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv4i32_zext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, uxtw]
 ; CHECK-NEXT:    ret
   %offsets = zext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x i32*>
-  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  call void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4f16_zext_offsets(<vscale x 4 x half> %data, i8* %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
+define void @masked_scatter_nxv4f16_zext_offsets(<vscale x 4 x half> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv4f16_zext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, uxtw]
 ; CHECK-NEXT:    ret
   %offsets = zext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x half*>
-  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x half*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  call void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4bf16_zext_offsets(<vscale x 4 x bfloat> %data, i8* %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind #0 {
+define void @masked_scatter_nxv4bf16_zext_offsets(<vscale x 4 x bfloat> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind #0 {
 ; CHECK-LABEL: masked_scatter_nxv4bf16_zext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, uxtw]
 ; CHECK-NEXT:    ret
   %offsets = zext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x bfloat*>
-  call void @llvm.masked.scatter.nxv4bf16(<vscale x 4 x bfloat> %data, <vscale x 4 x bfloat*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  call void @llvm.masked.scatter.nxv4bf16(<vscale x 4 x bfloat> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv4f32_zext_offsets(<vscale x 4 x float> %data, i8* %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind #0 {
+define void @masked_scatter_nxv4f32_zext_offsets(<vscale x 4 x float> %data, ptr %base, <vscale x 4 x i32> %i32offsets, <vscale x 4 x i1> %masks) nounwind #0 {
 ; CHECK-LABEL: masked_scatter_nxv4f32_zext_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, z1.s, uxtw]
 ; CHECK-NEXT:    ret
   %offsets = zext <vscale x 4 x i32> %i32offsets to <vscale x 4 x i64>
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 4 x i64> %offsets
-  %ptrs = bitcast <vscale x 4 x i8*> %byte_ptrs to <vscale x 4 x float*>
-  call void @llvm.masked.scatter.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x float*> %ptrs, i32 0, <vscale x 4 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 4 x i64> %offsets
+  %ptrs = bitcast <vscale x 4 x ptr> %byte_ptrs to <vscale x 4 x ptr>
+  call void @llvm.masked.scatter.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x ptr> %ptrs, i32 0, <vscale x 4 x i1> %masks)
   ret void
 }
 
-declare void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half*>, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.scatter.nxv4bf16(<vscale x 4 x bfloat>, <vscale x 4 x bfloat*>, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.scatter.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float*>, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16*>, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32*>, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8*>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv4bf16(<vscale x 4 x bfloat>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv4f32(<vscale x 4 x float>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
 attributes #0 = { "target-features"="+sve,+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-masked-scatter-64b-scaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-scatter-64b-scaled.ll
index 67aebaa2060c5..9244f2c81e799 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-scatter-64b-scaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-scatter-64b-scaled.ll
@@ -5,69 +5,69 @@
 ; scaled 64-bit offsets
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define void @masked_scatter_nxv2i16(<vscale x 2 x i16> %data, i16* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2i16(<vscale x 2 x i16> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i16, i16* %base, <vscale x 2 x i64> %offsets
-  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr i16, ptr %base, <vscale x 2 x i64> %offsets
+  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2i32(<vscale x 2 x i32> %data, i32* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2i32(<vscale x 2 x i32> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i32, i32* %base, <vscale x 2 x i64> %offsets
-  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 2 x i64> %offsets
+  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2i64(<vscale x 2 x i64> %data, i64* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2i64(<vscale x 2 x i64> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, lsl #3]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i64, i64* %base, <vscale x 2 x i64> %offsets
-  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr i64, ptr %base, <vscale x 2 x i64> %offsets
+  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2f16(<vscale x 2 x half> %data, half* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2f16(<vscale x 2 x half> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr half, half* %base, <vscale x 2 x i64> %offsets
-  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, i32 2, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr half, ptr %base, <vscale x 2 x i64> %offsets
+  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2f32(<vscale x 2 x float> %data, float* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2f32(<vscale x 2 x float> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr float, float* %base, <vscale x 2 x i64> %offsets
-  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x float*> %ptrs, i32 4, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr float, ptr %base, <vscale x 2 x i64> %offsets
+  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2f64(<vscale x 2 x double> %data, double* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2f64(<vscale x 2 x double> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d, lsl #3]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr double, double* %base, <vscale x 2 x i64> %offsets
-  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double*> %ptrs, i32 8, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr double, ptr %base, <vscale x 2 x i64> %offsets
+  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask)
   ret void
 }
 
-declare void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double*>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)

diff --git a/llvm/test/CodeGen/AArch64/sve-masked-scatter-64b-unscaled.ll b/llvm/test/CodeGen/AArch64/sve-masked-scatter-64b-unscaled.ll
index 0f81e286f436d..67acf8618809b 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-scatter-64b-unscaled.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-scatter-64b-unscaled.ll
@@ -5,104 +5,104 @@
 ; unscaled 64-bit offsets
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
-define void @masked_scatter_nxv2i8_unscaled_64bit_offsets(<vscale x 2 x i8> %data, i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i8_unscaled_64bit_offsets(<vscale x 2 x i8> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i8_unscaled_64bit_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i8*>
-  call void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i16_unscaled_64bit_offsets(<vscale x 2 x i16> %data, i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i16_unscaled_64bit_offsets(<vscale x 2 x i16> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i16_unscaled_64bit_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i16*>
-  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i16*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i32_unscaled_64bit_offsets(<vscale x 2 x i32> %data, i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i32_unscaled_64bit_offsets(<vscale x 2 x i32> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i32_unscaled_64bit_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i32*>
-  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i32*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2i64_unscaled_64bit_offsets(<vscale x 2 x i64> %data, i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2i64_unscaled_64bit_offsets(<vscale x 2 x i64> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2i64_unscaled_64bit_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i64*>
-  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f16_unscaled_64bit_offsets(<vscale x 2 x half> %data, i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %masks) nounwind {
+define void @masked_scatter_nxv2f16_unscaled_64bit_offsets(<vscale x 2 x half> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %masks) nounwind {
 ; CHECK-LABEL: masked_scatter_nxv2f16_unscaled_64bit_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x half*>
-  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2bf16_unscaled_64bit_offsets(<vscale x 2 x bfloat> %data, i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %masks) nounwind #0 {
+define void @masked_scatter_nxv2bf16_unscaled_64bit_offsets(<vscale x 2 x bfloat> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %masks) nounwind #0 {
 ; CHECK-LABEL: masked_scatter_nxv2bf16_unscaled_64bit_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x bfloat*>
-  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x bfloat*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f32_unscaled_64bit_offsets(<vscale x 2 x float> %data, i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %masks) nounwind #0 {
+define void @masked_scatter_nxv2f32_unscaled_64bit_offsets(<vscale x 2 x float> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %masks) nounwind #0 {
 ; CHECK-LABEL: masked_scatter_nxv2f32_unscaled_64bit_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x float*>
-  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x float*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-define void @masked_scatter_nxv2f64_unscaled_64bit_offsets(<vscale x 2 x double> %data, i8* %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %masks) nounwind #0 {
+define void @masked_scatter_nxv2f64_unscaled_64bit_offsets(<vscale x 2 x double> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %masks) nounwind #0 {
 ; CHECK-LABEL: masked_scatter_nxv2f64_unscaled_64bit_offsets:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, i8* %base, <vscale x 2 x i64> %offsets
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x double*>
-  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double*> %ptrs, i32 0, <vscale x 2 x i1> %masks)
+  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %ptrs, i32 0, <vscale x 2 x i1> %masks)
   ret void
 }
 
-declare void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half*>, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16*>, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32*>, i32, <vscale x 4 x i1>)
-declare void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8*>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
+declare void @llvm.masked.scatter.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
 attributes #0 = { "target-features"="+sve,+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-masked-scatter-legalize.ll b/llvm/test/CodeGen/AArch64/sve-masked-scatter-legalize.ll
index 9392094629828..9216381942e87 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-scatter-legalize.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-scatter-legalize.ll
@@ -6,7 +6,7 @@ target triple = "aarch64-linux-gnu"
 ; Tests that exercise various type legalisation scenarios for ISD::MSCATTER.
 
 ; Code generate the scenario where the offset vector type is illegal.
-define void @masked_scatter_nxv16i8(<vscale x 16 x i8> %data, i8* %base, <vscale x 16 x i8> %offsets, <vscale x 16 x i1> %mask) #0 {
+define void @masked_scatter_nxv16i8(<vscale x 16 x i8> %data, ptr %base, <vscale x 16 x i8> %offsets, <vscale x 16 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_scatter_nxv16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sunpklo z2.h, z1.b
@@ -32,12 +32,12 @@ define void @masked_scatter_nxv16i8(<vscale x 16 x i8> %data, i8* %base, <vscale
 ; CHECK-NEXT:    st1b { z3.s }, p1, [x0, z2.s, sxtw]
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x0, z1.s, sxtw]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, i8* %base, <vscale x 16 x i8> %offsets
-  call void @llvm.masked.scatter.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i8*> %ptrs, i32 1, <vscale x 16 x i1> %mask)
+  %ptrs = getelementptr i8, ptr %base, <vscale x 16 x i8> %offsets
+  call void @llvm.masked.scatter.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x ptr> %ptrs, i32 1, <vscale x 16 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv8i16(<vscale x 8 x i16> %data, i16* %base, <vscale x 8 x i16> %offsets, <vscale x 8 x i1> %mask) #0 {
+define void @masked_scatter_nxv8i16(<vscale x 8 x i16> %data, ptr %base, <vscale x 8 x i16> %offsets, <vscale x 8 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_scatter_nxv8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sunpklo z2.s, z1.h
@@ -49,12 +49,12 @@ define void @masked_scatter_nxv8i16(<vscale x 8 x i16> %data, i16* %base, <vscal
 ; CHECK-NEXT:    st1h { z3.s }, p1, [x0, z2.s, sxtw #1]
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, sxtw #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i16, i16* %base, <vscale x 8 x i16> %offsets
-  call void @llvm.masked.scatter.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i16*> %ptrs, i32 1, <vscale x 8 x i1> %mask)
+  %ptrs = getelementptr i16, ptr %base, <vscale x 8 x i16> %offsets
+  call void @llvm.masked.scatter.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x ptr> %ptrs, i32 1, <vscale x 8 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv8bf16(<vscale x 8 x bfloat> %data, bfloat* %base, <vscale x 8 x i16> %offsets, <vscale x 8 x i1> %mask) #0 {
+define void @masked_scatter_nxv8bf16(<vscale x 8 x bfloat> %data, ptr %base, <vscale x 8 x i16> %offsets, <vscale x 8 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_scatter_nxv8bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sunpklo z2.s, z1.h
@@ -66,12 +66,12 @@ define void @masked_scatter_nxv8bf16(<vscale x 8 x bfloat> %data, bfloat* %base,
 ; CHECK-NEXT:    st1h { z3.s }, p1, [x0, z2.s, sxtw #1]
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, z1.s, sxtw #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr bfloat, bfloat* %base, <vscale x 8 x i16> %offsets
-  call void @llvm.masked.scatter.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x bfloat*> %ptrs, i32 1, <vscale x 8 x i1> %mask)
+  %ptrs = getelementptr bfloat, ptr %base, <vscale x 8 x i16> %offsets
+  call void @llvm.masked.scatter.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x ptr> %ptrs, i32 1, <vscale x 8 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv8f32(<vscale x 8 x float> %data, float* %base, <vscale x 8 x i32> %indexes, <vscale x 8 x i1> %masks) #0 {
+define void @masked_scatter_nxv8f32(<vscale x 8 x float> %data, ptr %base, <vscale x 8 x i32> %indexes, <vscale x 8 x i1> %masks) #0 {
 ; CHECK-LABEL: masked_scatter_nxv8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    punpklo p1.h, p0.b
@@ -80,13 +80,13 @@ define void @masked_scatter_nxv8f32(<vscale x 8 x float> %data, float* %base, <v
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x0, z3.s, uxtw #2]
 ; CHECK-NEXT:    ret
   %ext = zext <vscale x 8 x i32> %indexes to <vscale x 8 x i64>
-  %ptrs = getelementptr float, float* %base, <vscale x 8 x i64> %ext
-  call void @llvm.masked.scatter.nxv8f32(<vscale x 8 x float> %data, <vscale x 8 x float*> %ptrs, i32 0, <vscale x 8 x i1> %masks)
+  %ptrs = getelementptr float, ptr %base, <vscale x 8 x i64> %ext
+  call void @llvm.masked.scatter.nxv8f32(<vscale x 8 x float> %data, <vscale x 8 x ptr> %ptrs, i32 0, <vscale x 8 x i1> %masks)
   ret void
 }
 
 ; Code generate the worst case scenario when all vector types are illegal.
-define void @masked_scatter_nxv32i32(<vscale x 32 x i32> %data, i32* %base, <vscale x 32 x i32> %offsets, <vscale x 32 x i1> %mask) #0 {
+define void @masked_scatter_nxv32i32(<vscale x 32 x i32> %data, ptr %base, <vscale x 32 x i32> %offsets, <vscale x 32 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_scatter_nxv32i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p2.s
@@ -119,14 +119,14 @@ define void @masked_scatter_nxv32i32(<vscale x 32 x i32> %data, i32* %base, <vsc
 ; CHECK-NEXT:    st1w { z6.s }, p1, [x0, z25.s, sxtw #2]
 ; CHECK-NEXT:    st1w { z7.s }, p0, [x0, z24.s, sxtw #2]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i32, i32* %base, <vscale x 32 x i32> %offsets
-  call void @llvm.masked.scatter.nxv32i32(<vscale x 32 x i32> %data, <vscale x 32 x i32*> %ptrs, i32 4, <vscale x 32 x i1> %mask)
+  %ptrs = getelementptr i32, ptr %base, <vscale x 32 x i32> %offsets
+  call void @llvm.masked.scatter.nxv32i32(<vscale x 32 x i32> %data, <vscale x 32 x ptr> %ptrs, i32 4, <vscale x 32 x i1> %mask)
   ret void
 }
 
-declare void @llvm.masked.scatter.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8*>,  i32, <vscale x 16 x i1>)
-declare void @llvm.masked.scatter.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16*>,  i32, <vscale x 8 x i1>)
-declare void @llvm.masked.scatter.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float*>, i32, <vscale x 8 x i1>)
-declare void @llvm.masked.scatter.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat*>, i32, <vscale x 8 x i1>)
-declare void @llvm.masked.scatter.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i32*>,  i32, <vscale x 32 x i1>)
+declare void @llvm.masked.scatter.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x ptr>,  i32, <vscale x 16 x i1>)
+declare void @llvm.masked.scatter.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x ptr>,  i32, <vscale x 8 x i1>)
+declare void @llvm.masked.scatter.nxv8f32(<vscale x 8 x float>, <vscale x 8 x ptr>, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.scatter.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x ptr>, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.scatter.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x ptr>,  i32, <vscale x 32 x i1>)
 attributes #0 = { nounwind "target-features"="+sve,+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-masked-scatter-vec-plus-imm.ll b/llvm/test/CodeGen/AArch64/sve-masked-scatter-vec-plus-imm.ll
index cc33f77d7d88a..4cea88215bb1b 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-scatter-vec-plus-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-scatter-vec-plus-imm.ll
@@ -1,138 +1,138 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
 
-define void @masked_scatter_nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8*> %bases, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.d }, p0, [z1.d, #1]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i32 1
-  call void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i32 1
+  call void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i16*> %bases, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [z1.d, #2]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i16, <vscale x 2 x i16*> %bases, i32 1
-  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr i16, <vscale x 2 x ptr> %bases, i32 1
+  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i32*> %bases, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [z1.d, #4]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i32, <vscale x 2 x i32*> %bases, i32 1
-  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr i32, <vscale x 2 x ptr> %bases, i32 1
+  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64*> %bases, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [z1.d, #8]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i64, <vscale x 2 x i64*> %bases, i32 1
-  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr i64, <vscale x 2 x ptr> %bases, i32 1
+  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %bases, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [z1.d, #4]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr half, <vscale x 2 x half*> %bases, i32 2
-  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, i32 2, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr half, <vscale x 2 x ptr> %bases, i32 2
+  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x bfloat*> %bases, <vscale x 2 x i1> %mask) #0 {
+define void @masked_scatter_nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_scatter_nxv2bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [z1.d, #4]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr bfloat, <vscale x 2 x bfloat*> %bases, i32 2
-  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr bfloat, <vscale x 2 x ptr> %bases, i32 2
+  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x float*> %bases, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [z1.d, #12]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr float, <vscale x 2 x float*> %bases, i32 3
-  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x float*> %ptrs, i32 4, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr float, <vscale x 2 x ptr> %bases, i32 3
+  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double*> %bases, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [z1.d, #32]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr double, <vscale x 2 x double*> %bases, i32 4
-  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double*> %ptrs, i32 8, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr double, <vscale x 2 x ptr> %bases, i32 4
+  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask)
   ret void
 }
 
 ; Test where the immediate is out of range
 
-define void @masked_scatter_nxv2i8_range(<vscale x 2 x i8> %data, <vscale x 2 x i8*> %bases, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2i8_range(<vscale x 2 x i8> %data, <vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2i8_range:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #32
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x8, z1.d]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i32 32
-  call void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i32 32
+  call void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2i16_range(<vscale x 2 x i16> %data, <vscale x 2 x i16*> %bases, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2i16_range(<vscale x 2 x i16> %data, <vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2i16_range:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #64
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x8, z1.d]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i16, <vscale x 2 x i16*> %bases, i32 32
-  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr i16, <vscale x 2 x ptr> %bases, i32 32
+  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2i32_range(<vscale x 2 x i32> %data, <vscale x 2 x i32*> %bases, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2i32_range(<vscale x 2 x i32> %data, <vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2i32_range:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #128
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x8, z1.d]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i32, <vscale x 2 x i32*> %bases, i32 32
-  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i32*> %ptrs, i32 1, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr i32, <vscale x 2 x ptr> %bases, i32 32
+  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2f64_range(<vscale x 2 x double> %data, <vscale x 2 x double*> %bases, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2f64_range(<vscale x 2 x double> %data, <vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2f64_range:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #256
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x8, z1.d]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr double, <vscale x 2 x double*> %bases, i32 32
-  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double*> %ptrs, i32 8, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr double, <vscale x 2 x ptr> %bases, i32 32
+  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask)
   ret void
 }
 
-declare void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8*>,  i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double*>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x ptr>,  i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
 attributes #0 = { "target-features"="+sve,+bf16" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-masked-scatter-vec-plus-reg.ll b/llvm/test/CodeGen/AArch64/sve-masked-scatter-vec-plus-reg.ll
index 4164158c36cb9..3a93848506b52 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-scatter-vec-plus-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-scatter-vec-plus-reg.ll
@@ -1,99 +1,99 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
 
-define void @masked_scatter_nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
-  %ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  call void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %mask)
+  %ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  call void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8> %data, <vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i16*>
-  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %mask)
+  %byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16> %data, <vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i32*>
-  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %mask)
+  %byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32> %data, <vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x i64*>
-  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %mask)
+  %byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x half*>
-  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x half*> %ptrs, i32 2, <vscale x 2 x i1> %mask)
+  %byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half> %data, <vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) #0 {
+define void @masked_scatter_nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_scatter_nxv2bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x bfloat*>
-  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x bfloat*> %ptrs, i32 2, <vscale x 2 x i1> %mask)
+  %byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat> %data, <vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x float*>
-  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x float*> %ptrs, i32 4, <vscale x 2 x i1> %mask)
+  %byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float> %data, <vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_scatter_nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i8*> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
+define void @masked_scatter_nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %bases, i64 %offset, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_scatter_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, z1.d]
 ; CHECK-NEXT:    ret
-  %byte_ptrs = getelementptr i8, <vscale x 2 x i8*> %bases, i64 %offset
-  %ptrs = bitcast <vscale x 2 x i8*> %byte_ptrs to <vscale x 2 x double*>
-  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x double*> %ptrs, i32 8, <vscale x 2 x i1> %mask)
+  %byte_ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i64 %offset
+  %ptrs = bitcast <vscale x 2 x ptr> %byte_ptrs to <vscale x 2 x ptr>
+  call void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x ptr> %ptrs, i32 8, <vscale x 2 x i1> %mask)
   ret void
 }
 
-declare void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8*>,  i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float*>, i32, <vscale x 2 x i1>)
-declare void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double*>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x ptr>,  i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2f16(<vscale x 2 x half>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2f32(<vscale x 2 x float>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
+declare void @llvm.masked.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
 attributes #0 = { "target-features"="+sve,+bf16" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-merging-stores.ll b/llvm/test/CodeGen/AArch64/sve-merging-stores.ll
index 06c6ff1a6e522..47758893ce711 100644
--- a/llvm/test/CodeGen/AArch64/sve-merging-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve-merging-stores.ll
@@ -3,27 +3,26 @@
 %complex = type { { double, double } }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv2f64(<vscale x 2 x i1>, double*) #3
+declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv2f64(<vscale x 2 x i1>, ptr) #3
 
 ; Function Attrs: nounwind readnone
 declare double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>) #2
 
-define void @foo1(%complex* %outval, <vscale x 2 x i1> %pred, double *%inptr) {
+define void @foo1(ptr %outval, <vscale x 2 x i1> %pred, ptr %inptr) {
 ; CHECK-LABEL: foo1:
 ; CHECK: ld2d { z0.d, z1.d }, p0/z, [x1]
 ; CHECK-NEXT: faddv d2, p0, z0.d
 ; CHECK-NEXT: faddv d0, p0, z1.d
 ; CHECK-NEXT: mov v2.d[1], v0.d[0]
 ; CHECK-NEXT: str q2, [x0]
-  %realp = getelementptr inbounds %complex, %complex* %outval, i64 0, i32 0, i32 0
-  %imagp = getelementptr inbounds %complex, %complex* %outval, i64 0, i32 0, i32 1
-  %1 = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv2f64(<vscale x 2 x i1> %pred, double* nonnull %inptr)
+  %imagp = getelementptr inbounds %complex, ptr %outval, i64 0, i32 0, i32 1
+  %1 = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv2f64(<vscale x 2 x i1> %pred, ptr nonnull %inptr)
   %2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %1, 0
   %3 = call double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1> %pred, <vscale x 2 x double> %2)
   %4 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %1, 1
   %5 = call double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1> %pred, <vscale x 2 x double> %4)
-  store double %3, double* %realp, align 8
-  store double %5, double* %imagp, align 8
+  store double %3, ptr %outval, align 8
+  store double %5, ptr %imagp, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
index a8bfe75bac6e2..fe14b9602093e 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-reg.ll
@@ -2,127 +2,120 @@
 
 ; 2-lane contiguous load/stores
 
-define void @test_masked_ldst_sv2i8(i8 * %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv2i8(ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2i8:
 ; CHECK-NEXT: ld1b { z[[DATA:[0-9]+]].d }, p0/z, [x0, x1]
 ; CHECK-NEXT: st1b { z[[DATA]].d }, p0, [x0, x1]
 ; CHECK-NEXT: ret
-  %base_i8 = getelementptr i8, i8* %base, i64 %offset
-  %base_addr = bitcast i8* %base_i8 to <vscale x 2 x i8>*
-  %data = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %base_addr,
+  %base_i8 = getelementptr i8, ptr %base, i64 %offset
+  %data = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 2 x i1> %mask,
                                                           <vscale x 2 x i8> undef)
   call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %data,
-                                      <vscale x 2 x i8>* %base_addr,
+                                      ptr %base_i8,
                                       i32 1,
                                       <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv2i16(i16 * %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv2i16(ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2i16:
 ; CHECK-NEXT: ld1h { z[[DATA:[0-9]+]].d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: st1h { z[[DATA]].d }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
-  %base_i16 = getelementptr i16, i16* %base, i64 %offset
-  %base_addr = bitcast i16* %base_i16 to <vscale x 2 x i16>*
-  %data = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %base_addr,
+  %base_i16 = getelementptr i16, ptr %base, i64 %offset
+  %data = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %base_i16,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i16> undef)
   call void @llvm.masked.store.nxv2i16(<vscale x 2 x i16> %data,
-                                       <vscale x 2 x i16>* %base_addr,
+                                       ptr %base_i16,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv2i32(i32 * %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv2i32(ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2i32:
 ; CHECK-NEXT: ld1w  { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT: st1w  { z0.d }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT: ret
-  %base_i32 = getelementptr i32, i32* %base, i64 %offset
-  %base_addr = bitcast i32* %base_i32 to <vscale x 2 x i32>*
-  %data = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %base_addr,
+  %base_i32 = getelementptr i32, ptr %base, i64 %offset
+  %data = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %base_i32,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i32> undef)
   call void @llvm.masked.store.nxv2i32(<vscale x 2 x i32> %data,
-                                       <vscale x 2 x i32>* %base_addr,
+                                       ptr %base_i32,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv2i64(i64 * %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv2i64(ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2i64:
 ; CHECK-NEXT: ld1d  { z0.d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT: st1d  { z0.d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT: ret
-  %base_i64 = getelementptr i64, i64* %base, i64 %offset
-  %base_addr = bitcast i64* %base_i64 to <vscale x 2 x i64>*
-  %data = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>* %base_addr,
+  %base_i64 = getelementptr i64, ptr %base, i64 %offset
+  %data = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr %base_i64,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i64> undef)
   call void @llvm.masked.store.nxv2i64(<vscale x 2 x i64> %data,
-                                       <vscale x 2 x i64>* %base_addr,
+                                       ptr %base_i64,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv2f16(half * %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv2f16(ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2f16:
 ; CHECK-NEXT: ld1h { z[[DATA:[0-9]+]].d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: st1h { z[[DATA]].d }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
-  %base_half = getelementptr half, half* %base, i64 %offset
-  %base_addr = bitcast half* %base_half to <vscale x 2 x half>* 
-  %data = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(<vscale x 2 x half>* %base_addr,
+  %base_half = getelementptr half, ptr %base, i64 %offset
+  %data = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(ptr %base_half,
                                                              i32 1,
                                                              <vscale x 2 x i1> %mask,
                                                              <vscale x 2 x half> undef)
   call void @llvm.masked.store.nxv2f16(<vscale x 2 x half> %data,
-                                       <vscale x 2 x half>* %base_addr,
+                                       ptr %base_half,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
 } 
 
-define void @test_masked_ldst_sv2f32(float * %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv2f32(ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2f32:
 ; CHECK-NEXT: ld1w { z[[DATA:[0-9]+]].d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT: st1w { z[[DATA]].d }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT: ret
-  %base_float = getelementptr float, float* %base, i64 %offset
-  %base_addr = bitcast float* %base_float to <vscale x 2 x float>* 
-  %data = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(<vscale x 2 x float>* %base_addr,
+  %base_float = getelementptr float, ptr %base, i64 %offset
+  %data = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(ptr %base_float,
                                                               i32 1,
                                                               <vscale x 2 x i1> %mask,
                                                               <vscale x 2 x float> undef)
   call void @llvm.masked.store.nxv2f32(<vscale x 2 x float> %data,
-                                       <vscale x 2 x float>* %base_addr,
+                                       ptr %base_float,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv2f64(double * %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv2f64(ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2f64:
 ; CHECK-NEXT: ld1d { z[[DATA:[0-9]+]].d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT: st1d { z[[DATA]].d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT: ret
-  %base_double = getelementptr double, double* %base, i64 %offset
-  %base_addr = bitcast double* %base_double to <vscale x 2 x double>* 
-  %data = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(<vscale x 2 x double>* %base_addr,
+  %base_double = getelementptr double, ptr %base, i64 %offset
+  %data = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(ptr %base_double,
                                                                i32 1,
                                                                <vscale x 2 x i1> %mask,
                                                                <vscale x 2 x double> undef)
   call void @llvm.masked.store.nxv2f64(<vscale x 2 x double> %data,
-                                       <vscale x 2 x double>* %base_addr,
+                                       ptr %base_double,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
@@ -130,13 +123,12 @@ define void @test_masked_ldst_sv2f64(double * %base, <vscale x 2 x i1> %mask, i6
 
 ; 2-lane zero/sign extended contiguous loads.
 
-define <vscale x 2 x i64> @masked_zload_sv2i8_to_sv2i64(i8* %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define <vscale x 2 x i64> @masked_zload_sv2i8_to_sv2i64(ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_zload_sv2i8_to_sv2i64:
 ; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, x1]
 ; CHECK-NEXT: ret
-  %base_i8 = getelementptr i8, i8* %base, i64 %offset
-  %base_addr = bitcast i8* %base_i8 to <vscale x 2 x i8>*
-  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %base_addr,
+  %base_i8 = getelementptr i8, ptr %base, i64 %offset
+  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 2 x i1> %mask,
                                                           <vscale x 2 x i8> undef)
@@ -144,13 +136,12 @@ define <vscale x 2 x i64> @masked_zload_sv2i8_to_sv2i64(i8* %base, <vscale x 2 x
   ret <vscale x 2 x i64> %ext
 }
 
-define <vscale x 2 x i64> @masked_sload_sv2i8_to_sv2i64(i8* %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define <vscale x 2 x i64> @masked_sload_sv2i8_to_sv2i64(ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_sload_sv2i8_to_sv2i64:
 ; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0, x1]
 ; CHECK-NEXT: ret
-  %base_i8 = getelementptr i8, i8* %base, i64 %offset
-  %base_addr = bitcast i8* %base_i8 to <vscale x 2 x i8>*
-  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %base_addr,
+  %base_i8 = getelementptr i8, ptr %base, i64 %offset
+  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 2 x i1> %mask,
                                                           <vscale x 2 x i8> undef)
@@ -158,13 +149,12 @@ define <vscale x 2 x i64> @masked_sload_sv2i8_to_sv2i64(i8* %base, <vscale x 2 x
   ret <vscale x 2 x i64> %ext
 }
 
-define <vscale x 2 x i64> @masked_zload_sv2i16_to_sv2i64(i16* %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define <vscale x 2 x i64> @masked_zload_sv2i16_to_sv2i64(ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_zload_sv2i16_to_sv2i64:
 ; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
-  %base_i16 = getelementptr i16, i16* %base, i64 %offset
-  %base_addr = bitcast i16* %base_i16 to <vscale x 2 x i16>*
-  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %base_addr,
+  %base_i16 = getelementptr i16, ptr %base, i64 %offset
+  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %base_i16,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i16> undef)
@@ -172,13 +162,12 @@ define <vscale x 2 x i64> @masked_zload_sv2i16_to_sv2i64(i16* %base, <vscale x 2
   ret <vscale x 2 x i64> %ext
 }
 
-define <vscale x 2 x i64> @masked_sload_sv2i16_to_sv2i64(i16* %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define <vscale x 2 x i64> @masked_sload_sv2i16_to_sv2i64(ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_sload_sv2i16_to_sv2i64:
 ; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
-  %base_i16 = getelementptr i16, i16* %base, i64 %offset
-  %base_addr = bitcast i16* %base_i16 to <vscale x 2 x i16>*
-  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %base_addr,
+  %base_i16 = getelementptr i16, ptr %base, i64 %offset
+  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(ptr %base_i16,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i16> undef)
@@ -187,13 +176,12 @@ define <vscale x 2 x i64> @masked_sload_sv2i16_to_sv2i64(i16* %base, <vscale x 2
 }
 
 
-define <vscale x 2 x i64> @masked_zload_sv2i32_to_sv2i64(i32* %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define <vscale x 2 x i64> @masked_zload_sv2i32_to_sv2i64(ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_zload_sv2i32_to_sv2i64:
 ; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT: ret
-  %base_i32 = getelementptr i32, i32* %base, i64 %offset
-  %base_addr = bitcast i32* %base_i32 to <vscale x 2 x i32>*
-  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %base_addr,
+  %base_i32 = getelementptr i32, ptr %base, i64 %offset
+  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %base_i32,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i32> undef)
@@ -201,13 +189,12 @@ define <vscale x 2 x i64> @masked_zload_sv2i32_to_sv2i64(i32* %base, <vscale x 2
   ret <vscale x 2 x i64> %ext
 }
 
-define <vscale x 2 x i64> @masked_sload_sv2i32_to_sv2i64(i32* %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define <vscale x 2 x i64> @masked_sload_sv2i32_to_sv2i64(ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_sload_sv2i32_to_sv2i64:
 ; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT: ret
-  %base_i32 = getelementptr i32, i32* %base, i64 %offset
-  %base_addr = bitcast i32* %base_i32 to <vscale x 2 x i32>*
-  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %base_addr,
+  %base_i32 = getelementptr i32, ptr %base, i64 %offset
+  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %base_i32,
                                                             i32 1,
                                                             <vscale x 2 x i1> %mask,
                                                             <vscale x 2 x i32> undef)
@@ -217,43 +204,40 @@ define <vscale x 2 x i64> @masked_sload_sv2i32_to_sv2i64(i32* %base, <vscale x 2
 
 ; 2-lane truncating contiguous stores.
 
-define void @masked_trunc_store_sv2i64_to_sv2i8(<vscale x 2 x i64> %val, i8 *%base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define void @masked_trunc_store_sv2i64_to_sv2i8(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv2i64_to_sv2i8:
 ; CHECK-NEXT: st1b { z0.d }, p0, [x0, x1]
 ; CHECK-NEXT: ret
-  %base_i8 = getelementptr i8, i8* %base, i64 %offset
-  %base_addr = bitcast i8* %base_i8 to <vscale x 2 x i8>*
+  %base_i8 = getelementptr i8, ptr %base, i64 %offset
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i8>
   call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %trunc,
-                                      <vscale x 2 x i8> *%base_addr,
+                                      ptr %base_i8,
                                       i32 1,
                                       <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_trunc_store_sv2i64_to_sv2i16(<vscale x 2 x i64> %val, i16 *%base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define void @masked_trunc_store_sv2i64_to_sv2i16(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv2i64_to_sv2i16:
 ; CHECK-NEXT: st1h { z0.d }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
-  %base_i16 = getelementptr i16, i16* %base, i64 %offset
-  %base_addr = bitcast i16* %base_i16 to <vscale x 2 x i16>*
+  %base_i16 = getelementptr i16, ptr %base, i64 %offset
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i16>
   call void @llvm.masked.store.nxv2i16(<vscale x 2 x i16> %trunc,
-                                       <vscale x 2 x i16> *%base_addr,
+                                       ptr %base_i16,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
 }
 
-define void @masked_trunc_store_sv2i64_to_sv2i32(<vscale x 2 x i64> %val, i32 *%base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define void @masked_trunc_store_sv2i64_to_sv2i32(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv2i64_to_sv2i32:
 ; CHECK-NEXT: st1w { z0.d }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT: ret
-  %base_i32 = getelementptr i32, i32* %base, i64 %offset
-  %base_addr = bitcast i32* %base_i32 to <vscale x 2 x i32>*
+  %base_i32 = getelementptr i32, ptr %base, i64 %offset
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i32>
   call void @llvm.masked.store.nxv2i32(<vscale x 2 x i32> %trunc,
-                                       <vscale x 2 x i32> *%base_addr,
+                                       ptr %base_i32,
                                        i32 1,
                                        <vscale x 2 x i1> %mask)
   ret void
@@ -261,91 +245,86 @@ define void @masked_trunc_store_sv2i64_to_sv2i32(<vscale x 2 x i64> %val, i32 *%
 
 ; 4-lane contiguous load/stores.
 
-define void @test_masked_ldst_sv4i8(i8 * %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv4i8(ptr %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4i8:
 ; CHECK-NEXT: ld1b { z[[DATA:[0-9]+]].s }, p0/z, [x0, x1]
 ; CHECK-NEXT: st1b { z[[DATA]].s }, p0, [x0, x1]
 ; CHECK-NEXT: ret
-  %base_i8 = getelementptr i8, i8* %base, i64 %offset
-  %base_addr = bitcast i8* %base_i8 to <vscale x 4 x i8>*
-  %data = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %base_addr,
+  %base_i8 = getelementptr i8, ptr %base, i64 %offset
+  %data = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 4 x i1> %mask,
                                                           <vscale x 4 x i8> undef)
   call void @llvm.masked.store.nxv4i8(<vscale x 4 x i8> %data,
-                                      <vscale x 4 x i8>* %base_addr,
+                                      ptr %base_i8,
                                       i32 1,
                                       <vscale x 4 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv4i16(i16 * %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv4i16(ptr %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4i16:
 ; CHECK-NEXT: ld1h { z[[DATA:[0-9]+]].s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: st1h { z[[DATA]].s }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
-  %base_i16 = getelementptr i16, i16* %base, i64 %offset
-  %base_addr = bitcast i16* %base_i16 to <vscale x 4 x i16>*
-  %data = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>* %base_addr,
+  %base_i16 = getelementptr i16, ptr %base, i64 %offset
+  %data = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %base_i16,
                                                             i32 1,
                                                             <vscale x 4 x i1> %mask,
                                                             <vscale x 4 x i16> undef)
   call void @llvm.masked.store.nxv4i16(<vscale x 4 x i16> %data,
-                                       <vscale x 4 x i16>* %base_addr,
+                                       ptr %base_i16,
                                        i32 1,
                                        <vscale x 4 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv4i32(i32 * %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv4i32(ptr %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4i32:
 ; CHECK-NEXT: ld1w { z[[DATA:[0-9]+]].s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT: st1w { z[[DATA]].s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT: ret
-  %base_i32 = getelementptr i32, i32* %base, i64 %offset
-  %base_addr = bitcast i32* %base_i32 to <vscale x 4 x i32>*
-  %data = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>* %base_addr,
+  %base_i32 = getelementptr i32, ptr %base, i64 %offset
+  %data = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %base_i32,
                                                             i32 1,
                                                             <vscale x 4 x i1> %mask,
                                                             <vscale x 4 x i32> undef)
   call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %data,
-                                       <vscale x 4 x i32>* %base_addr,
+                                       ptr %base_i32,
                                        i32 1,
                                        <vscale x 4 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv4f16(half * %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv4f16(ptr %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4f16:
 ; CHECK-NEXT: ld1h { z[[DATA:[0-9]+]].s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: st1h { z[[DATA]].s }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
-  %base_f16 = getelementptr half, half* %base, i64 %offset
-  %base_addr = bitcast half* %base_f16 to <vscale x 4 x half>*
-  %data = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half>* %base_addr,
+  %base_f16 = getelementptr half, ptr %base, i64 %offset
+  %data = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(ptr %base_f16,
                                                              i32 1,
                                                              <vscale x 4 x i1> %mask,
                                                              <vscale x 4 x half> undef)
   call void @llvm.masked.store.nxv4f16(<vscale x 4 x half> %data,
-                                       <vscale x 4 x half>* %base_addr,
+                                       ptr %base_f16,
                                        i32 1,
                                        <vscale x 4 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv4f32(float * %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv4f32(ptr %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4f32:
 ; CHECK-NEXT: ld1w { z[[DATA:[0-9]+]].s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT: st1w { z[[DATA]].s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT: ret
-  %base_f32 = getelementptr float, float* %base, i64 %offset
-  %base_addr = bitcast float* %base_f32 to <vscale x 4 x float>*
-  %data = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float>* %base_addr,
+  %base_f32 = getelementptr float, ptr %base, i64 %offset
+  %data = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(ptr %base_f32,
                                                               i32 1,
                                                               <vscale x 4 x i1> %mask,
                                                               <vscale x 4 x float> undef)
   call void @llvm.masked.store.nxv4f32(<vscale x 4 x float> %data,
-                                       <vscale x 4 x float>* %base_addr,
+                                       ptr %base_f32,
                                        i32 1,
                                        <vscale x 4 x i1> %mask)
   ret void
@@ -353,13 +332,12 @@ define void @test_masked_ldst_sv4f32(float * %base, <vscale x 4 x i1> %mask, i64
 
 ; 4-lane zero/sign extended contiguous loads.
 
-define <vscale x 4 x i32> @masked_zload_sv4i8_to_sv4i32(i8* %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
+define <vscale x 4 x i32> @masked_zload_sv4i8_to_sv4i32(ptr %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_zload_sv4i8_to_sv4i32:
 ; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, x1]
 ; CHECK-NEXT: ret
-  %base_i8 = getelementptr i8, i8* %base, i64 %offset
-  %base_addr = bitcast i8* %base_i8 to <vscale x 4 x i8>*
-  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %base_addr,
+  %base_i8 = getelementptr i8, ptr %base, i64 %offset
+  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 4 x i1> %mask,
                                                           <vscale x 4 x i8> undef)
@@ -367,13 +345,12 @@ define <vscale x 4 x i32> @masked_zload_sv4i8_to_sv4i32(i8* %base, <vscale x 4 x
   ret <vscale x 4 x i32> %ext
 }
 
-define <vscale x 4 x i32> @masked_sload_sv4i8_to_sv4i32(i8* %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
+define <vscale x 4 x i32> @masked_sload_sv4i8_to_sv4i32(ptr %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_sload_sv4i8_to_sv4i32:
 ; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0, x1]
 ; CHECK-NEXT: ret
-  %base_i8 = getelementptr i8, i8* %base, i64 %offset
-  %base_addr = bitcast i8* %base_i8 to <vscale x 4 x i8>*
-  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %base_addr,
+  %base_i8 = getelementptr i8, ptr %base, i64 %offset
+  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 4 x i1> %mask,
                                                           <vscale x 4 x i8> undef)
@@ -381,13 +358,12 @@ define <vscale x 4 x i32> @masked_sload_sv4i8_to_sv4i32(i8* %base, <vscale x 4 x
   ret <vscale x 4 x i32> %ext
 }
 
-define <vscale x 4 x i32> @masked_zload_sv4i16_to_sv4i32(i16* %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
+define <vscale x 4 x i32> @masked_zload_sv4i16_to_sv4i32(ptr %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_zload_sv4i16_to_sv4i32:
 ; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
-  %base_i16 = getelementptr i16, i16* %base, i64 %offset
-  %base_addr = bitcast i16* %base_i16 to <vscale x 4 x i16>*
-  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>* %base_addr,
+  %base_i16 = getelementptr i16, ptr %base, i64 %offset
+  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %base_i16,
                                                             i32 1,
                                                             <vscale x 4 x i1> %mask,
                                                             <vscale x 4 x i16> undef)
@@ -395,13 +371,12 @@ define <vscale x 4 x i32> @masked_zload_sv4i16_to_sv4i32(i16* %base, <vscale x 4
   ret <vscale x 4 x i32> %ext
 }
 
-define <vscale x 4 x i32> @masked_sload_sv4i16_to_sv4i32(i16* %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
+define <vscale x 4 x i32> @masked_sload_sv4i16_to_sv4i32(ptr %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_sload_sv4i16_to_sv4i32:
 ; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
-  %base_i16 = getelementptr i16, i16* %base, i64 %offset
-  %base_addr = bitcast i16* %base_i16 to <vscale x 4 x i16>*
-  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>* %base_addr,
+  %base_i16 = getelementptr i16, ptr %base, i64 %offset
+  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(ptr %base_i16,
                                                             i32 1,
                                                             <vscale x 4 x i1> %mask,
                                                             <vscale x 4 x i16> undef)
@@ -411,29 +386,27 @@ define <vscale x 4 x i32> @masked_sload_sv4i16_to_sv4i32(i16* %base, <vscale x 4
 
 ; 4-lane truncating contiguous stores.
 
-define void @masked_trunc_store_sv4i32_to_sv4i8(<vscale x 4 x i32> %val, i8 *%base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
+define void @masked_trunc_store_sv4i32_to_sv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv4i32_to_sv4i8:
 ; CHECK-NEXT: st1b { z0.s }, p0, [x0, x1]
 ; CHECK-NEXT: ret
-  %base_i8 = getelementptr i8, i8* %base, i64 %offset
-  %base_addr = bitcast i8* %base_i8 to <vscale x 4 x i8>*
+  %base_i8 = getelementptr i8, ptr %base, i64 %offset
   %trunc = trunc <vscale x 4 x i32> %val to <vscale x 4 x i8>
   call void @llvm.masked.store.nxv4i8(<vscale x 4 x i8> %trunc,
-                                      <vscale x 4 x i8> *%base_addr,
+                                      ptr %base_i8,
                                       i32 1,
                                       <vscale x 4 x i1> %mask)
   ret void
 }
 
-define void @masked_trunc_store_sv4i32_to_sv4i16(<vscale x 4 x i32> %val, i16 *%base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
+define void @masked_trunc_store_sv4i32_to_sv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv4i32_to_sv4i16:
 ; CHECK-NEXT: st1h { z0.s }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
-  %base_i16 = getelementptr i16, i16* %base, i64 %offset
-  %base_addr = bitcast i16* %base_i16 to <vscale x 4 x i16>*
+  %base_i16 = getelementptr i16, ptr %base, i64 %offset
   %trunc = trunc <vscale x 4 x i32> %val to <vscale x 4 x i16>
   call void @llvm.masked.store.nxv4i16(<vscale x 4 x i16> %trunc,
-                                       <vscale x 4 x i16> *%base_addr,
+                                       ptr %base_i16,
                                        i32 1,
                                        <vscale x 4 x i1> %mask)
   ret void
@@ -441,73 +414,69 @@ define void @masked_trunc_store_sv4i32_to_sv4i16(<vscale x 4 x i32> %val, i16 *%
 
 ; 8-lane contiguous load/stores.
 
-define void @test_masked_ldst_sv8i8(i8 * %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv8i8(ptr %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv8i8:
 ; CHECK-NEXT: ld1b { z[[DATA:[0-9]+]].h }, p0/z, [x0, x1]
 ; CHECK-NEXT: st1b { z[[DATA]].h }, p0, [x0, x1]
 ; CHECK-NEXT: ret
-  %base_i8 = getelementptr i8, i8* %base, i64 %offset
-  %base_addr = bitcast i8* %base_i8 to <vscale x 8 x i8>*
-  %data = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %base_addr,
+  %base_i8 = getelementptr i8, ptr %base, i64 %offset
+  %data = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 8 x i1> %mask,
                                                           <vscale x 8 x i8> undef)
   call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %data,
-                                      <vscale x 8 x i8>* %base_addr,
+                                      ptr %base_i8,
                                       i32 1,
                                       <vscale x 8 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv8i16(i16 * %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv8i16(ptr %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv8i16:
 ; CHECK-NEXT: ld1h { z[[DATA:[0-9]+]].h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: st1h { z[[DATA]].h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
-  %base_i16 = getelementptr i16, i16* %base, i64 %offset
-  %base_addr = bitcast i16* %base_i16 to <vscale x 8 x i16>*
-  %data = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>* %base_addr,
+  %base_i16 = getelementptr i16, ptr %base, i64 %offset
+  %data = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %base_i16,
                                                             i32 1,
                                                             <vscale x 8 x i1> %mask,
                                                             <vscale x 8 x i16> undef)
   call void @llvm.masked.store.nxv8i16(<vscale x 8 x i16> %data,
-                                       <vscale x 8 x i16>* %base_addr,
+                                       ptr %base_i16,
                                        i32 1,
                                        <vscale x 8 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv8f16(half * %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv8f16(ptr %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv8f16:
 ; CHECK-NEXT: ld1h { z[[DATA:[0-9]+]].h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: st1h { z[[DATA]].h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
-  %base_f16 = getelementptr half, half* %base, i64 %offset
-  %base_addr = bitcast half* %base_f16 to <vscale x 8 x half>*
-  %data = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>* %base_addr,
+  %base_f16 = getelementptr half, ptr %base, i64 %offset
+  %data = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(ptr %base_f16,
                                                              i32 1,
                                                              <vscale x 8 x i1> %mask,
                                                              <vscale x 8 x half> undef)
   call void @llvm.masked.store.nxv8f16(<vscale x 8 x half> %data,
-                                       <vscale x 8 x half>* %base_addr,
+                                       ptr %base_f16,
                                        i32 1,
                                        <vscale x 8 x i1> %mask)
   ret void
 }
 
-define void @test_masked_ldst_sv8bf16(bfloat * %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind #0 {
+define void @test_masked_ldst_sv8bf16(ptr %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind #0 {
 ; CHECK-LABEL: test_masked_ldst_sv8bf16:
 ; CHECK-NEXT: ld1h { z[[DATA:[0-9]+]].h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: st1h { z[[DATA]].h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
-  %base_f16 = getelementptr bfloat, bfloat* %base, i64 %offset
-  %base_addr = bitcast bfloat* %base_f16 to <vscale x 8 x bfloat>*
-  %data = call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(<vscale x 8 x bfloat>* %base_addr,
+  %base_f16 = getelementptr bfloat, ptr %base, i64 %offset
+  %data = call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16(ptr %base_f16,
                                                                i32 1,
                                                                <vscale x 8 x i1> %mask,
                                                                <vscale x 8 x bfloat> undef)
   call void @llvm.masked.store.nxv8bf16(<vscale x 8 x bfloat> %data,
-                                        <vscale x 8 x bfloat>* %base_addr,
+                                        ptr %base_f16,
                                         i32 1,
                                         <vscale x 8 x i1> %mask)
   ret void
@@ -515,13 +484,12 @@ define void @test_masked_ldst_sv8bf16(bfloat * %base, <vscale x 8 x i1> %mask, i
 
 ; 8-lane zero/sign extended contiguous loads.
 
-define <vscale x 8 x i16> @masked_zload_sv8i8_to_sv8i16(i8* %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
+define <vscale x 8 x i16> @masked_zload_sv8i8_to_sv8i16(ptr %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_zload_sv8i8_to_sv8i16:
 ; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0, x1]
 ; CHECK-NEXT: ret
-  %base_i8 = getelementptr i8, i8* %base, i64 %offset
-  %base_addr = bitcast i8* %base_i8 to <vscale x 8 x i8>*
-  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %base_addr,
+  %base_i8 = getelementptr i8, ptr %base, i64 %offset
+  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 8 x i1> %mask,
                                                           <vscale x 8 x i8> undef)
@@ -529,13 +497,12 @@ define <vscale x 8 x i16> @masked_zload_sv8i8_to_sv8i16(i8* %base, <vscale x 8 x
   ret <vscale x 8 x i16> %ext
 }
 
-define <vscale x 8 x i16> @masked_sload_sv8i8_to_sv8i16(i8* %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
+define <vscale x 8 x i16> @masked_sload_sv8i8_to_sv8i16(ptr %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_sload_sv8i8_to_sv8i16:
 ; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0, x1]
 ; CHECK-NEXT: ret
-  %base_i8 = getelementptr i8, i8* %base, i64 %offset
-  %base_addr = bitcast i8* %base_i8 to <vscale x 8 x i8>*
-  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %base_addr,
+  %base_i8 = getelementptr i8, ptr %base, i64 %offset
+  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(ptr %base_i8,
                                                           i32 1,
                                                           <vscale x 8 x i1> %mask,
                                                           <vscale x 8 x i8> undef)
@@ -545,15 +512,14 @@ define <vscale x 8 x i16> @masked_sload_sv8i8_to_sv8i16(i8* %base, <vscale x 8 x
 
 ; 8-lane truncating contiguous stores.
 
-define void @masked_trunc_store_sv8i16_to_sv8i8(<vscale x 8 x i16> %val, i8 *%base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
+define void @masked_trunc_store_sv8i16_to_sv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: masked_trunc_store_sv8i16_to_sv8i8:
 ; CHECK-NEXT: st1b { z0.h }, p0, [x0, x1]
 ; CHECK-NEXT: ret
-  %base_i8 = getelementptr i8, i8* %base, i64 %offset
-  %base_addr = bitcast i8* %base_i8 to <vscale x 8 x i8>*
+  %base_i8 = getelementptr i8, ptr %base, i64 %offset
   %trunc = trunc <vscale x 8 x i16> %val to <vscale x 8 x i8>
   call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %trunc,
-                                      <vscale x 8 x i8> *%base_addr,
+                                      ptr %base_i8,
                                       i32 1,
                                       <vscale x 8 x i1> %mask)
   ret void
@@ -561,19 +527,18 @@ define void @masked_trunc_store_sv8i16_to_sv8i8(<vscale x 8 x i16> %val, i8 *%ba
 
 ; 16-lane contiguous load/stores.
 
-define void @test_masked_ldst_sv16i8(i8 * %base, <vscale x 16 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv16i8(ptr %base, <vscale x 16 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv16i8:
 ; CHECK-NEXT: ld1b { z[[DATA:[0-9]+]].b }, p0/z, [x0, x1]
 ; CHECK-NEXT: st1b { z[[DATA]].b }, p0, [x0, x1]
 ; CHECK-NEXT: ret
-  %base_i8 = getelementptr i8, i8* %base, i64 %offset
-  %base_addr = bitcast i8* %base_i8 to <vscale x 16 x i8>*
-  %data = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>* %base_addr,
+  %base_i8 = getelementptr i8, ptr %base, i64 %offset
+  %data = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %base_i8,
                                                             i32 1,
                                                             <vscale x 16 x i1> %mask,
                                                             <vscale x 16 x i8> undef)
   call void @llvm.masked.store.nxv16i8(<vscale x 16 x i8> %data,
-                                       <vscale x 16 x i8>* %base_addr,
+                                       ptr %base_i8,
                                        i32 1,
                                        <vscale x 16 x i1> %mask)
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-reg.ll
index 43863b37bed26..89c8e25e32e17 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-reg.ll
@@ -2,156 +2,156 @@
 
 ; 2-lane non-temporal load/stores
 
-define void @test_masked_ldst_sv2i64(i64* %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv2i64(ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2i64:
 ; CHECK-NEXT: ldnt1d { z[[DATA:[0-9]+]].d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT: stnt1d { z[[DATA]].d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT: ret
-  %gep = getelementptr i64, i64* %base, i64 %offset
+  %gep = getelementptr i64, ptr %base, i64 %offset
   %data = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1> %mask,
-                                                                  i64* %gep)
+                                                                  ptr %gep)
   call void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64> %data,
                                             <vscale x 2 x i1> %mask,
-                                            i64* %gep)
+                                            ptr %gep)
   ret void
 }
 
-define void @test_masked_ldst_sv2f64(double* %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv2f64(ptr %base, <vscale x 2 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv2f64:
 ; CHECK-NEXT: ldnt1d { z[[DATA:[0-9]+]].d }, p0/z, [x0, x1, lsl #3]
 ; CHECK-NEXT: stnt1d { z[[DATA]].d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT: ret
-  %gep = getelementptr double, double* %base, i64 %offset
+  %gep = getelementptr double, ptr %base, i64 %offset
   %data = call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.nxv2f64(<vscale x 2 x i1> %mask,
-                                                                    double* %gep)
+                                                                    ptr %gep)
   call void @llvm.aarch64.sve.stnt1.nxv2f64(<vscale x 2 x double> %data,
                                             <vscale x 2 x i1> %mask,
-                                            double* %gep)
+                                            ptr %gep)
   ret void
 }
 
 ; 4-lane non-temporal load/stores.
 
-define void @test_masked_ldst_sv4i32(i32* %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv4i32(ptr %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4i32:
 ; CHECK-NEXT: ldnt1w { z[[DATA:[0-9]+]].s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT: stnt1w { z[[DATA]].s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT: ret
-  %gep = getelementptr i32, i32* %base, i64 %offset
+  %gep = getelementptr i32, ptr %base, i64 %offset
   %data = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1> %mask,
-                                                                  i32* %gep)
+                                                                  ptr %gep)
   call void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32> %data,
                                             <vscale x 4 x i1> %mask,
-                                            i32* %gep)
+                                            ptr %gep)
   ret void
 }
 
-define void @test_masked_ldst_sv4f32(float* %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv4f32(ptr %base, <vscale x 4 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv4f32:
 ; CHECK-NEXT: ldnt1w { z[[DATA:[0-9]+]].s }, p0/z, [x0, x1, lsl #2]
 ; CHECK-NEXT: stnt1w { z[[DATA]].s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT: ret
-  %gep = getelementptr float, float* %base, i64 %offset
+  %gep = getelementptr float, ptr %base, i64 %offset
   %data = call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.nxv4f32(<vscale x 4 x i1> %mask,
-                                                                    float* %gep)
+                                                                    ptr %gep)
   call void @llvm.aarch64.sve.stnt1.nxv4f32(<vscale x 4 x float> %data,
                                             <vscale x 4 x i1> %mask,
-                                            float* %gep)
+                                            ptr %gep)
   ret void
 }
 
 
 ; 8-lane non-temporal load/stores.
 
-define void @test_masked_ldst_sv8i16(i16* %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv8i16(ptr %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv8i16:
 ; CHECK-NEXT: ldnt1h { z[[DATA:[0-9]+]].h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: stnt1h { z[[DATA]].h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
-  %gep = getelementptr i16, i16* %base, i64 %offset
+  %gep = getelementptr i16, ptr %base, i64 %offset
   %data = call <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1> %mask,
-                                                                  i16* %gep)
+                                                                  ptr %gep)
   call void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16> %data,
                                             <vscale x 8 x i1> %mask,
-                                            i16* %gep)
+                                            ptr %gep)
   ret void
 }
 
-define void @test_masked_ldst_sv8f16(half* %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv8f16(ptr %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv8f16:
 ; CHECK-NEXT: ldnt1h { z[[DATA:[0-9]+]].h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: stnt1h { z[[DATA]].h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
-  %gep = getelementptr half, half* %base, i64 %offset
+  %gep = getelementptr half, ptr %base, i64 %offset
   %data = call <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1> %mask,
-                                                                   half* %gep)
+                                                                   ptr %gep)
   call void @llvm.aarch64.sve.stnt1.nxv8f16(<vscale x 8 x half> %data,
                                             <vscale x 8 x i1> %mask,
-                                            half* %gep)
+                                            ptr %gep)
   ret void
 }
 
-define void @test_masked_ldst_sv8bf16(bfloat* %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind #0 {
+define void @test_masked_ldst_sv8bf16(ptr %base, <vscale x 8 x i1> %mask, i64 %offset) nounwind #0 {
 ; CHECK-LABEL: test_masked_ldst_sv8bf16:
 ; CHECK-NEXT: ldnt1h { z[[DATA:[0-9]+]].h }, p0/z, [x0, x1, lsl #1]
 ; CHECK-NEXT: stnt1h { z[[DATA]].h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT: ret
-  %gep = getelementptr bfloat, bfloat* %base, i64 %offset
+  %gep = getelementptr bfloat, ptr %base, i64 %offset
   %data = call <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnt1.nxv8bf16(<vscale x 8 x i1> %mask,
-                                                                      bfloat* %gep)
+                                                                      ptr %gep)
   call void @llvm.aarch64.sve.stnt1.nxv8bf16(<vscale x 8 x bfloat> %data,
                                              <vscale x 8 x i1> %mask,
-                                             bfloat* %gep)
+                                             ptr %gep)
   ret void
 }
 
 ; 16-lane non-temporal load/stores.
 
-define void @test_masked_ldst_sv16i8(i8* %base, <vscale x 16 x i1> %mask, i64 %offset) nounwind {
+define void @test_masked_ldst_sv16i8(ptr %base, <vscale x 16 x i1> %mask, i64 %offset) nounwind {
 ; CHECK-LABEL: test_masked_ldst_sv16i8:
 ; CHECK-NEXT: ldnt1b { z[[DATA:[0-9]+]].b }, p0/z, [x0, x1]
 ; CHECK-NEXT: stnt1b { z[[DATA]].b }, p0, [x0, x1]
 ; CHECK-NEXT: ret
-  %gep = getelementptr i8, i8* %base, i64 %offset
+  %gep = getelementptr i8, ptr %base, i64 %offset
   %data = call <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1> %mask,
-                                                                  i8* %gep)
+                                                                  ptr %gep)
   call void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8> %data,
                                             <vscale x 16 x i1> %mask,
-                                            i8* %gep)
+                                            ptr %gep)
   ret void
 }
 
 ; 2-element non-temporal loads.
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.nxv2f64(<vscale x 2 x i1>, double*)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.nxv2i64(<vscale x 2 x i1>, ptr)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.nxv2f64(<vscale x 2 x i1>, ptr)
 
 ; 4-element non-temporal loads.
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.nxv4f32(<vscale x 4 x i1>, float*)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1>, ptr)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.nxv4f32(<vscale x 4 x i1>, ptr)
 
 ; 8-element non-temporal loads.
-declare <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1>, half*)
-declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnt1.nxv8bf16(<vscale x 8 x i1>, bfloat*)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ldnt1.nxv8f16(<vscale x 8 x i1>, ptr)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnt1.nxv8bf16(<vscale x 8 x i1>, ptr)
 
 ; 16-element non-temporal loads.
-declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1>, ptr)
 
 ; 2-element non-temporal stores.
-declare void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
-declare void @llvm.aarch64.sve.stnt1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)
+declare void @llvm.aarch64.sve.stnt1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, ptr)
+declare void @llvm.aarch64.sve.stnt1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, ptr)
 
 ; 4-element non-temporal stores.
-declare void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
-declare void @llvm.aarch64.sve.stnt1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*)
+declare void @llvm.aarch64.sve.stnt1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, ptr)
+declare void @llvm.aarch64.sve.stnt1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, ptr)
 
 ; 8-element non-temporal stores.
-declare void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
-declare void @llvm.aarch64.sve.stnt1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)
-declare void @llvm.aarch64.sve.stnt1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, bfloat*)
+declare void @llvm.aarch64.sve.stnt1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.stnt1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, ptr)
+declare void @llvm.aarch64.sve.stnt1.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, ptr)
 
 ; 16-element non-temporal stores.
-declare void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
+declare void @llvm.aarch64.sve.stnt1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, ptr)
 
 ; +bf16 is required for the bfloat version.
 attributes #0 = { "target-features"="+sve,+bf16" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-punpklo-combine.ll b/llvm/test/CodeGen/AArch64/sve-punpklo-combine.ll
index b0046ff051f62..970f55225daf5 100644
--- a/llvm/test/CodeGen/AArch64/sve-punpklo-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-punpklo-combine.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s | FileCheck %s
 target triple = "aarch64-unknown-linux-gnu"
 
-define <vscale x 8 x i1> @masked_load_sext_i8i16(i8* %ap, <vscale x 16 x i8> %b) #0 {
+define <vscale x 8 x i1> @masked_load_sext_i8i16(ptr %ap, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: masked_load_sext_i8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b, vl32
@@ -19,7 +19,7 @@ define <vscale x 8 x i1> @masked_load_sext_i8i16(i8* %ap, <vscale x 16 x i8> %b)
 }
 
 ; This negative test ensures the two ptrues have the same vl
-define <vscale x 8 x i1> @masked_load_sext_i8i16_ptrue_vl(i8* %ap, <vscale x 16 x i8> %b) #0 {
+define <vscale x 8 x i1> @masked_load_sext_i8i16_ptrue_vl(ptr %ap, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: masked_load_sext_i8i16_ptrue_vl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b, vl64
@@ -38,7 +38,7 @@ define <vscale x 8 x i1> @masked_load_sext_i8i16_ptrue_vl(i8* %ap, <vscale x 16
 }
 
 ; This negative test enforces that both predicates are ptrues
-define <vscale x 8 x i1> @masked_load_sext_i8i16_parg(i8* %ap, <vscale x 16 x i8> %b, <vscale x 16 x i1> %p0) #0 {
+define <vscale x 8 x i1> @masked_load_sext_i8i16_parg(ptr %ap, <vscale x 16 x i8> %b, <vscale x 16 x i1> %p0) #0 {
 ; CHECK-LABEL: masked_load_sext_i8i16_parg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
@@ -54,7 +54,7 @@ define <vscale x 8 x i1> @masked_load_sext_i8i16_parg(i8* %ap, <vscale x 16 x i8
   ret <vscale x 8 x i1> %cmp1
 }
 
-define <vscale x 4 x i1> @masked_load_sext_i8i32(i8* %ap, <vscale x 16 x i8> %b) #0 {
+define <vscale x 4 x i1> @masked_load_sext_i8i32(ptr %ap, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: masked_load_sext_i8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b, vl32
@@ -72,7 +72,7 @@ define <vscale x 4 x i1> @masked_load_sext_i8i32(i8* %ap, <vscale x 16 x i8> %b)
 }
 
 ; This negative test ensures the two ptrues have the same vl
-define <vscale x 4 x i1> @masked_load_sext_i8i32_ptrue_vl(i8* %ap, <vscale x 16 x i8> %b) #0 {
+define <vscale x 4 x i1> @masked_load_sext_i8i32_ptrue_vl(ptr %ap, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: masked_load_sext_i8i32_ptrue_vl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b, vl64
@@ -92,7 +92,7 @@ define <vscale x 4 x i1> @masked_load_sext_i8i32_ptrue_vl(i8* %ap, <vscale x 16
 }
 
 ; This negative test enforces that both predicates are ptrues
-define <vscale x 4 x i1> @masked_load_sext_i8i32_parg(i8* %ap, <vscale x 16 x i8> %b, <vscale x 16 x i1> %p0) #0 {
+define <vscale x 4 x i1> @masked_load_sext_i8i32_parg(ptr %ap, <vscale x 16 x i8> %b, <vscale x 16 x i1> %p0) #0 {
 ; CHECK-LABEL: masked_load_sext_i8i32_parg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
@@ -109,7 +109,7 @@ define <vscale x 4 x i1> @masked_load_sext_i8i32_parg(i8* %ap, <vscale x 16 x i8
   ret <vscale x 4 x i1> %cmp1
 }
 
-define <vscale x 2 x i1> @masked_load_sext_i8i64(i8* %ap, <vscale x 16 x i8> %b) #0 {
+define <vscale x 2 x i1> @masked_load_sext_i8i64(ptr %ap, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: masked_load_sext_i8i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b, vl32
@@ -128,7 +128,7 @@ define <vscale x 2 x i1> @masked_load_sext_i8i64(i8* %ap, <vscale x 16 x i8> %b)
 }
 
 ; This negative test ensures the two ptrues have the same vl
-define <vscale x 2 x i1> @masked_load_sext_i8i64_ptrue_vl(i8* %ap, <vscale x 16 x i8> %b) #0 {
+define <vscale x 2 x i1> @masked_load_sext_i8i64_ptrue_vl(ptr %ap, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: masked_load_sext_i8i64_ptrue_vl:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b, vl64
@@ -149,7 +149,7 @@ define <vscale x 2 x i1> @masked_load_sext_i8i64_ptrue_vl(i8* %ap, <vscale x 16
 }
 
 ; This negative test enforces that both predicates are ptrues
-define <vscale x 2 x i1> @masked_load_sext_i8i64_parg(i8* %ap, <vscale x 16 x i8> %b, <vscale x 16 x i1> %p0) #0 {
+define <vscale x 2 x i1> @masked_load_sext_i8i64_parg(ptr %ap, <vscale x 16 x i8> %b, <vscale x 16 x i1> %p0) #0 {
 ; CHECK-LABEL: masked_load_sext_i8i64_parg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
@@ -168,7 +168,7 @@ define <vscale x 2 x i1> @masked_load_sext_i8i64_parg(i8* %ap, <vscale x 16 x i8
 }
 
 ; This negative test enforces that the ptrues have a specified vl
-define <vscale x 8 x i1> @masked_load_sext_i8i16_ptrue_all(i8* %ap, <vscale x 16 x i8> %b) #0 {
+define <vscale x 8 x i1> @masked_load_sext_i8i16_ptrue_all(ptr %ap, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: masked_load_sext_i8i16_ptrue_all:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b, vl64
@@ -187,7 +187,7 @@ define <vscale x 8 x i1> @masked_load_sext_i8i16_ptrue_all(i8* %ap, <vscale x 16
 }
 
 ; This negative test enforces that the ptrues have a specified vl
-define <vscale x 4 x i1> @masked_load_sext_i8i32_ptrue_all(i8* %ap, <vscale x 16 x i8> %b) #0 {
+define <vscale x 4 x i1> @masked_load_sext_i8i32_ptrue_all(ptr %ap, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: masked_load_sext_i8i32_ptrue_all:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b, vl64
@@ -207,7 +207,7 @@ define <vscale x 4 x i1> @masked_load_sext_i8i32_ptrue_all(i8* %ap, <vscale x 16
 }
 
 ; This negative test enforces that the ptrues have a specified vl
-define <vscale x 2 x i1> @masked_load_sext_i8i64_ptrue_all(i8* %ap, <vscale x 16 x i8> %b) #0 {
+define <vscale x 2 x i1> @masked_load_sext_i8i64_ptrue_all(ptr %ap, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: masked_load_sext_i8i64_ptrue_all:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b

diff  --git a/llvm/test/CodeGen/AArch64/sve-redundant-store.ll b/llvm/test/CodeGen/AArch64/sve-redundant-store.ll
index 0ee25ee32dc46..c2759a6e026fb 100644
--- a/llvm/test/CodeGen/AArch64/sve-redundant-store.ll
+++ b/llvm/test/CodeGen/AArch64/sve-redundant-store.ll
@@ -11,7 +11,7 @@
 
 ; Update me: Until dead store elimination is improved in DAGCombine, this will contain a redundant store.
 ;
-define void @redundant_store(i32* nocapture %p, <vscale x 4 x i32> %v) {
+define void @redundant_store(ptr nocapture %p, <vscale x 4 x i32> %v) {
 ; CHECK-LABEL: redundant_store:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1
@@ -19,8 +19,7 @@ define void @redundant_store(i32* nocapture %p, <vscale x 4 x i32> %v) {
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
-  store i32 1, i32* %p, align 4
-  %1 = bitcast i32* %p to <vscale x 4 x i32>*
-  store <vscale x 4 x i32> %v, <vscale x 4 x i32>* %1, align 16
+  store i32 1, ptr %p, align 4
+  store <vscale x 4 x i32> %v, ptr %p, align 16
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve-setcc.ll b/llvm/test/CodeGen/AArch64/sve-setcc.ll
index 60ee9b34d1760..7f9d9e86d4584 100644
--- a/llvm/test/CodeGen/AArch64/sve-setcc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-setcc.ll
@@ -17,7 +17,7 @@ entry:
   br i1 %1, label %if.then, label %if.end
 
 if.then:
-  tail call void @llvm.masked.store.nxv8i16.p0nxv8i16(<vscale x 8 x i16> %in, <vscale x 8 x i16>* %out, i32 2, <vscale x 8 x i1> %pg)
+  tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> %in, ptr %out, i32 2, <vscale x 8 x i1> %pg)
   br label %if.end
 
 if.end:
@@ -40,7 +40,7 @@ entry:
   br i1 %1, label %if.end, label %if.then
 
 if.then:
-  tail call void @llvm.masked.store.nxv8i16.p0nxv8i16(<vscale x 8 x i16> %in, <vscale x 8 x i16>* %out, i32 2, <vscale x 8 x i1> %pg)
+  tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> %in, ptr %out, i32 2, <vscale x 8 x i1> %pg)
   br label %if.end
 
 if.end:
@@ -66,7 +66,7 @@ entry:
   br i1 %1, label %if.then, label %if.end
 
 if.then:
-  tail call void @llvm.masked.store.nxv8i16.p0nxv8i16(<vscale x 8 x i16> %in, <vscale x 8 x i16>* %out, i32 2, <vscale x 8 x i1> %pg)
+  tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> %in, ptr %out, i32 2, <vscale x 8 x i1> %pg)
   br label %if.end
 
 if.end:
@@ -123,4 +123,4 @@ declare i1 @llvm.aarch64.sve.ptest.last.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x
 
 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
 
-declare void @llvm.masked.store.nxv8i16.p0nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32, <vscale x 8 x i1>)
+declare void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16>, ptr, i32, <vscale x 8 x i1>)

diff  --git a/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-reg.ll
index 846c479c50806..b659ded53a8c1 100644
--- a/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-reg.ll
@@ -3,278 +3,256 @@
 
 ; ST1B
 
-define void @st1_nxv16i8(i8* %addr, i64 %off, <vscale x 16 x i8> %val) {
+define void @st1_nxv16i8(ptr %addr, i64 %off, <vscale x 16 x i8> %val) {
 ; CHECK-LABEL: st1_nxv16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %off
-  %ptrcast = bitcast i8* %ptr to <vscale x 16 x i8>*
-  store <vscale x 16 x i8> %val, <vscale x 16 x i8>* %ptrcast
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
+  store <vscale x 16 x i8> %val, ptr %ptr
   ret void
 }
 
-define void @st1_nxv16i8_bitcast_from_i16(i8* %addr, i64 %off, <vscale x 8 x i16> %val) {
+define void @st1_nxv16i8_bitcast_from_i16(ptr %addr, i64 %off, <vscale x 8 x i16> %val) {
 ; CHECK-LABEL: st1_nxv16i8_bitcast_from_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %off
-  %ptrcast = bitcast i8* %ptr to <vscale x 8 x i16>*
-  store <vscale x 8 x i16> %val, <vscale x 8 x i16>* %ptrcast
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
+  store <vscale x 8 x i16> %val, ptr %ptr
   ret void
 }
 
-define void @st1_nxv16i8_bitcast_from_i32(i8* %addr, i64 %off, <vscale x 4 x i32> %val) {
+define void @st1_nxv16i8_bitcast_from_i32(ptr %addr, i64 %off, <vscale x 4 x i32> %val) {
 ; CHECK-LABEL: st1_nxv16i8_bitcast_from_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %off
-  %ptrcast = bitcast i8* %ptr to <vscale x 4 x i32>*
-  store <vscale x 4 x i32> %val, <vscale x 4 x i32>* %ptrcast
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
+  store <vscale x 4 x i32> %val, ptr %ptr
   ret void
 }
 
-define void @st1_nxv16i8_bitcast_from_i64(i8* %addr, i64 %off, <vscale x 2 x i64> %val) {
+define void @st1_nxv16i8_bitcast_from_i64(ptr %addr, i64 %off, <vscale x 2 x i64> %val) {
 ; CHECK-LABEL: st1_nxv16i8_bitcast_from_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %off
-  %ptrcast = bitcast i8* %ptr to <vscale x 2 x i64>*
-  store <vscale x 2 x i64> %val, <vscale x 2 x i64>* %ptrcast
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
+  store <vscale x 2 x i64> %val, ptr %ptr
   ret void
 }
 
-define void @st1_nxv8i16_trunc8(i8* %addr, i64 %off, <vscale x 8 x i16> %val) {
+define void @st1_nxv8i16_trunc8(ptr %addr, i64 %off, <vscale x 8 x i16> %val) {
 ; CHECK-LABEL: st1_nxv8i16_trunc8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %off
-  %ptrcast = bitcast i8* %ptr to <vscale x 8 x i8>*
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
   %trunc = trunc <vscale x 8 x i16> %val to <vscale x 8 x i8>
-  store <vscale x 8 x i8> %trunc, <vscale x 8 x i8>* %ptrcast
+  store <vscale x 8 x i8> %trunc, ptr %ptr
   ret void
 }
 
-define void @st1_nxv4i32_trunc8(i8* %addr, i64 %off, <vscale x 4 x i32> %val) {
+define void @st1_nxv4i32_trunc8(ptr %addr, i64 %off, <vscale x 4 x i32> %val) {
 ; CHECK-LABEL: st1_nxv4i32_trunc8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %off
-  %ptrcast = bitcast i8* %ptr to <vscale x 4 x i8>*
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
   %trunc = trunc <vscale x 4 x i32> %val to <vscale x 4 x i8>
-  store <vscale x 4 x i8> %trunc, <vscale x 4 x i8>* %ptrcast
+  store <vscale x 4 x i8> %trunc, ptr %ptr
   ret void
 }
 
-define void @st1_nxv2i64_trunc8(i8* %addr, i64 %off, <vscale x 2 x i64> %val) {
+define void @st1_nxv2i64_trunc8(ptr %addr, i64 %off, <vscale x 2 x i64> %val) {
 ; CHECK-LABEL: st1_nxv2i64_trunc8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x0, x1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i8, i8* %addr, i64 %off
-  %ptrcast = bitcast i8* %ptr to <vscale x 2 x i8>*
+  %ptr = getelementptr inbounds i8, ptr %addr, i64 %off
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i8>
-  store <vscale x 2 x i8> %trunc, <vscale x 2 x i8>* %ptrcast
+  store <vscale x 2 x i8> %trunc, ptr %ptr
   ret void
 }
 
 ; ST1H
 
-define void @st1_nxv8i16(i16* %addr, i64 %off, <vscale x 8 x i16> %val) {
+define void @st1_nxv8i16(ptr %addr, i64 %off, <vscale x 8 x i16> %val) {
 ; CHECK-LABEL: st1_nxv8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i16, i16* %addr, i64 %off
-  %ptrcast = bitcast i16* %ptr to <vscale x 8 x i16>*
-  store <vscale x 8 x i16> %val, <vscale x 8 x i16>* %ptrcast
+  %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
+  store <vscale x 8 x i16> %val, ptr %ptr
   ret void
 }
 
-define void @st1_nxv4i32_trunc16(i16* %addr, i64 %off, <vscale x 4 x i32> %val) {
+define void @st1_nxv4i32_trunc16(ptr %addr, i64 %off, <vscale x 4 x i32> %val) {
 ; CHECK-LABEL: st1_nxv4i32_trunc16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i16, i16* %addr, i64 %off
-  %ptrcast = bitcast i16* %ptr to <vscale x 4 x i16>*
+  %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
   %trunc = trunc <vscale x 4 x i32> %val to <vscale x 4 x i16>
-  store <vscale x 4 x i16> %trunc, <vscale x 4 x i16>* %ptrcast
+  store <vscale x 4 x i16> %trunc, ptr %ptr
   ret void
 }
 
-define void @st1_nxv2i64_trunc16(i16* %addr, i64 %off, <vscale x 2 x i64> %val) {
+define void @st1_nxv2i64_trunc16(ptr %addr, i64 %off, <vscale x 2 x i64> %val) {
 ; CHECK-LABEL: st1_nxv2i64_trunc16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i16, i16* %addr, i64 %off
-  %ptrcast = bitcast i16* %ptr to <vscale x 2 x i16>*
+  %ptr = getelementptr inbounds i16, ptr %addr, i64 %off
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i16>
-  store <vscale x 2 x i16> %trunc, <vscale x 2 x i16>* %ptrcast
+  store <vscale x 2 x i16> %trunc, ptr %ptr
   ret void
 }
 
-define void @st1_nxv8f16(half* %addr, i64 %off, <vscale x 8 x half> %val) {
+define void @st1_nxv8f16(ptr %addr, i64 %off, <vscale x 8 x half> %val) {
 ; CHECK-LABEL: st1_nxv8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds half, half* %addr, i64 %off
-  %ptrcast = bitcast half* %ptr to <vscale x 8 x half>*
-  store <vscale x 8 x half> %val, <vscale x 8 x half>* %ptrcast
+  %ptr = getelementptr inbounds half, ptr %addr, i64 %off
+  store <vscale x 8 x half> %val, ptr %ptr
   ret void
 }
 
-define void @st1_nxv8bf16(bfloat* %addr, i64 %off, <vscale x 8 x bfloat> %val) {
+define void @st1_nxv8bf16(ptr %addr, i64 %off, <vscale x 8 x bfloat> %val) {
 ; CHECK-LABEL: st1_nxv8bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds bfloat, bfloat* %addr, i64 %off
-  %ptrcast = bitcast bfloat* %ptr to <vscale x 8 x bfloat>*
-  store <vscale x 8 x bfloat> %val, <vscale x 8 x bfloat>* %ptrcast
+  %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
+  store <vscale x 8 x bfloat> %val, ptr %ptr
   ret void
 }
 
-define void @st1_nxv4f16(half* %addr, i64 %off, <vscale x 4 x half> %val) {
+define void @st1_nxv4f16(ptr %addr, i64 %off, <vscale x 4 x half> %val) {
 ; CHECK-LABEL: st1_nxv4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds half, half* %addr, i64 %off
-  %ptrcast = bitcast half* %ptr to <vscale x 4 x half>*
-  store <vscale x 4 x half> %val, <vscale x 4 x half>* %ptrcast
+  %ptr = getelementptr inbounds half, ptr %addr, i64 %off
+  store <vscale x 4 x half> %val, ptr %ptr
   ret void
 }
 
-define void @st1_nxv4bf16(bfloat* %addr, i64 %off, <vscale x 4 x bfloat> %val) {
+define void @st1_nxv4bf16(ptr %addr, i64 %off, <vscale x 4 x bfloat> %val) {
 ; CHECK-LABEL: st1_nxv4bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds bfloat, bfloat* %addr, i64 %off
-  %ptrcast = bitcast bfloat* %ptr to <vscale x 4 x bfloat>*
-  store <vscale x 4 x bfloat> %val, <vscale x 4 x bfloat>* %ptrcast
+  %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
+  store <vscale x 4 x bfloat> %val, ptr %ptr
   ret void
 }
 
-define void @st1_nxv2f16(half* %addr, i64 %off, <vscale x 2 x half> %val) {
+define void @st1_nxv2f16(ptr %addr, i64 %off, <vscale x 2 x half> %val) {
 ; CHECK-LABEL: st1_nxv2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds half, half* %addr, i64 %off
-  %ptrcast = bitcast half* %ptr to <vscale x 2 x half>*
-  store <vscale x 2 x half> %val, <vscale x 2 x half>* %ptrcast
+  %ptr = getelementptr inbounds half, ptr %addr, i64 %off
+  store <vscale x 2 x half> %val, ptr %ptr
   ret void
 }
 
-define void @st1_nxv2bf16(bfloat* %addr, i64 %off, <vscale x 2 x bfloat> %val) {
+define void @st1_nxv2bf16(ptr %addr, i64 %off, <vscale x 2 x bfloat> %val) {
 ; CHECK-LABEL: st1_nxv2bf16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x0, x1, lsl #1]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds bfloat, bfloat* %addr, i64 %off
-  %ptrcast = bitcast bfloat* %ptr to <vscale x 2 x bfloat>*
-  store <vscale x 2 x bfloat> %val, <vscale x 2 x bfloat>* %ptrcast
+  %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off
+  store <vscale x 2 x bfloat> %val, ptr %ptr
   ret void
 }
 
 ; ST1W
 
-define void @st1_nxv4i32(i32* %addr, i64 %off, <vscale x 4 x i32> %val) {
+define void @st1_nxv4i32(ptr %addr, i64 %off, <vscale x 4 x i32> %val) {
 ; CHECK-LABEL: st1_nxv4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i32, i32* %addr, i64 %off
-  %ptrcast = bitcast i32* %ptr to <vscale x 4 x i32>*
-  store <vscale x 4 x i32> %val, <vscale x 4 x i32>* %ptrcast
+  %ptr = getelementptr inbounds i32, ptr %addr, i64 %off
+  store <vscale x 4 x i32> %val, ptr %ptr
   ret void
 }
 
-define void @st1_nxv2i64_trunc32(i32* %addr, i64 %off, <vscale x 2 x i64> %val) {
+define void @st1_nxv2i64_trunc32(ptr %addr, i64 %off, <vscale x 2 x i64> %val) {
 ; CHECK-LABEL: st1_nxv2i64_trunc32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i32, i32* %addr, i64 %off
-  %ptrcast = bitcast i32* %ptr to <vscale x 2 x i32>*
+  %ptr = getelementptr inbounds i32, ptr %addr, i64 %off
   %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i32>
-  store <vscale x 2 x i32> %trunc, <vscale x 2 x i32>* %ptrcast
+  store <vscale x 2 x i32> %trunc, ptr %ptr
   ret void
 }
 
-define void @st1_nxv4f32(float* %addr, i64 %off, <vscale x 4 x float> %val) {
+define void @st1_nxv4f32(ptr %addr, i64 %off, <vscale x 4 x float> %val) {
 ; CHECK-LABEL: st1_nxv4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds float, float* %addr, i64 %off
-  %ptrcast = bitcast float* %ptr to <vscale x 4 x float>*
-  store <vscale x 4 x float> %val, <vscale x 4 x float>* %ptrcast
+  %ptr = getelementptr inbounds float, ptr %addr, i64 %off
+  store <vscale x 4 x float> %val, ptr %ptr
   ret void
 }
 
-define void @st1_nxv2f32(float* %addr, i64 %off, <vscale x 2 x float> %val) {
+define void @st1_nxv2f32(ptr %addr, i64 %off, <vscale x 2 x float> %val) {
 ; CHECK-LABEL: st1_nxv2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0, x1, lsl #2]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds float, float* %addr, i64 %off
-  %ptrcast = bitcast float* %ptr to <vscale x 2 x float>*
-  store <vscale x 2 x float> %val, <vscale x 2 x float>* %ptrcast
+  %ptr = getelementptr inbounds float, ptr %addr, i64 %off
+  store <vscale x 2 x float> %val, ptr %ptr
   ret void
 }
 
 ; ST1D
 
-define void @st1_nxv2i64(i64* %addr, i64 %off, <vscale x 2 x i64> %val) {
+define void @st1_nxv2i64(ptr %addr, i64 %off, <vscale x 2 x i64> %val) {
 ; CHECK-LABEL: st1_nxv2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds i64, i64* %addr, i64 %off
-  %ptrcast = bitcast i64* %ptr to <vscale x 2 x i64>*
-  store <vscale x 2 x i64> %val, <vscale x 2 x i64>* %ptrcast
+  %ptr = getelementptr inbounds i64, ptr %addr, i64 %off
+  store <vscale x 2 x i64> %val, ptr %ptr
   ret void
 }
 
-define void @st1_nxv2f64(double* %addr, i64 %off, <vscale x 2 x double> %val) {
+define void @st1_nxv2f64(ptr %addr, i64 %off, <vscale x 2 x double> %val) {
 ; CHECK-LABEL: st1_nxv2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0, x1, lsl #3]
 ; CHECK-NEXT:    ret
-  %ptr = getelementptr inbounds double, double* %addr, i64 %off
-  %ptrcast = bitcast double* %ptr to <vscale x 2 x double>*
-  store <vscale x 2 x double> %val, <vscale x 2 x double>* %ptrcast
+  %ptr = getelementptr inbounds double, ptr %addr, i64 %off
+  store <vscale x 2 x double> %val, ptr %ptr
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll
index 3bed6b6c178b1..66b002928927e 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll
@@ -3,7 +3,7 @@
 
 target triple = "aarch64-unknown-linux-gnu"
 
-define void @bitcast_v4i8(<4 x i8> *%a, <4 x i8>* %b) #0 {
+define void @bitcast_v4i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: bitcast_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
@@ -11,37 +11,37 @@ define void @bitcast_v4i8(<4 x i8> *%a, <4 x i8>* %b) #0 {
 ; CHECK-NEXT:    uunpklo z0.h, z0.b
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x1]
 ; CHECK-NEXT:    ret
-  %load = load volatile <4 x i8>, <4 x i8>* %a
+  %load = load volatile <4 x i8>, ptr %a
   %cast = bitcast <4 x i8> %load to <4 x i8>
-  store volatile <4 x i8> %cast, <4 x i8>* %b
+  store volatile <4 x i8> %cast, ptr %b
   ret void
 }
 
-define void @bitcast_v8i8(<8 x i8> *%a, <8 x i8>* %b) #0 {
+define void @bitcast_v8i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: bitcast_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    str d0, [x1]
 ; CHECK-NEXT:    ret
-  %load = load volatile <8 x i8>, <8 x i8>* %a
+  %load = load volatile <8 x i8>, ptr %a
   %cast = bitcast <8 x i8> %load to <8 x i8>
-  store volatile <8 x i8> %cast, <8 x i8>* %b
+  store volatile <8 x i8> %cast, ptr %b
   ret void
 }
 
-define void @bitcast_v16i8(<16 x i8> *%a, <16 x i8>* %b) #0 {
+define void @bitcast_v16i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: bitcast_v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %load = load volatile <16 x i8>, <16 x i8>* %a
+  %load = load volatile <16 x i8>, ptr %a
   %cast = bitcast <16 x i8> %load to <16 x i8>
-  store volatile <16 x i8> %cast, <16 x i8>* %b
+  store volatile <16 x i8> %cast, ptr %b
   ret void
 }
 
-define void @bitcast_v32i8(<32 x i8> *%a, <32 x i8>* %b) #0 {
+define void @bitcast_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: bitcast_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -49,13 +49,13 @@ define void @bitcast_v32i8(<32 x i8> *%a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    str q1, [x1, #16]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %load = load volatile <32 x i8>, <32 x i8>* %a
+  %load = load volatile <32 x i8>, ptr %a
   %cast = bitcast <32 x i8> %load to <32 x i8>
-  store volatile <32 x i8> %cast, <32 x i8>* %b
+  store volatile <32 x i8> %cast, ptr %b
   ret void
 }
 
-define void @bitcast_v2i16(<2 x i16> *%a, <2 x half>* %b) #0 {
+define void @bitcast_v2i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: bitcast_v2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -75,37 +75,37 @@ define void @bitcast_v2i16(<2 x i16> *%a, <2 x half>* %b) #0 {
 ; CHECK-NEXT:    str w8, [x1]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
-  %load = load volatile <2 x i16>, <2 x i16>* %a
+  %load = load volatile <2 x i16>, ptr %a
   %cast = bitcast <2 x i16> %load to <2 x half>
-  store volatile <2 x half> %cast, <2 x half>* %b
+  store volatile <2 x half> %cast, ptr %b
   ret void
 }
 
-define void @bitcast_v4i16(<4 x i16> *%a, <4 x half>* %b) #0 {
+define void @bitcast_v4i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: bitcast_v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    str d0, [x1]
 ; CHECK-NEXT:    ret
-  %load = load volatile <4 x i16>, <4 x i16>* %a
+  %load = load volatile <4 x i16>, ptr %a
   %cast = bitcast <4 x i16> %load to <4 x half>
-  store volatile <4 x half> %cast, <4 x half>* %b
+  store volatile <4 x half> %cast, ptr %b
   ret void
 }
 
-define void @bitcast_v8i16(<8 x i16> *%a, <8 x half>* %b) #0 {
+define void @bitcast_v8i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: bitcast_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %load = load volatile <8 x i16>, <8 x i16>* %a
+  %load = load volatile <8 x i16>, ptr %a
   %cast = bitcast <8 x i16> %load to <8 x half>
-  store volatile <8 x half> %cast, <8 x half>* %b
+  store volatile <8 x half> %cast, ptr %b
   ret void
 }
 
-define void @bitcast_v16i16(<16 x i16> *%a, <16 x half>* %b) #0 {
+define void @bitcast_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: bitcast_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -113,37 +113,37 @@ define void @bitcast_v16i16(<16 x i16> *%a, <16 x half>* %b) #0 {
 ; CHECK-NEXT:    str q1, [x1, #16]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %load = load volatile <16 x i16>, <16 x i16>* %a
+  %load = load volatile <16 x i16>, ptr %a
   %cast = bitcast <16 x i16> %load to <16 x half>
-  store volatile <16 x half> %cast, <16 x half>* %b
+  store volatile <16 x half> %cast, ptr %b
   ret void
 }
 
-define void @bitcast_v2i32(<2 x i32> *%a, <2 x float>* %b) #0 {
+define void @bitcast_v2i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: bitcast_v2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    str d0, [x1]
 ; CHECK-NEXT:    ret
-  %load = load volatile <2 x i32>, <2 x i32>* %a
+  %load = load volatile <2 x i32>, ptr %a
   %cast = bitcast <2 x i32> %load to <2 x float>
-  store volatile <2 x float> %cast, <2 x float>* %b
+  store volatile <2 x float> %cast, ptr %b
   ret void
 }
 
-define void @bitcast_v4i32(<4 x i32> *%a, <4 x float>* %b) #0 {
+define void @bitcast_v4i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: bitcast_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %load = load volatile <4 x i32>, <4 x i32>* %a
+  %load = load volatile <4 x i32>, ptr %a
   %cast = bitcast <4 x i32> %load to <4 x float>
-  store volatile <4 x float> %cast, <4 x float>* %b
+  store volatile <4 x float> %cast, ptr %b
   ret void
 }
 
-define void @bitcast_v8i32(<8 x i32> *%a, <8 x float>* %b) #0 {
+define void @bitcast_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: bitcast_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -151,37 +151,37 @@ define void @bitcast_v8i32(<8 x i32> *%a, <8 x float>* %b) #0 {
 ; CHECK-NEXT:    str q1, [x1, #16]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %load = load volatile <8 x i32>, <8 x i32>* %a
+  %load = load volatile <8 x i32>, ptr %a
   %cast = bitcast <8 x i32> %load to <8 x float>
-  store volatile <8 x float> %cast, <8 x float>* %b
+  store volatile <8 x float> %cast, ptr %b
   ret void
 }
 
-define void @bitcast_v1i64(<1 x i64> *%a, <1 x double>* %b) #0 {
+define void @bitcast_v1i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: bitcast_v1i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    str d0, [x1]
 ; CHECK-NEXT:    ret
-  %load = load volatile <1 x i64>, <1 x i64>* %a
+  %load = load volatile <1 x i64>, ptr %a
   %cast = bitcast <1 x i64> %load to <1 x double>
-  store volatile <1 x double> %cast, <1 x double>* %b
+  store volatile <1 x double> %cast, ptr %b
   ret void
 }
 
-define void @bitcast_v2i64(<2 x i64> *%a, <2 x double>* %b) #0 {
+define void @bitcast_v2i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: bitcast_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %load = load volatile <2 x i64>, <2 x i64>* %a
+  %load = load volatile <2 x i64>, ptr %a
   %cast = bitcast <2 x i64> %load to <2 x double>
-  store volatile <2 x double> %cast, <2 x double>* %b
+  store volatile <2 x double> %cast, ptr %b
   ret void
 }
 
-define void @bitcast_v4i64(<4 x i64> *%a, <4 x double>* %b) #0 {
+define void @bitcast_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: bitcast_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -189,9 +189,9 @@ define void @bitcast_v4i64(<4 x i64> *%a, <4 x double>* %b) #0 {
 ; CHECK-NEXT:    str q1, [x1, #16]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %load = load volatile <4 x i64>, <4 x i64>* %a
+  %load = load volatile <4 x i64>, ptr %a
   %cast = bitcast <4 x i64> %load to <4 x double>
-  store volatile <4 x double> %cast, <4 x double>* %b
+  store volatile <4 x double> %cast, ptr %b
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll
index fc58ab36ad2e3..c40c13d23da1d 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll
@@ -57,24 +57,24 @@ define <16 x i8> @concat_v16i8(<8 x i8> %op1, <8 x i8> %op2)  #0 {
   ret <16 x i8> %res
 }
 
-define void @concat_v32i8(<16 x i8>* %a, <16 x i8>* %b, <32 x i8>* %c)  #0 {
+define void @concat_v32i8(ptr %a, ptr %b, ptr %c)  #0 {
 ; CHECK-LABEL: concat_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x1]
 ; CHECK-NEXT:    ldr q1, [x0]
 ; CHECK-NEXT:    stp q1, q0, [x2]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i8>, <16 x i8>* %a
-  %op2 = load <16 x i8>, <16 x i8>* %b
+  %op1 = load <16 x i8>, ptr %a
+  %op2 = load <16 x i8>, ptr %b
   %res = shufflevector <16 x i8> %op1, <16 x i8> %op2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                                                                    i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
                                                                    i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
                                                                    i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-  store <32 x i8> %res, <32 x i8>* %c
+  store <32 x i8> %res, ptr %c
   ret void
 }
 
-define void @concat_v64i8(<32 x i8>* %a, <32 x i8>* %b, <64 x i8>* %c) #0 {
+define void @concat_v64i8(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: concat_v64i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x1]
@@ -82,8 +82,8 @@ define void @concat_v64i8(<32 x i8>* %a, <32 x i8>* %b, <64 x i8>* %c) #0 {
 ; CHECK-NEXT:    stp q0, q1, [x2, #32]
 ; CHECK-NEXT:    stp q2, q3, [x2]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = shufflevector <32 x i8> %op1, <32 x i8> %op2, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                                                                    i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
                                                                    i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
@@ -92,7 +92,7 @@ define void @concat_v64i8(<32 x i8>* %a, <32 x i8>* %b, <64 x i8>* %c) #0 {
                                                                    i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47,
                                                                    i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55,
                                                                    i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
-  store <64 x i8> %res, <64 x i8>* %c
+  store <64 x i8> %res, ptr %c
   ret void
 }
 
@@ -138,22 +138,22 @@ define <8 x i16> @concat_v8i16(<4 x i16> %op1, <4 x i16> %op2)  #0 {
   ret <8 x i16> %res
 }
 
-define void @concat_v16i16(<8 x i16>* %a, <8 x i16>* %b, <16 x i16>* %c)  #0 {
+define void @concat_v16i16(ptr %a, ptr %b, ptr %c)  #0 {
 ; CHECK-LABEL: concat_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x1]
 ; CHECK-NEXT:    ldr q1, [x0]
 ; CHECK-NEXT:    stp q1, q0, [x2]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i16>, <8 x i16>* %a
-  %op2 = load <8 x i16>, <8 x i16>* %b
+  %op1 = load <8 x i16>, ptr %a
+  %op2 = load <8 x i16>, ptr %b
   %res = shufflevector <8 x i16> %op1, <8 x i16> %op2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                                                                    i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  store <16 x i16> %res, <16 x i16>* %c
+  store <16 x i16> %res, ptr %c
   ret void
 }
 
-define void @concat_v32i16(<16 x i16>* %a, <16 x i16>* %b, <32 x i16>* %c) #0 {
+define void @concat_v32i16(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: concat_v32i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x1]
@@ -161,13 +161,13 @@ define void @concat_v32i16(<16 x i16>* %a, <16 x i16>* %b, <32 x i16>* %c) #0 {
 ; CHECK-NEXT:    stp q0, q1, [x2, #32]
 ; CHECK-NEXT:    stp q2, q3, [x2]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = shufflevector <16 x i16> %op1, <16 x i16> %op2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                                                                      i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
                                                                      i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
                                                                      i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-  store <32 x i16> %res, <32 x i16>* %c
+  store <32 x i16> %res, ptr %c
   ret void
 }
 
@@ -202,21 +202,21 @@ define <4 x i32> @concat_v4i32(<2 x i32> %op1, <2 x i32> %op2)  #0 {
   ret <4 x i32> %res
 }
 
-define void @concat_v8i32(<4 x i32>* %a, <4 x i32>* %b, <8 x i32>* %c)  #0 {
+define void @concat_v8i32(ptr %a, ptr %b, ptr %c)  #0 {
 ; CHECK-LABEL: concat_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x1]
 ; CHECK-NEXT:    ldr q1, [x0]
 ; CHECK-NEXT:    stp q1, q0, [x2]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i32>, <4 x i32>* %a
-  %op2 = load <4 x i32>, <4 x i32>* %b
+  %op1 = load <4 x i32>, ptr %a
+  %op2 = load <4 x i32>, ptr %b
   %res = shufflevector <4 x i32> %op1, <4 x i32> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  store <8 x i32> %res, <8 x i32>* %c
+  store <8 x i32> %res, ptr %c
   ret void
 }
 
-define void @concat_v16i32(<8 x i32>* %a, <8 x i32>* %b, <16 x i32>* %c) #0 {
+define void @concat_v16i32(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: concat_v16i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x1]
@@ -224,11 +224,11 @@ define void @concat_v16i32(<8 x i32>* %a, <8 x i32>* %b, <16 x i32>* %c) #0 {
 ; CHECK-NEXT:    stp q0, q1, [x2, #32]
 ; CHECK-NEXT:    stp q2, q3, [x2]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = shufflevector <8 x i32> %op1, <8 x i32> %op2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                                                                    i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  store <16 x i32> %res, <16 x i32>* %c
+  store <16 x i32> %res, ptr %c
   ret void
 }
 
@@ -250,21 +250,21 @@ define <2 x i64> @concat_v2i64(<1 x i64> %op1, <1 x i64> %op2)  #0 {
   ret <2 x i64> %res
 }
 
-define void @concat_v4i64(<2 x i64>* %a, <2 x i64>* %b, <4 x i64>* %c)  #0 {
+define void @concat_v4i64(ptr %a, ptr %b, ptr %c)  #0 {
 ; CHECK-LABEL: concat_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x1]
 ; CHECK-NEXT:    ldr q1, [x0]
 ; CHECK-NEXT:    stp q1, q0, [x2]
 ; CHECK-NEXT:    ret
-  %op1 = load <2 x i64>, <2 x i64>* %a
-  %op2 = load <2 x i64>, <2 x i64>* %b
+  %op1 = load <2 x i64>, ptr %a
+  %op2 = load <2 x i64>, ptr %b
   %res = shufflevector <2 x i64> %op1, <2 x i64> %op2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  store <4 x i64> %res, <4 x i64>* %c
+  store <4 x i64> %res, ptr %c
   ret void
 }
 
-define void @concat_v8i64(<4 x i64>* %a, <4 x i64>* %b, <8 x i64>* %c) #0 {
+define void @concat_v8i64(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: concat_v8i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x1]
@@ -272,10 +272,10 @@ define void @concat_v8i64(<4 x i64>* %a, <4 x i64>* %b, <8 x i64>* %c) #0 {
 ; CHECK-NEXT:    stp q0, q1, [x2, #32]
 ; CHECK-NEXT:    stp q2, q3, [x2]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = shufflevector <4 x i64> %op1, <4 x i64> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  store <8 x i64> %res, <8 x i64>* %c
+  store <8 x i64> %res, ptr %c
   ret void
 }
 
@@ -316,22 +316,22 @@ define <8 x half> @concat_v8f16(<4 x half> %op1, <4 x half> %op2)  #0 {
   ret <8 x half> %res
 }
 
-define void @concat_v16f16(<8 x half>* %a, <8 x half>* %b, <16 x half>* %c)  #0 {
+define void @concat_v16f16(ptr %a, ptr %b, ptr %c)  #0 {
 ; CHECK-LABEL: concat_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x1]
 ; CHECK-NEXT:    ldr q1, [x0]
 ; CHECK-NEXT:    stp q1, q0, [x2]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x half>, <8 x half>* %a
-  %op2 = load <8 x half>, <8 x half>* %b
+  %op1 = load <8 x half>, ptr %a
+  %op2 = load <8 x half>, ptr %b
   %res = shufflevector <8 x half> %op1, <8 x half> %op2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                                                                      i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  store <16 x half> %res, <16 x half>* %c
+  store <16 x half> %res, ptr %c
   ret void
 }
 
-define void @concat_v32f16(<16 x half>* %a, <16 x half>* %b, <32 x half>* %c) #0 {
+define void @concat_v32f16(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: concat_v32f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x1]
@@ -339,13 +339,13 @@ define void @concat_v32f16(<16 x half>* %a, <16 x half>* %b, <32 x half>* %c) #0
 ; CHECK-NEXT:    stp q0, q1, [x2, #32]
 ; CHECK-NEXT:    stp q2, q3, [x2]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x half>, <16 x half>* %a
-  %op2 = load <16 x half>, <16 x half>* %b
+  %op1 = load <16 x half>, ptr %a
+  %op2 = load <16 x half>, ptr %b
   %res = shufflevector <16 x half> %op1, <16 x half> %op2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                                                                        i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
                                                                        i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
                                                                        i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-  store <32 x half> %res, <32 x half>* %c
+  store <32 x half> %res, ptr %c
   ret void
 }
 
@@ -380,21 +380,21 @@ define <4 x float> @concat_v4f32(<2 x float> %op1, <2 x float> %op2)  #0 {
   ret <4 x float> %res
 }
 
-define void @concat_v8f32(<4 x float>* %a, <4 x float>* %b, <8 x float>* %c)  #0 {
+define void @concat_v8f32(ptr %a, ptr %b, ptr %c)  #0 {
 ; CHECK-LABEL: concat_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x1]
 ; CHECK-NEXT:    ldr q1, [x0]
 ; CHECK-NEXT:    stp q1, q0, [x2]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x float>, <4 x float>* %a
-  %op2 = load <4 x float>, <4 x float>* %b
+  %op1 = load <4 x float>, ptr %a
+  %op2 = load <4 x float>, ptr %b
   %res = shufflevector <4 x float> %op1, <4 x float> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  store <8 x float> %res, <8 x float>* %c
+  store <8 x float> %res, ptr %c
   ret void
 }
 
-define void @concat_v16f32(<8 x float>* %a, <8 x float>* %b, <16 x float>* %c) #0 {
+define void @concat_v16f32(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: concat_v16f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x1]
@@ -402,11 +402,11 @@ define void @concat_v16f32(<8 x float>* %a, <8 x float>* %b, <16 x float>* %c) #
 ; CHECK-NEXT:    stp q0, q1, [x2, #32]
 ; CHECK-NEXT:    stp q2, q3, [x2]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x float>, <8 x float>* %a
-  %op2 = load <8 x float>, <8 x float>* %b
+  %op1 = load <8 x float>, ptr %a
+  %op2 = load <8 x float>, ptr %b
   %res = shufflevector <8 x float> %op1, <8 x float> %op2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                                                                        i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  store <16 x float> %res, <16 x float>* %c
+  store <16 x float> %res, ptr %c
   ret void
 }
 
@@ -428,21 +428,21 @@ define <2 x double> @concat_v2f64(<1 x double> %op1, <1 x double> %op2)  #0 {
   ret <2 x double> %res
 }
 
-define void @concat_v4f64(<2 x double>* %a, <2 x double>* %b, <4 x double>* %c)  #0 {
+define void @concat_v4f64(ptr %a, ptr %b, ptr %c)  #0 {
 ; CHECK-LABEL: concat_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x1]
 ; CHECK-NEXT:    ldr q1, [x0]
 ; CHECK-NEXT:    stp q1, q0, [x2]
 ; CHECK-NEXT:    ret
-  %op1 = load <2 x double>, <2 x double>* %a
-  %op2 = load <2 x double>, <2 x double>* %b
+  %op1 = load <2 x double>, ptr %a
+  %op2 = load <2 x double>, ptr %b
   %res = shufflevector <2 x double> %op1, <2 x double> %op2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  store <4 x double> %res, <4 x double>* %c
+  store <4 x double> %res, ptr %c
   ret void
 }
 
-define void @concat_v8f64(<4 x double>* %a, <4 x double>* %b, <8 x double>* %c) #0 {
+define void @concat_v8f64(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: concat_v8f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x1]
@@ -450,10 +450,10 @@ define void @concat_v8f64(<4 x double>* %a, <4 x double>* %b, <8 x double>* %c)
 ; CHECK-NEXT:    stp q0, q1, [x2, #32]
 ; CHECK-NEXT:    stp q2, q3, [x2]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x double>, <4 x double>* %a
-  %op2 = load <4 x double>, <4 x double>* %b
+  %op1 = load <4 x double>, ptr %a
+  %op2 = load <4 x double>, ptr %b
   %res = shufflevector <4 x double> %op1, <4 x double> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  store <8 x double> %res, <8 x double>* %c
+  store <8 x double> %res, ptr %c
   ret void
 }
 
@@ -461,55 +461,55 @@ define void @concat_v8f64(<4 x double>* %a, <4 x double>* %b, <8 x double>* %c)
 ; undef
 ;
 
-define void @concat_v32i8_undef(<16 x i8>* %a, <32 x i8>* %b)  #0 {
+define void @concat_v32i8_undef(ptr %a, ptr %b)  #0 {
 ; CHECK-LABEL: concat_v32i8_undef:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i8>, <16 x i8>* %a
+  %op1 = load <16 x i8>, ptr %a
   %res = shufflevector <16 x i8> %op1, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                                                                     i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
                                                                     i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
                                                                     i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-  store <32 x i8> %res, <32 x i8>* %b
+  store <32 x i8> %res, ptr %b
   ret void
 }
 
-define void @concat_v16i16_undef(<8 x i16>* %a, <16 x i16>* %b)  #0 {
+define void @concat_v16i16_undef(ptr %a, ptr %b)  #0 {
 ; CHECK-LABEL: concat_v16i16_undef:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i16>, <8 x i16>* %a
+  %op1 = load <8 x i16>, ptr %a
   %res = shufflevector <8 x i16> %op1, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                                                                     i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  store <16 x i16> %res, <16 x i16>* %b
+  store <16 x i16> %res, ptr %b
   ret void
 }
 
-define void @concat_v8i32_undef(<4 x i32>* %a, <8 x i32>* %b)  #0 {
+define void @concat_v8i32_undef(ptr %a, ptr %b)  #0 {
 ; CHECK-LABEL: concat_v8i32_undef:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i32>, <4 x i32>* %a
+  %op1 = load <4 x i32>, ptr %a
   %res = shufflevector <4 x i32> %op1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  store <8 x i32> %res, <8 x i32>* %b
+  store <8 x i32> %res, ptr %b
   ret void
 }
 
-define void @concat_v4i64_undef(<2 x i64>* %a, <4 x i64>* %b)  #0 {
+define void @concat_v4i64_undef(ptr %a, ptr %b)  #0 {
 ; CHECK-LABEL: concat_v4i64_undef:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <2 x i64>, <2 x i64>* %a
+  %op1 = load <2 x i64>, ptr %a
   %res = shufflevector <2 x i64> %op1, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  store <4 x i64> %res, <4 x i64>* %b
+  store <4 x i64> %res, ptr %b
   ret void
 }
 
@@ -517,60 +517,60 @@ define void @concat_v4i64_undef(<2 x i64>* %a, <4 x i64>* %b)  #0 {
 ; > 2 operands
 ;
 
-define void @concat_v32i8_4op(<8 x i8>* %a, <32 x i8>* %b)  #0 {
+define void @concat_v32i8_4op(ptr %a, ptr %b)  #0 {
 ; CHECK-LABEL: concat_v32i8_4op:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i8>, <8 x i8>* %a
+  %op1 = load <8 x i8>, ptr %a
   %shuffle = shufflevector <8 x i8> %op1, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                                                                       i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   %res = shufflevector <16 x i8> %shuffle, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                                                                         i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
                                                                         i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
                                                                         i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-  store <32 x i8> %res, <32 x i8>* %b
+  store <32 x i8> %res, ptr %b
   ret void
 }
 
-define void @concat_v16i16_4op(<4 x i16>* %a, <16 x i16>* %b)  #0 {
+define void @concat_v16i16_4op(ptr %a, ptr %b)  #0 {
 ; CHECK-LABEL: concat_v16i16_4op:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i16>, <4 x i16>* %a
+  %op1 = load <4 x i16>, ptr %a
   %shuffle = shufflevector <4 x i16> %op1, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %res = shufflevector <8 x i16> %shuffle, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                                                                         i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-  store <16 x i16> %res, <16 x i16>* %b
+  store <16 x i16> %res, ptr %b
   ret void
 }
 
-define void @concat_v8i32_4op(<2 x i32>* %a, <8 x i32>* %b)  #0 {
+define void @concat_v8i32_4op(ptr %a, ptr %b)  #0 {
 ; CHECK-LABEL: concat_v8i32_4op:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <2 x i32>, <2 x i32>* %a
+  %op1 = load <2 x i32>, ptr %a
   %shuffle = shufflevector <2 x i32> %op1, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %res = shufflevector <4 x i32> %shuffle, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  store <8 x i32> %res, <8 x i32>* %b
+  store <8 x i32> %res, ptr %b
   ret void
 }
 
-define void @concat_v4i64_4op(<1 x i64>* %a, <4 x i64>* %b)  #0 {
+define void @concat_v4i64_4op(ptr %a, ptr %b)  #0 {
 ; CHECK-LABEL: concat_v4i64_4op:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <1 x i64>, <1 x i64>* %a
+  %op1 = load <1 x i64>, ptr %a
   %shuffle = shufflevector <1 x i64> %op1, <1 x i64> undef, <2 x i32> <i32 0, i32 1>
   %res = shufflevector <2 x i64> %shuffle, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  store <4 x i64> %res, <4 x i64>* %b
+  store <4 x i64> %res, ptr %b
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
index 7cac1fc069870..3e6892a018402 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
@@ -3,7 +3,7 @@
 
 target triple = "aarch64-unknown-linux-gnu"
 
-define <8 x i16> @load_zext_v8i8i16(<8 x i8>* %ap)  #0 {
+define <8 x i16> @load_zext_v8i8i16(ptr %ap)  #0 {
 ; CHECK-LABEL: load_zext_v8i8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp s1, s0, [x0]
@@ -13,36 +13,36 @@ define <8 x i16> @load_zext_v8i8i16(<8 x i8>* %ap)  #0 {
 ; CHECK-NEXT:    splice z0.h, p0, z0.h, z2.h
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %a = load <8 x i8>, <8 x i8>* %ap
+  %a = load <8 x i8>, ptr %ap
   %val = zext <8 x i8> %a to <8 x i16>
   ret <8 x i16> %val
 }
 
-define <4 x i32> @load_zext_v4i16i32(<4 x i16>* %ap)  #0 {
+define <4 x i32> @load_zext_v4i16i32(ptr %ap)  #0 {
 ; CHECK-LABEL: load_zext_v4i16i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    uunpklo z0.s, z0.h
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %a = load <4 x i16>, <4 x i16>* %ap
+  %a = load <4 x i16>, ptr %ap
   %val = zext <4 x i16> %a to <4 x i32>
   ret <4 x i32> %val
 }
 
-define <2 x i64> @load_zext_v2i32i64(<2 x i32>* %ap) #0 {
+define <2 x i64> @load_zext_v2i32i64(ptr %ap) #0 {
 ; CHECK-LABEL: load_zext_v2i32i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    uunpklo z0.d, z0.s
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %a = load <2 x i32>, <2 x i32>* %ap
+  %a = load <2 x i32>, ptr %ap
   %val = zext <2 x i32> %a to <2 x i64>
   ret <2 x i64> %val
 }
 
-define <2 x i256> @load_zext_v2i64i256(<2 x i64>* %ap) #0 {
+define <2 x i256> @load_zext_v2i64i256(ptr %ap) #0 {
 ; CHECK-LABEL: load_zext_v2i64i256:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.d, #0 // =0x0
@@ -58,12 +58,12 @@ define <2 x i256> @load_zext_v2i64i256(<2 x i64>* %ap) #0 {
 ; CHECK-NEXT:    mov x6, x2
 ; CHECK-NEXT:    mov x7, x3
 ; CHECK-NEXT:    ret
-  %a = load <2 x i64>, <2 x i64>* %ap
+  %a = load <2 x i64>, ptr %ap
   %val = zext <2 x i64> %a to <2 x i256>
   ret <2 x i256> %val
 }
 
-define <16 x i32> @load_sext_v16i8i32(<16 x i8>* %ap)  #0 {
+define <16 x i32> @load_sext_v16i8i32(ptr %ap)  #0 {
 ; CHECK-LABEL: load_sext_v16i8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q1, [x0]
@@ -81,12 +81,12 @@ define <16 x i32> @load_sext_v16i8i32(<16 x i8>* %ap)  #0 {
 ; CHECK-NEXT:    // kill: def $q2 killed $q2 killed $z2
 ; CHECK-NEXT:    // kill: def $q3 killed $q3 killed $z3
 ; CHECK-NEXT:    ret
-  %a = load <16 x i8>, <16 x i8>* %ap
+  %a = load <16 x i8>, ptr %ap
   %val = sext <16 x i8> %a to <16 x i32>
   ret <16 x i32> %val
 }
 
-define <8 x i32> @load_sext_v8i16i32(<8 x i16>* %ap)  #0 {
+define <8 x i32> @load_sext_v8i16i32(ptr %ap)  #0 {
 ; CHECK-LABEL: load_sext_v8i16i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q1, [x0]
@@ -96,12 +96,12 @@ define <8 x i32> @load_sext_v8i16i32(<8 x i16>* %ap)  #0 {
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    ret
-  %a = load <8 x i16>, <8 x i16>* %ap
+  %a = load <8 x i16>, ptr %ap
   %val = sext <8 x i16> %a to <8 x i32>
   ret <8 x i32> %val
 }
 
-define <4 x i256> @load_sext_v4i32i256(<4 x i32>* %ap) #0 {
+define <4 x i256> @load_sext_v4i32i256(ptr %ap) #0 {
 ; CHECK-LABEL: load_sext_v4i32i256:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -127,12 +127,12 @@ define <4 x i256> @load_sext_v4i32i256(<4 x i32>* %ap) #0 {
 ; CHECK-NEXT:    stp x12, x12, [x8, #48]
 ; CHECK-NEXT:    stp x11, x12, [x8, #32]
 ; CHECK-NEXT:    ret
-  %a = load <4 x i32>, <4 x i32>* %ap
+  %a = load <4 x i32>, ptr %ap
   %val = sext <4 x i32> %a to <4 x i256>
   ret <4 x i256> %val
 }
 
-define <2 x i256> @load_sext_v2i64i256(<2 x i64>* %ap) #0 {
+define <2 x i256> @load_sext_v2i64i256(ptr %ap) #0 {
 ; CHECK-LABEL: load_sext_v2i64i256:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -160,12 +160,12 @@ define <2 x i256> @load_sext_v2i64i256(<2 x i64>* %ap) #0 {
 ; CHECK-NEXT:    fmov x1, d0
 ; CHECK-NEXT:    fmov x5, d1
 ; CHECK-NEXT:    ret
-  %a = load <2 x i64>, <2 x i64>* %ap
+  %a = load <2 x i64>, ptr %ap
   %val = sext <2 x i64> %a to <2 x i256>
   ret <2 x i256> %val
 }
 
-define <16 x i64> @load_zext_v16i16i64(<16 x i16>* %ap)  #0 {
+define <16 x i64> @load_zext_v16i16i64(ptr %ap)  #0 {
 ; CHECK-LABEL: load_zext_v16i16i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q2, [x0]
@@ -196,7 +196,7 @@ define <16 x i64> @load_zext_v16i16i64(<16 x i16>* %ap)  #0 {
 ; CHECK-NEXT:    // kill: def $q6 killed $q6 killed $z6
 ; CHECK-NEXT:    // kill: def $q7 killed $q7 killed $z7
 ; CHECK-NEXT:    ret
-  %a = load <16 x i16>, <16 x i16>* %ap
+  %a = load <16 x i16>, ptr %ap
   %val = zext <16 x i16> %a to <16 x i64>
   ret <16 x i64> %val
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll
index 2999b84360a71..9e4c1862bcb06 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll
@@ -68,15 +68,15 @@ define <8 x i8> @extract_subvector_v16i8(<16 x i8> %op) #0 {
   ret <8 x i8> %ret
 }
 
-define void @extract_subvector_v32i8(<32 x i8>* %a, <16 x i8>* %b) #0 {
+define void @extract_subvector_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: extract_subvector_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0, #16]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op = load <32 x i8>, <32 x i8>* %a
+  %op = load <32 x i8>, ptr %a
   %ret = call <16 x i8> @llvm.vector.extract.v16i8.v32i8(<32 x i8> %op, i64 16)
-  store <16 x i8> %ret, <16 x i8>* %b
+  store <16 x i8> %ret, ptr %b
   ret void
 }
 
@@ -105,15 +105,15 @@ define <4 x i16> @extract_subvector_v8i16(<8 x i16> %op) #0 {
   ret <4 x i16> %ret
 }
 
-define void @extract_subvector_v16i16(<16 x i16>* %a, <8 x i16>* %b) #0 {
+define void @extract_subvector_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: extract_subvector_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0, #16]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op = load <16 x i16>, <16 x i16>* %a
+  %op = load <16 x i16>, ptr %a
   %ret = call <8 x i16> @llvm.vector.extract.v8i16.v16i16(<16 x i16> %op, i64 8)
-  store <8 x i16> %ret, <8 x i16>* %b
+  store <8 x i16> %ret, ptr %b
   ret void
 }
 
@@ -141,15 +141,15 @@ define <2 x i32> @extract_subvector_v4i32(<4 x i32> %op) #0 {
   ret <2 x i32> %ret
 }
 
-define void @extract_subvector_v8i32(<8 x i32>* %a, <4 x i32>* %b) #0 {
+define void @extract_subvector_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: extract_subvector_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0, #16]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op = load <8 x i32>, <8 x i32>* %a
+  %op = load <8 x i32>, ptr %a
   %ret = call <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %op, i64 4)
-  store <4 x i32> %ret, <4 x i32>* %b
+  store <4 x i32> %ret, ptr %b
   ret void
 }
 
@@ -166,15 +166,15 @@ define <1 x i64> @extract_subvector_v2i64(<2 x i64> %op) #0 {
   ret <1 x i64> %ret
 }
 
-define void @extract_subvector_v4i64(<4 x i64>* %a, <2 x i64>* %b) #0 {
+define void @extract_subvector_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: extract_subvector_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0, #16]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op = load <4 x i64>, <4 x i64>* %a
+  %op = load <4 x i64>, ptr %a
   %ret = call <2 x i64> @llvm.vector.extract.v2i64.v4i64(<4 x i64> %op, i64 2)
-  store <2 x i64> %ret, <2 x i64>* %b
+  store <2 x i64> %ret, ptr %b
   ret void
 }
 
@@ -208,15 +208,15 @@ define <4 x half> @extract_subvector_v8f16(<8 x half> %op) #0 {
   ret <4 x half> %ret
 }
 
-define void @extract_subvector_v16f16(<16 x half>* %a, <8 x half>* %b) #0 {
+define void @extract_subvector_v16f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: extract_subvector_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0, #16]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op = load <16 x half>, <16 x half>* %a
+  %op = load <16 x half>, ptr %a
   %ret = call <8 x half> @llvm.vector.extract.v8f16.v16f16(<16 x half> %op, i64 8)
-  store <8 x half> %ret, <8 x half>* %b
+  store <8 x half> %ret, ptr %b
   ret void
 }
 
@@ -244,15 +244,15 @@ define <2 x float> @extract_subvector_v4f32(<4 x float> %op) #0 {
   ret <2 x float> %ret
 }
 
-define void @extract_subvector_v8f32(<8 x float>* %a, <4 x float>* %b) #0 {
+define void @extract_subvector_v8f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: extract_subvector_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0, #16]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op = load <8 x float>, <8 x float>* %a
+  %op = load <8 x float>, ptr %a
   %ret = call <4 x float> @llvm.vector.extract.v4f32.v8f32(<8 x float> %op, i64 4)
-  store <4 x float> %ret, <4 x float>* %b
+  store <4 x float> %ret, ptr %b
   ret void
 }
 
@@ -269,15 +269,15 @@ define <1 x double> @extract_subvector_v2f64(<2 x double> %op) #0 {
   ret <1 x double> %ret
 }
 
-define void @extract_subvector_v4f64(<4 x double>* %a, <2 x double>* %b) #0 {
+define void @extract_subvector_v4f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: extract_subvector_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0, #16]
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op = load <4 x double>, <4 x double>* %a
+  %op = load <4 x double>, ptr %a
   %ret = call <2 x double> @llvm.vector.extract.v2f64.v4f64(<4 x double> %op, i64 2)
-  store <2 x double> %ret, <2 x double>* %b
+  store <2 x double> %ret, ptr %b
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll
index ad7e637afeea3..4bb9e565746cb 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll
@@ -40,14 +40,14 @@ define half @extractelement_v8f16(<8 x half> %op1) #0 {
   ret half %r
 }
 
-define half @extractelement_v16f16(<16 x half>* %a) #0 {
+define half @extractelement_v16f16(ptr %a) #0 {
 ; CHECK-LABEL: extractelement_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0, #16]
 ; CHECK-NEXT:    mov z0.h, z0.h[7]
 ; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x half>, <16 x half>* %a
+  %op1 = load <16 x half>, ptr %a
   %r = extractelement <16 x half> %op1, i64 15
   ret half %r
 }
@@ -74,14 +74,14 @@ define float @extractelement_v4f32(<4 x float> %op1) #0 {
   ret float %r
 }
 
-define float @extractelement_v8f32(<8 x float>* %a) #0 {
+define float @extractelement_v8f32(ptr %a) #0 {
 ; CHECK-LABEL: extractelement_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0, #16]
 ; CHECK-NEXT:    mov z0.s, z0.s[3]
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x float>, <8 x float>* %a
+  %op1 = load <8 x float>, ptr %a
   %r = extractelement <8 x float> %op1, i64 7
   ret float %r
 }
@@ -106,14 +106,14 @@ define double @extractelement_v2f64(<2 x double> %op1) #0 {
   ret double %r
 }
 
-define double @extractelement_v4f64(<4 x double>* %a) #0 {
+define double @extractelement_v4f64(ptr %a) #0 {
 ; CHECK-LABEL: extractelement_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0, #16]
 ; CHECK-NEXT:    mov z0.d, z0.d[1]
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x double>, <4 x double>* %a
+  %op1 = load <4 x double>, ptr %a
   %r = extractelement <4 x double> %op1, i64 3
   ret double %r
 }

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll
index 5970d2f6beac0..3215802062f28 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll
@@ -4,7 +4,7 @@
 target triple = "aarch64-unknown-linux-gnu"
 
 ; Ensure we don't crash when trying to combine fp<->int conversions
-define void @fp_convert_combine_crash(<8 x float> *%a, <8 x i32> *%b) #0 {
+define void @fp_convert_combine_crash(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fp_convert_combine_crash:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -16,11 +16,11 @@ define void @fp_convert_combine_crash(<8 x float> *%a, <8 x i32> *%b) #0 {
 ; CHECK-NEXT:    fcvtzs z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %f = load <8 x float>, <8 x float>* %a
+  %f = load <8 x float>, ptr %a
   %mul.i = fmul <8 x float> %f, <float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00,
                                  float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00>
   %vcvt.i = fptosi <8 x float> %mul.i to <8 x i32>
-  store <8 x i32> %vcvt.i, <8 x i32>* %b
+  store <8 x i32> %vcvt.i, ptr %b
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll
index 130beca2f2c17..7abde39f9e8e6 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll
@@ -7,7 +7,7 @@ target triple = "aarch64-unknown-linux-gnu"
 ; FCVT H -> S
 ;
 
-define void @fcvt_v2f16_v2f32(<2 x half>* %a, <2 x float>* %b) #0 {
+define void @fcvt_v2f16_v2f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v2f16_v2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -22,13 +22,13 @@ define void @fcvt_v2f16_v2f32(<2 x half>* %a, <2 x float>* %b) #0 {
 ; CHECK-NEXT:    str d0, [x1]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
-  %op1 = load <2 x half>, <2 x half>* %a
+  %op1 = load <2 x half>, ptr %a
   %res = fpext <2 x half> %op1 to <2 x float>
-  store <2 x float> %res, <2 x float>* %b
+  store <2 x float> %res, ptr %b
   ret void
 }
 
-define void @fcvt_v4f16_v4f32(<4 x half>* %a, <4 x float>* %b) #0 {
+define void @fcvt_v4f16_v4f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v4f16_v4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -49,13 +49,13 @@ define void @fcvt_v4f16_v4f32(<4 x half>* %a, <4 x float>* %b) #0 {
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x half>, <4 x half>* %a
+  %op1 = load <4 x half>, ptr %a
   %res = fpext <4 x half> %op1 to <4 x float>
-  store <4 x float> %res, <4 x float>* %b
+  store <4 x float> %res, ptr %b
   ret void
 }
 
-define void @fcvt_v8f16_v8f32(<8 x half>* %a, <8 x float>* %b) #0 {
+define void @fcvt_v8f16_v8f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v8f16_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
@@ -88,13 +88,13 @@ define void @fcvt_v8f16_v8f32(<8 x half>* %a, <8 x float>* %b) #0 {
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x half>, <8 x half>* %a
+  %op1 = load <8 x half>, ptr %a
   %res = fpext <8 x half> %op1 to <8 x float>
-  store <8 x float> %res, <8 x float>* %b
+  store <8 x float> %res, ptr %b
   ret void
 }
 
-define void @fcvt_v16f16_v16f32(<16 x half>* %a, <16 x float>* %b) #0 {
+define void @fcvt_v16f16_v16f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v16f16_v16f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #64
@@ -153,9 +153,9 @@ define void @fcvt_v16f16_v16f32(<16 x half>* %a, <16 x float>* %b) #0 {
 ; CHECK-NEXT:    stp q3, q2, [x1, #32]
 ; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x half>, <16 x half>* %a
+  %op1 = load <16 x half>, ptr %a
   %res = fpext <16 x half> %op1 to <16 x float>
-  store <16 x float> %res, <16 x float>* %b
+  store <16 x float> %res, ptr %b
   ret void
 }
 
@@ -163,20 +163,20 @@ define void @fcvt_v16f16_v16f32(<16 x half>* %a, <16 x float>* %b) #0 {
 ; FCVT H -> D
 ;
 
-define void @fcvt_v1f16_v1f64(<1 x half>* %a, <1 x double>* %b) #0 {
+define void @fcvt_v1f16_v1f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v1f16_v1f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr h0, [x0]
 ; CHECK-NEXT:    fcvt d0, h0
 ; CHECK-NEXT:    str d0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <1 x half>, <1 x half>* %a
+  %op1 = load <1 x half>, ptr %a
   %res = fpext <1 x half> %op1 to <1 x double>
-  store <1 x double> %res, <1 x double>* %b
+  store <1 x double> %res, ptr %b
   ret void
 }
 
-define void @fcvt_v2f16_v2f64(<2 x half>* %a, <2 x double>* %b) #0 {
+define void @fcvt_v2f16_v2f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v2f16_v2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -191,13 +191,13 @@ define void @fcvt_v2f16_v2f64(<2 x half>* %a, <2 x double>* %b) #0 {
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
-  %op1 = load <2 x half>, <2 x half>* %a
+  %op1 = load <2 x half>, ptr %a
   %res = fpext <2 x half> %op1 to <2 x double>
-  store <2 x double> %res, <2 x double>* %b
+  store <2 x double> %res, ptr %b
   ret void
 }
 
-define void @fcvt_v4f16_v4f64(<4 x half>* %a, <4 x double>* %b) #0 {
+define void @fcvt_v4f16_v4f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v4f16_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
@@ -218,13 +218,13 @@ define void @fcvt_v4f16_v4f64(<4 x half>* %a, <4 x double>* %b) #0 {
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x half>, <4 x half>* %a
+  %op1 = load <4 x half>, ptr %a
   %res = fpext <4 x half> %op1 to <4 x double>
-  store <4 x double> %res, <4 x double>* %b
+  store <4 x double> %res, ptr %b
   ret void
 }
 
-define void @fcvt_v8f16_v8f64(<8 x half>* %a, <8 x double>* %b) #0 {
+define void @fcvt_v8f16_v8f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v8f16_v8f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #64
@@ -259,13 +259,13 @@ define void @fcvt_v8f16_v8f64(<8 x half>* %a, <8 x double>* %b) #0 {
 ; CHECK-NEXT:    stp q3, q2, [x1, #32]
 ; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x half>, <8 x half>* %a
+  %op1 = load <8 x half>, ptr %a
   %res = fpext <8 x half> %op1 to <8 x double>
-  store <8 x double> %res, <8 x double>* %b
+  store <8 x double> %res, ptr %b
   ret void
 }
 
-define void @fcvt_v16f16_v16f64(<16 x half>* %a, <16 x double>* %b) #0 {
+define void @fcvt_v16f16_v16f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v16f16_v16f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #128
@@ -328,9 +328,9 @@ define void @fcvt_v16f16_v16f64(<16 x half>* %a, <16 x double>* %b) #0 {
 ; CHECK-NEXT:    stp q4, q5, [x1, #64]
 ; CHECK-NEXT:    add sp, sp, #128
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x half>, <16 x half>* %a
+  %op1 = load <16 x half>, ptr %a
   %res = fpext <16 x half> %op1 to <16 x double>
-  store <16 x double> %res, <16 x double>* %b
+  store <16 x double> %res, ptr %b
   ret void
 }
 
@@ -338,20 +338,20 @@ define void @fcvt_v16f16_v16f64(<16 x half>* %a, <16 x double>* %b) #0 {
 ; FCVT S -> D
 ;
 
-define void @fcvt_v1f32_v1f64(<1 x float>* %a, <1 x double>* %b) #0 {
+define void @fcvt_v1f32_v1f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v1f32_v1f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
 ; CHECK-NEXT:    fcvt d0, s0
 ; CHECK-NEXT:    str d0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <1 x float>, <1 x float>* %a
+  %op1 = load <1 x float>, ptr %a
   %res = fpext <1 x float> %op1 to <1 x double>
-  store <1 x double> %res, <1 x double>* %b
+  store <1 x double> %res, ptr %b
   ret void
 }
 
-define void @fcvt_v2f32_v2f64(<2 x float>* %a, <2 x double>* %b) #0 {
+define void @fcvt_v2f32_v2f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v2f32_v2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -366,13 +366,13 @@ define void @fcvt_v2f32_v2f64(<2 x float>* %a, <2 x double>* %b) #0 {
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
-  %op1 = load <2 x float>, <2 x float>* %a
+  %op1 = load <2 x float>, ptr %a
   %res = fpext <2 x float> %op1 to <2 x double>
-  store <2 x double> %res, <2 x double>* %b
+  store <2 x double> %res, ptr %b
   ret void
 }
 
-define void @fcvt_v4f32_v4f64(<4 x float>* %a, <4 x double>* %b) #0 {
+define void @fcvt_v4f32_v4f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v4f32_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
@@ -393,13 +393,13 @@ define void @fcvt_v4f32_v4f64(<4 x float>* %a, <4 x double>* %b) #0 {
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x float>, <4 x float>* %a
+  %op1 = load <4 x float>, ptr %a
   %res = fpext <4 x float> %op1 to <4 x double>
-  store <4 x double> %res, <4 x double>* %b
+  store <4 x double> %res, ptr %b
   ret void
 }
 
-define void @fcvt_v8f32_v8f64(<8 x float>* %a, <8 x double>* %b) #0 {
+define void @fcvt_v8f32_v8f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v8f32_v8f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #64
@@ -434,9 +434,9 @@ define void @fcvt_v8f32_v8f64(<8 x float>* %a, <8 x double>* %b) #0 {
 ; CHECK-NEXT:    stp q3, q2, [x1, #32]
 ; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x float>, <8 x float>* %a
+  %op1 = load <8 x float>, ptr %a
   %res = fpext <8 x float> %op1 to <8 x double>
-  store <8 x double> %res, <8 x double>* %b
+  store <8 x double> %res, ptr %b
   ret void
 }
 
@@ -444,7 +444,7 @@ define void @fcvt_v8f32_v8f64(<8 x float>* %a, <8 x double>* %b) #0 {
 ; FCVT S -> H
 ;
 
-define void @fcvt_v2f32_v2f16(<2 x float>* %a, <2 x half>* %b) #0 {
+define void @fcvt_v2f32_v2f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v2f32_v2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -452,13 +452,13 @@ define void @fcvt_v2f32_v2f16(<2 x float>* %a, <2 x half>* %b) #0 {
 ; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <2 x float>, <2 x float>* %a
+  %op1 = load <2 x float>, ptr %a
   %res = fptrunc <2 x float> %op1 to <2 x half>
-  store <2 x half> %res, <2 x half>* %b
+  store <2 x half> %res, ptr %b
   ret void
 }
 
-define void @fcvt_v4f32_v4f16(<4 x float>* %a, <4 x half>* %b) #0 {
+define void @fcvt_v4f32_v4f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v4f32_v4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -466,13 +466,13 @@ define void @fcvt_v4f32_v4f16(<4 x float>* %a, <4 x half>* %b) #0 {
 ; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x float>, <4 x float>* %a
+  %op1 = load <4 x float>, ptr %a
   %res = fptrunc <4 x float> %op1 to <4 x half>
-  store <4 x half> %res, <4 x half>* %b
+  store <4 x half> %res, ptr %b
   ret void
 }
 
-define void @fcvt_v8f32_v8f16(<8 x float>* %a, <8 x half>* %b) #0 {
+define void @fcvt_v8f32_v8f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v8f32_v8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -483,9 +483,9 @@ define void @fcvt_v8f32_v8f16(<8 x float>* %a, <8 x half>* %b) #0 {
 ; CHECK-NEXT:    fcvt z1.h, p0/m, z1.s
 ; CHECK-NEXT:    st1h { z1.s }, p0, [x1, x8, lsl #1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x float>, <8 x float>* %a
+  %op1 = load <8 x float>, ptr %a
   %res = fptrunc <8 x float> %op1 to <8 x half>
-  store <8 x half> %res, <8 x half>* %b
+  store <8 x half> %res, ptr %b
   ret void
 }
 
@@ -493,7 +493,7 @@ define void @fcvt_v8f32_v8f16(<8 x float>* %a, <8 x half>* %b) #0 {
 ; FCVT D -> H
 ;
 
-define void @fcvt_v1f64_v1f16(<1 x double>* %a, <1 x half>* %b) #0 {
+define void @fcvt_v1f64_v1f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v1f64_v1f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -501,13 +501,13 @@ define void @fcvt_v1f64_v1f16(<1 x double>* %a, <1 x half>* %b) #0 {
 ; CHECK-NEXT:    fcvt z0.h, p0/m, z0.d
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <1 x double>, <1 x double>* %a
+  %op1 = load <1 x double>, ptr %a
   %res = fptrunc <1 x double> %op1 to <1 x half>
-  store <1 x half> %res, <1 x half>* %b
+  store <1 x half> %res, ptr %b
   ret void
 }
 
-define void @fcvt_v2f64_v2f16(<2 x double>* %a, <2 x half>* %b) #0 {
+define void @fcvt_v2f64_v2f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v2f64_v2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -515,13 +515,13 @@ define void @fcvt_v2f64_v2f16(<2 x double>* %a, <2 x half>* %b) #0 {
 ; CHECK-NEXT:    fcvt z0.h, p0/m, z0.d
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <2 x double>, <2 x double>* %a
+  %op1 = load <2 x double>, ptr %a
   %res = fptrunc <2 x double> %op1 to <2 x half>
-  store <2 x half> %res, <2 x half>* %b
+  store <2 x half> %res, ptr %b
   ret void
 }
 
-define void @fcvt_v4f64_v4f16(<4 x double>* %a, <4 x half>* %b) #0 {
+define void @fcvt_v4f64_v4f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v4f64_v4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -532,9 +532,9 @@ define void @fcvt_v4f64_v4f16(<4 x double>* %a, <4 x half>* %b) #0 {
 ; CHECK-NEXT:    fcvt z1.h, p0/m, z1.d
 ; CHECK-NEXT:    st1h { z1.d }, p0, [x1, x8, lsl #1]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x double>, <4 x double>* %a
+  %op1 = load <4 x double>, ptr %a
   %res = fptrunc <4 x double> %op1 to <4 x half>
-  store <4 x half> %res, <4 x half>* %b
+  store <4 x half> %res, ptr %b
   ret void
 }
 
@@ -542,7 +542,7 @@ define void @fcvt_v4f64_v4f16(<4 x double>* %a, <4 x half>* %b) #0 {
 ; FCVT D -> S
 ;
 
-define void @fcvt_v1f64_v1f32(<1 x double> %op1, <1 x float>* %b) #0 {
+define void @fcvt_v1f64_v1f32(<1 x double> %op1, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v1f64_v1f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -551,11 +551,11 @@ define void @fcvt_v1f64_v1f32(<1 x double> %op1, <1 x float>* %b) #0 {
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   %res = fptrunc <1 x double> %op1 to <1 x float>
-  store <1 x float> %res, <1 x float>* %b
+  store <1 x float> %res, ptr %b
   ret void
 }
 
-define void @fcvt_v2f64_v2f32(<2 x double> %op1, <2 x float>* %b) #0 {
+define void @fcvt_v2f64_v2f32(<2 x double> %op1, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v2f64_v2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
@@ -564,11 +564,11 @@ define void @fcvt_v2f64_v2f32(<2 x double> %op1, <2 x float>* %b) #0 {
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   %res = fptrunc <2 x double> %op1 to <2 x float>
-  store <2 x float> %res, <2 x float>* %b
+  store <2 x float> %res, ptr %b
   ret void
 }
 
-define void @fcvt_v4f64_v4f32(<4 x double>* %a, <4 x float>* %b) #0 {
+define void @fcvt_v4f64_v4f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v4f64_v4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -579,9 +579,9 @@ define void @fcvt_v4f64_v4f32(<4 x double>* %a, <4 x float>* %b) #0 {
 ; CHECK-NEXT:    fcvt z1.s, p0/m, z1.d
 ; CHECK-NEXT:    st1w { z1.d }, p0, [x1, x8, lsl #2]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x double>, <4 x double>* %a
+  %op1 = load <4 x double>, ptr %a
   %res = fptrunc <4 x double> %op1 to <4 x float>
-  store <4 x float> %res, <4 x float>* %b
+  store <4 x float> %res, ptr %b
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll
index aff88e09235ec..5c96c56f1d3e6 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll
@@ -37,7 +37,7 @@ define <8 x half> @fma_v8f16(<8 x half> %op1, <8 x half> %op2, <8 x half> %op3)
   ret <8 x half> %res
 }
 
-define void @fma_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x half>* %c) #0 {
+define void @fma_v16f16(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: fma_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q3, [x1]
@@ -49,12 +49,12 @@ define void @fma_v16f16(<16 x half>* %a, <16 x half>* %b, <16 x half>* %c) #0 {
 ; CHECK-NEXT:    fmla z1.h, p0/m, z2.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x half>, <16 x half>* %a
-  %op2 = load <16 x half>, <16 x half>* %b
-  %op3 = load <16 x half>, <16 x half>* %c
+  %op1 = load <16 x half>, ptr %a
+  %op2 = load <16 x half>, ptr %b
+  %op3 = load <16 x half>, ptr %c
   %mul = fmul contract <16 x half> %op1, %op2
   %res = fadd contract <16 x half> %mul, %op3
-  store <16 x half> %res, <16 x half>* %a
+  store <16 x half> %res, ptr %a
   ret void
 }
 
@@ -88,7 +88,7 @@ define <4 x float> @fma_v4f32(<4 x float> %op1, <4 x float> %op2, <4 x float> %o
   ret <4 x float> %res
 }
 
-define void @fma_v8f32(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) #0 {
+define void @fma_v8f32(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: fma_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q3, [x1]
@@ -100,12 +100,12 @@ define void @fma_v8f32(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) #0 {
 ; CHECK-NEXT:    fmla z1.s, p0/m, z2.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x float>, <8 x float>* %a
-  %op2 = load <8 x float>, <8 x float>* %b
-  %op3 = load <8 x float>, <8 x float>* %c
+  %op1 = load <8 x float>, ptr %a
+  %op2 = load <8 x float>, ptr %b
+  %op3 = load <8 x float>, ptr %c
   %mul = fmul contract <8 x float> %op1, %op2
   %res = fadd contract <8 x float> %mul, %op3
-  store <8 x float> %res, <8 x float>* %a
+  store <8 x float> %res, ptr %a
   ret void
 }
 
@@ -137,7 +137,7 @@ define <2 x double> @fma_v2f64(<2 x double> %op1, <2 x double> %op2, <2 x double
   ret <2 x double> %res
 }
 
-define void @fma_v4f64(<4 x double>* %a, <4 x double>* %b, <4 x double>* %c) #0 {
+define void @fma_v4f64(ptr %a, ptr %b, ptr %c) #0 {
 ; CHECK-LABEL: fma_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q3, [x1]
@@ -149,12 +149,12 @@ define void @fma_v4f64(<4 x double>* %a, <4 x double>* %b, <4 x double>* %c) #0
 ; CHECK-NEXT:    fmla z1.d, p0/m, z2.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x double>, <4 x double>* %a
-  %op2 = load <4 x double>, <4 x double>* %b
-  %op3 = load <4 x double>, <4 x double>* %c
+  %op1 = load <4 x double>, ptr %a
+  %op2 = load <4 x double>, ptr %b
+  %op3 = load <4 x double>, ptr %c
   %mul = fmul contract <4 x double> %op1, %op2
   %res = fadd contract <4 x double> %mul, %op3
-  store <4 x double> %res, <4 x double>* %a
+  store <4 x double> %res, ptr %a
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll
index e244ccf19b500..5516aa94f15ab 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll
@@ -33,7 +33,7 @@ define <8 x half> @fmaxnm_v8f16(<8 x half> %op1, <8 x half> %op2) #0 {
   ret <8 x half> %res
 }
 
-define void @fmaxnm_v16f16(<16 x half>* %a, <16 x half>* %b) #0 {
+define void @fmaxnm_v16f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fmaxnm_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -43,10 +43,10 @@ define void @fmaxnm_v16f16(<16 x half>* %a, <16 x half>* %b) #0 {
 ; CHECK-NEXT:    fmaxnm z1.h, p0/m, z1.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x half>, <16 x half>* %a
-  %op2 = load <16 x half>, <16 x half>* %b
+  %op1 = load <16 x half>, ptr %a
+  %op2 = load <16 x half>, ptr %b
   %res = call <16 x half> @llvm.maxnum.v16f16(<16 x half> %op1, <16 x half> %op2)
-  store <16 x half> %res, <16 x half>* %a
+  store <16 x half> %res, ptr %a
   ret void
 }
 
@@ -76,7 +76,7 @@ define <4 x float> @fmaxnm_v4f32(<4 x float> %op1, <4 x float> %op2) #0 {
   ret <4 x float> %res
 }
 
-define void @fmaxnm_v8f32(<8 x float>* %a, <8 x float>* %b) #0 {
+define void @fmaxnm_v8f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fmaxnm_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -86,10 +86,10 @@ define void @fmaxnm_v8f32(<8 x float>* %a, <8 x float>* %b) #0 {
 ; CHECK-NEXT:    fmaxnm z1.s, p0/m, z1.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x float>, <8 x float>* %a
-  %op2 = load <8 x float>, <8 x float>* %b
+  %op1 = load <8 x float>, ptr %a
+  %op2 = load <8 x float>, ptr %b
   %res = call <8 x float> @llvm.maxnum.v8f32(<8 x float> %op1, <8 x float> %op2)
-  store <8 x float> %res, <8 x float>* %a
+  store <8 x float> %res, ptr %a
   ret void
 }
 
@@ -117,7 +117,7 @@ define <2 x double> @fmaxnm_v2f64(<2 x double> %op1, <2 x double> %op2) #0 {
   ret <2 x double> %res
 }
 
-define void @fmaxnm_v4f64(<4 x double>* %a, <4 x double>* %b) #0 {
+define void @fmaxnm_v4f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fmaxnm_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -127,10 +127,10 @@ define void @fmaxnm_v4f64(<4 x double>* %a, <4 x double>* %b) #0 {
 ; CHECK-NEXT:    fmaxnm z1.d, p0/m, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x double>, <4 x double>* %a
-  %op2 = load <4 x double>, <4 x double>* %b
+  %op1 = load <4 x double>, ptr %a
+  %op2 = load <4 x double>, ptr %b
   %res = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %op1, <4 x double> %op2)
-  store <4 x double> %res, <4 x double>* %a
+  store <4 x double> %res, ptr %a
   ret void
 }
 
@@ -164,7 +164,7 @@ define <8 x half> @fminnm_v8f16(<8 x half> %op1, <8 x half> %op2) #0 {
   ret <8 x half> %res
 }
 
-define void @fminnm_v16f16(<16 x half>* %a, <16 x half>* %b) #0 {
+define void @fminnm_v16f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fminnm_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -174,10 +174,10 @@ define void @fminnm_v16f16(<16 x half>* %a, <16 x half>* %b) #0 {
 ; CHECK-NEXT:    fminnm z1.h, p0/m, z1.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x half>, <16 x half>* %a
-  %op2 = load <16 x half>, <16 x half>* %b
+  %op1 = load <16 x half>, ptr %a
+  %op2 = load <16 x half>, ptr %b
   %res = call <16 x half> @llvm.minnum.v16f16(<16 x half> %op1, <16 x half> %op2)
-  store <16 x half> %res, <16 x half>* %a
+  store <16 x half> %res, ptr %a
   ret void
 }
 
@@ -207,7 +207,7 @@ define <4 x float> @fminnm_v4f32(<4 x float> %op1, <4 x float> %op2) #0 {
   ret <4 x float> %res
 }
 
-define void @fminnm_v8f32(<8 x float>* %a, <8 x float>* %b) #0 {
+define void @fminnm_v8f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fminnm_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -217,10 +217,10 @@ define void @fminnm_v8f32(<8 x float>* %a, <8 x float>* %b) #0 {
 ; CHECK-NEXT:    fminnm z1.s, p0/m, z1.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x float>, <8 x float>* %a
-  %op2 = load <8 x float>, <8 x float>* %b
+  %op1 = load <8 x float>, ptr %a
+  %op2 = load <8 x float>, ptr %b
   %res = call <8 x float> @llvm.minnum.v8f32(<8 x float> %op1, <8 x float> %op2)
-  store <8 x float> %res, <8 x float>* %a
+  store <8 x float> %res, ptr %a
   ret void
 }
 
@@ -248,7 +248,7 @@ define <2 x double> @fminnm_v2f64(<2 x double> %op1, <2 x double> %op2) #0 {
   ret <2 x double> %res
 }
 
-define void @fminnm_v4f64(<4 x double>* %a, <4 x double>* %b) #0 {
+define void @fminnm_v4f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fminnm_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -258,10 +258,10 @@ define void @fminnm_v4f64(<4 x double>* %a, <4 x double>* %b) #0 {
 ; CHECK-NEXT:    fminnm z1.d, p0/m, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x double>, <4 x double>* %a
-  %op2 = load <4 x double>, <4 x double>* %b
+  %op1 = load <4 x double>, ptr %a
+  %op2 = load <4 x double>, ptr %b
   %res = call <4 x double> @llvm.minnum.v4f64(<4 x double> %op1, <4 x double> %op2)
-  store <4 x double> %res, <4 x double>* %a
+  store <4 x double> %res, ptr %a
   ret void
 }
 
@@ -295,7 +295,7 @@ define <8 x half> @fmax_v8f16(<8 x half> %op1, <8 x half> %op2) #0 {
   ret <8 x half> %res
 }
 
-define void @fmax_v16f16(<16 x half>* %a, <16 x half>* %b) #0 {
+define void @fmax_v16f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fmax_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -305,10 +305,10 @@ define void @fmax_v16f16(<16 x half>* %a, <16 x half>* %b) #0 {
 ; CHECK-NEXT:    fmax z1.h, p0/m, z1.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x half>, <16 x half>* %a
-  %op2 = load <16 x half>, <16 x half>* %b
+  %op1 = load <16 x half>, ptr %a
+  %op2 = load <16 x half>, ptr %b
   %res = call <16 x half> @llvm.maximum.v16f16(<16 x half> %op1, <16 x half> %op2)
-  store <16 x half> %res, <16 x half>* %a
+  store <16 x half> %res, ptr %a
   ret void
 }
 
@@ -338,7 +338,7 @@ define <4 x float> @fmax_v4f32(<4 x float> %op1, <4 x float> %op2) #0 {
   ret <4 x float> %res
 }
 
-define void @fmax_v8f32(<8 x float>* %a, <8 x float>* %b) #0 {
+define void @fmax_v8f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fmax_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -348,10 +348,10 @@ define void @fmax_v8f32(<8 x float>* %a, <8 x float>* %b) #0 {
 ; CHECK-NEXT:    fmax z1.s, p0/m, z1.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x float>, <8 x float>* %a
-  %op2 = load <8 x float>, <8 x float>* %b
+  %op1 = load <8 x float>, ptr %a
+  %op2 = load <8 x float>, ptr %b
   %res = call <8 x float> @llvm.maximum.v8f32(<8 x float> %op1, <8 x float> %op2)
-  store <8 x float> %res, <8 x float>* %a
+  store <8 x float> %res, ptr %a
   ret void
 }
 
@@ -379,7 +379,7 @@ define <2 x double> @fmax_v2f64(<2 x double> %op1, <2 x double> %op2) #0 {
   ret <2 x double> %res
 }
 
-define void @fmax_v4f64(<4 x double>* %a, <4 x double>* %b) #0 {
+define void @fmax_v4f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fmax_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -389,10 +389,10 @@ define void @fmax_v4f64(<4 x double>* %a, <4 x double>* %b) #0 {
 ; CHECK-NEXT:    fmax z1.d, p0/m, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x double>, <4 x double>* %a
-  %op2 = load <4 x double>, <4 x double>* %b
+  %op1 = load <4 x double>, ptr %a
+  %op2 = load <4 x double>, ptr %b
   %res = call <4 x double> @llvm.maximum.v4f64(<4 x double> %op1, <4 x double> %op2)
-  store <4 x double> %res, <4 x double>* %a
+  store <4 x double> %res, ptr %a
   ret void
 }
 
@@ -426,7 +426,7 @@ define <8 x half> @fmin_v8f16(<8 x half> %op1, <8 x half> %op2) #0 {
   ret <8 x half> %res
 }
 
-define void @fmin_v16f16(<16 x half>* %a, <16 x half>* %b) #0 {
+define void @fmin_v16f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fmin_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -436,10 +436,10 @@ define void @fmin_v16f16(<16 x half>* %a, <16 x half>* %b) #0 {
 ; CHECK-NEXT:    fmin z1.h, p0/m, z1.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x half>, <16 x half>* %a
-  %op2 = load <16 x half>, <16 x half>* %b
+  %op1 = load <16 x half>, ptr %a
+  %op2 = load <16 x half>, ptr %b
   %res = call <16 x half> @llvm.minimum.v16f16(<16 x half> %op1, <16 x half> %op2)
-  store <16 x half> %res, <16 x half>* %a
+  store <16 x half> %res, ptr %a
   ret void
 }
 
@@ -469,7 +469,7 @@ define <4 x float> @fmin_v4f32(<4 x float> %op1, <4 x float> %op2) #0 {
   ret <4 x float> %res
 }
 
-define void @fmin_v8f32(<8 x float>* %a, <8 x float>* %b) #0 {
+define void @fmin_v8f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fmin_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -479,10 +479,10 @@ define void @fmin_v8f32(<8 x float>* %a, <8 x float>* %b) #0 {
 ; CHECK-NEXT:    fmin z1.s, p0/m, z1.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x float>, <8 x float>* %a
-  %op2 = load <8 x float>, <8 x float>* %b
+  %op1 = load <8 x float>, ptr %a
+  %op2 = load <8 x float>, ptr %b
   %res = call <8 x float> @llvm.minimum.v8f32(<8 x float> %op1, <8 x float> %op2)
-  store <8 x float> %res, <8 x float>* %a
+  store <8 x float> %res, ptr %a
   ret void
 }
 
@@ -510,7 +510,7 @@ define <2 x double> @fmin_v2f64(<2 x double> %op1, <2 x double> %op2) #0 {
   ret <2 x double> %res
 }
 
-define void @fmin_v4f64(<4 x double>* %a, <4 x double>* %b) #0 {
+define void @fmin_v4f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fmin_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -520,10 +520,10 @@ define void @fmin_v4f64(<4 x double>* %a, <4 x double>* %b) #0 {
 ; CHECK-NEXT:    fmin z1.d, p0/m, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x double>, <4 x double>* %a
-  %op2 = load <4 x double>, <4 x double>* %b
+  %op1 = load <4 x double>, ptr %a
+  %op2 = load <4 x double>, ptr %b
   %res = call <4 x double> @llvm.minimum.v4f64(<4 x double> %op1, <4 x double> %op2)
-  store <4 x double> %res, <4 x double>* %a
+  store <4 x double> %res, ptr %a
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
index ebf4c5bdd1bbb..dbc5817bd2d9f 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
@@ -33,7 +33,7 @@ define half @fadda_v8f16(half %start, <8 x half> %a) #0 {
   ret half %res
 }
 
-define half @fadda_v16f16(half %start, <16 x half>* %a) #0 {
+define half @fadda_v16f16(half %start, ptr %a) #0 {
 ; CHECK-LABEL: fadda_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q2, [x0]
@@ -43,7 +43,7 @@ define half @fadda_v16f16(half %start, <16 x half>* %a) #0 {
 ; CHECK-NEXT:    fadda h0, p0, h0, z2.h
 ; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
 ; CHECK-NEXT:    ret
-  %op = load <16 x half>, <16 x half>* %a
+  %op = load <16 x half>, ptr %a
   %res = call half @llvm.vector.reduce.fadd.v16f16(half %start, <16 x half> %op)
   ret half %res
 }
@@ -74,7 +74,7 @@ define float @fadda_v4f32(float %start, <4 x float> %a) #0 {
   ret float %res
 }
 
-define float @fadda_v8f32(float %start, <8 x float>* %a) #0 {
+define float @fadda_v8f32(float %start, ptr %a) #0 {
 ; CHECK-LABEL: fadda_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q2, [x0]
@@ -84,7 +84,7 @@ define float @fadda_v8f32(float %start, <8 x float>* %a) #0 {
 ; CHECK-NEXT:    fadda s0, p0, s0, z2.s
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
 ; CHECK-NEXT:    ret
-  %op = load <8 x float>, <8 x float>* %a
+  %op = load <8 x float>, ptr %a
   %res = call float @llvm.vector.reduce.fadd.v8f32(float %start, <8 x float> %op)
   ret float %res
 }
@@ -112,7 +112,7 @@ define double @fadda_v2f64(double %start, <2 x double> %a) #0 {
   ret double %res
 }
 
-define double @fadda_v4f64(double %start, <4 x double>* %a) #0 {
+define double @fadda_v4f64(double %start, ptr %a) #0 {
 ; CHECK-LABEL: fadda_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q2, [x0]
@@ -122,7 +122,7 @@ define double @fadda_v4f64(double %start, <4 x double>* %a) #0 {
 ; CHECK-NEXT:    fadda d0, p0, d0, z2.d
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
-  %op = load <4 x double>, <4 x double>* %a
+  %op = load <4 x double>, ptr %a
   %res = call double @llvm.vector.reduce.fadd.v4f64(double %start, <4 x double> %op)
   ret double %res
 }
@@ -155,7 +155,7 @@ define half @faddv_v8f16(half %start, <8 x half> %a) #0 {
   ret half %res
 }
 
-define half @faddv_v16f16(half %start, <16 x half>* %a) #0 {
+define half @faddv_v16f16(half %start, ptr %a) #0 {
 ; CHECK-LABEL: faddv_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q2, q1, [x0]
@@ -164,7 +164,7 @@ define half @faddv_v16f16(half %start, <16 x half>* %a) #0 {
 ; CHECK-NEXT:    faddv h1, p0, z1.h
 ; CHECK-NEXT:    fadd h0, h0, h1
 ; CHECK-NEXT:    ret
-  %op = load <16 x half>, <16 x half>* %a
+  %op = load <16 x half>, ptr %a
   %res = call fast half @llvm.vector.reduce.fadd.v16f16(half %start, <16 x half> %op)
   ret half %res
 }
@@ -193,7 +193,7 @@ define float @faddv_v4f32(float %start, <4 x float> %a) #0 {
   ret float %res
 }
 
-define float @faddv_v8f32(float %start, <8 x float>* %a) #0 {
+define float @faddv_v8f32(float %start, ptr %a) #0 {
 ; CHECK-LABEL: faddv_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q2, q1, [x0]
@@ -202,7 +202,7 @@ define float @faddv_v8f32(float %start, <8 x float>* %a) #0 {
 ; CHECK-NEXT:    faddv s1, p0, z1.s
 ; CHECK-NEXT:    fadd s0, s0, s1
 ; CHECK-NEXT:    ret
-  %op = load <8 x float>, <8 x float>* %a
+  %op = load <8 x float>, ptr %a
   %res = call fast float @llvm.vector.reduce.fadd.v8f32(float %start, <8 x float> %op)
   ret float %res
 }
@@ -229,7 +229,7 @@ define double @faddv_v2f64(double %start, <2 x double> %a) #0 {
   ret double %res
 }
 
-define double @faddv_v4f64(double %start, <4 x double>* %a) #0 {
+define double @faddv_v4f64(double %start, ptr %a) #0 {
 ; CHECK-LABEL: faddv_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q2, q1, [x0]
@@ -238,7 +238,7 @@ define double @faddv_v4f64(double %start, <4 x double>* %a) #0 {
 ; CHECK-NEXT:    faddv d1, p0, z1.d
 ; CHECK-NEXT:    fadd d0, d0, d1
 ; CHECK-NEXT:    ret
-  %op = load <4 x double>, <4 x double>* %a
+  %op = load <4 x double>, ptr %a
   %res = call fast double @llvm.vector.reduce.fadd.v4f64(double %start, <4 x double> %op)
   ret double %res
 }
@@ -271,7 +271,7 @@ define half @fmaxv_v8f16(<8 x half> %a) #0 {
   ret half %res
 }
 
-define half @fmaxv_v16f16(<16 x half>* %a) #0 {
+define half @fmaxv_v16f16(ptr %a) #0 {
 ; CHECK-LABEL: fmaxv_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -280,7 +280,7 @@ define half @fmaxv_v16f16(<16 x half>* %a) #0 {
 ; CHECK-NEXT:    fmaxnmv h0, p0, z0.h
 ; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
 ; CHECK-NEXT:    ret
-  %op = load <16 x half>, <16 x half>* %a
+  %op = load <16 x half>, ptr %a
   %res = call half @llvm.vector.reduce.fmax.v16f16(<16 x half> %op)
   ret half %res
 }
@@ -309,7 +309,7 @@ define float @fmaxv_v4f32(<4 x float> %a) #0 {
   ret float %res
 }
 
-define float @fmaxv_v8f32(<8 x float>* %a) #0 {
+define float @fmaxv_v8f32(ptr %a) #0 {
 ; CHECK-LABEL: fmaxv_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -318,7 +318,7 @@ define float @fmaxv_v8f32(<8 x float>* %a) #0 {
 ; CHECK-NEXT:    fmaxnmv s0, p0, z0.s
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
 ; CHECK-NEXT:    ret
-  %op = load <8 x float>, <8 x float>* %a
+  %op = load <8 x float>, ptr %a
   %res = call float @llvm.vector.reduce.fmax.v8f32(<8 x float> %op)
   ret float %res
 }
@@ -345,7 +345,7 @@ define double @fmaxv_v2f64(<2 x double> %a) #0 {
   ret double %res
 }
 
-define double @fmaxv_v4f64(<4 x double>* %a) #0 {
+define double @fmaxv_v4f64(ptr %a) #0 {
 ; CHECK-LABEL: fmaxv_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -354,7 +354,7 @@ define double @fmaxv_v4f64(<4 x double>* %a) #0 {
 ; CHECK-NEXT:    fmaxnmv d0, p0, z0.d
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
-  %op = load <4 x double>, <4 x double>* %a
+  %op = load <4 x double>, ptr %a
   %res = call double @llvm.vector.reduce.fmax.v4f64(<4 x double> %op)
   ret double %res
 }
@@ -387,7 +387,7 @@ define half @fminv_v8f16(<8 x half> %a) #0 {
   ret half %res
 }
 
-define half @fminv_v16f16(<16 x half>* %a) #0 {
+define half @fminv_v16f16(ptr %a) #0 {
 ; CHECK-LABEL: fminv_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -396,7 +396,7 @@ define half @fminv_v16f16(<16 x half>* %a) #0 {
 ; CHECK-NEXT:    fminnmv h0, p0, z0.h
 ; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
 ; CHECK-NEXT:    ret
-  %op = load <16 x half>, <16 x half>* %a
+  %op = load <16 x half>, ptr %a
   %res = call half @llvm.vector.reduce.fmin.v16f16(<16 x half> %op)
   ret half %res
 }
@@ -425,7 +425,7 @@ define float @fminv_v4f32(<4 x float> %a) #0 {
   ret float %res
 }
 
-define float @fminv_v8f32(<8 x float>* %a) #0 {
+define float @fminv_v8f32(ptr %a) #0 {
 ; CHECK-LABEL: fminv_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -434,7 +434,7 @@ define float @fminv_v8f32(<8 x float>* %a) #0 {
 ; CHECK-NEXT:    fminnmv s0, p0, z0.s
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
 ; CHECK-NEXT:    ret
-  %op = load <8 x float>, <8 x float>* %a
+  %op = load <8 x float>, ptr %a
   %res = call float @llvm.vector.reduce.fmin.v8f32(<8 x float> %op)
   ret float %res
 }
@@ -461,7 +461,7 @@ define double @fminv_v2f64(<2 x double> %a) #0 {
   ret double %res
 }
 
-define double @fminv_v4f64(<4 x double>* %a) #0 {
+define double @fminv_v4f64(ptr %a) #0 {
 ; CHECK-LABEL: fminv_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -470,7 +470,7 @@ define double @fminv_v4f64(<4 x double>* %a) #0 {
 ; CHECK-NEXT:    fminnmv d0, p0, z0.d
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
-  %op = load <4 x double>, <4 x double>* %a
+  %op = load <4 x double>, ptr %a
   %res = call double @llvm.vector.reduce.fmin.v4f64(<4 x double> %op)
   ret double %res
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll
index 7264e776e0f0f..50d518dc96c17 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-rounding.ll
@@ -43,7 +43,7 @@ define <8 x half> @frintp_v8f16(<8 x half> %op) #0 {
   ret <8 x half> %res
 }
 
-define void @frintp_v16f16(<16 x half>* %a) #0 {
+define void @frintp_v16f16(ptr %a) #0 {
 ; CHECK-LABEL: frintp_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -52,9 +52,9 @@ define void @frintp_v16f16(<16 x half>* %a) #0 {
 ; CHECK-NEXT:    frintp z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <16 x half>, <16 x half>* %a
+  %op = load <16 x half>, ptr %a
   %res = call <16 x half> @llvm.ceil.v16f16(<16 x half> %op)
-  store <16 x half> %res, <16 x half>* %a
+  store <16 x half> %res, ptr %a
   ret void
 }
 
@@ -82,7 +82,7 @@ define <4 x float> @frintp_v4f32(<4 x float> %op) #0 {
   ret <4 x float> %res
 }
 
-define void @frintp_v8f32(<8 x float>* %a) #0 {
+define void @frintp_v8f32(ptr %a) #0 {
 ; CHECK-LABEL: frintp_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -91,9 +91,9 @@ define void @frintp_v8f32(<8 x float>* %a) #0 {
 ; CHECK-NEXT:    frintp z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <8 x float>, <8 x float>* %a
+  %op = load <8 x float>, ptr %a
   %res = call <8 x float> @llvm.ceil.v8f32(<8 x float> %op)
-  store <8 x float> %res, <8 x float>* %a
+  store <8 x float> %res, ptr %a
   ret void
 }
 
@@ -119,7 +119,7 @@ define <2 x double> @frintp_v2f64(<2 x double> %op) #0 {
   ret <2 x double> %res
 }
 
-define void @frintp_v4f64(<4 x double>* %a) #0 {
+define void @frintp_v4f64(ptr %a) #0 {
 ; CHECK-LABEL: frintp_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -128,9 +128,9 @@ define void @frintp_v4f64(<4 x double>* %a) #0 {
 ; CHECK-NEXT:    frintp z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <4 x double>, <4 x double>* %a
+  %op = load <4 x double>, ptr %a
   %res = call <4 x double> @llvm.ceil.v4f64(<4 x double> %op)
-  store <4 x double> %res, <4 x double>* %a
+  store <4 x double> %res, ptr %a
   ret void
 }
 
@@ -174,7 +174,7 @@ define <8 x half> @frintm_v8f16(<8 x half> %op) #0 {
   ret <8 x half> %res
 }
 
-define void @frintm_v16f16(<16 x half>* %a) #0 {
+define void @frintm_v16f16(ptr %a) #0 {
 ; CHECK-LABEL: frintm_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -183,9 +183,9 @@ define void @frintm_v16f16(<16 x half>* %a) #0 {
 ; CHECK-NEXT:    frintm z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <16 x half>, <16 x half>* %a
+  %op = load <16 x half>, ptr %a
   %res = call <16 x half> @llvm.floor.v16f16(<16 x half> %op)
-  store <16 x half> %res, <16 x half>* %a
+  store <16 x half> %res, ptr %a
   ret void
 }
 
@@ -213,7 +213,7 @@ define <4 x float> @frintm_v4f32(<4 x float> %op) #0 {
   ret <4 x float> %res
 }
 
-define void @frintm_v8f32(<8 x float>* %a) #0 {
+define void @frintm_v8f32(ptr %a) #0 {
 ; CHECK-LABEL: frintm_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -222,9 +222,9 @@ define void @frintm_v8f32(<8 x float>* %a) #0 {
 ; CHECK-NEXT:    frintm z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <8 x float>, <8 x float>* %a
+  %op = load <8 x float>, ptr %a
   %res = call <8 x float> @llvm.floor.v8f32(<8 x float> %op)
-  store <8 x float> %res, <8 x float>* %a
+  store <8 x float> %res, ptr %a
   ret void
 }
 
@@ -250,7 +250,7 @@ define <2 x double> @frintm_v2f64(<2 x double> %op) #0 {
   ret <2 x double> %res
 }
 
-define void @frintm_v4f64(<4 x double>* %a) #0 {
+define void @frintm_v4f64(ptr %a) #0 {
 ; CHECK-LABEL: frintm_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -259,9 +259,9 @@ define void @frintm_v4f64(<4 x double>* %a) #0 {
 ; CHECK-NEXT:    frintm z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <4 x double>, <4 x double>* %a
+  %op = load <4 x double>, ptr %a
   %res = call <4 x double> @llvm.floor.v4f64(<4 x double> %op)
-  store <4 x double> %res, <4 x double>* %a
+  store <4 x double> %res, ptr %a
   ret void
 }
 
@@ -305,7 +305,7 @@ define <8 x half> @frinti_v8f16(<8 x half> %op) #0 {
   ret <8 x half> %res
 }
 
-define void @frinti_v16f16(<16 x half>* %a) #0 {
+define void @frinti_v16f16(ptr %a) #0 {
 ; CHECK-LABEL: frinti_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -314,9 +314,9 @@ define void @frinti_v16f16(<16 x half>* %a) #0 {
 ; CHECK-NEXT:    frinti z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <16 x half>, <16 x half>* %a
+  %op = load <16 x half>, ptr %a
   %res = call <16 x half> @llvm.nearbyint.v16f16(<16 x half> %op)
-  store <16 x half> %res, <16 x half>* %a
+  store <16 x half> %res, ptr %a
   ret void
 }
 
@@ -344,7 +344,7 @@ define <4 x float> @frinti_v4f32(<4 x float> %op) #0 {
   ret <4 x float> %res
 }
 
-define void @frinti_v8f32(<8 x float>* %a) #0 {
+define void @frinti_v8f32(ptr %a) #0 {
 ; CHECK-LABEL: frinti_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -353,9 +353,9 @@ define void @frinti_v8f32(<8 x float>* %a) #0 {
 ; CHECK-NEXT:    frinti z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <8 x float>, <8 x float>* %a
+  %op = load <8 x float>, ptr %a
   %res = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %op)
-  store <8 x float> %res, <8 x float>* %a
+  store <8 x float> %res, ptr %a
   ret void
 }
 
@@ -381,7 +381,7 @@ define <2 x double> @frinti_v2f64(<2 x double> %op) #0 {
   ret <2 x double> %res
 }
 
-define void @frinti_v4f64(<4 x double>* %a) #0 {
+define void @frinti_v4f64(ptr %a) #0 {
 ; CHECK-LABEL: frinti_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -390,9 +390,9 @@ define void @frinti_v4f64(<4 x double>* %a) #0 {
 ; CHECK-NEXT:    frinti z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <4 x double>, <4 x double>* %a
+  %op = load <4 x double>, ptr %a
   %res = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %op)
-  store <4 x double> %res, <4 x double>* %a
+  store <4 x double> %res, ptr %a
   ret void
 }
 
@@ -436,7 +436,7 @@ define <8 x half> @frintx_v8f16(<8 x half> %op) #0 {
   ret <8 x half> %res
 }
 
-define void @frintx_v16f16(<16 x half>* %a) #0 {
+define void @frintx_v16f16(ptr %a) #0 {
 ; CHECK-LABEL: frintx_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -445,9 +445,9 @@ define void @frintx_v16f16(<16 x half>* %a) #0 {
 ; CHECK-NEXT:    frintx z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <16 x half>, <16 x half>* %a
+  %op = load <16 x half>, ptr %a
   %res = call <16 x half> @llvm.rint.v16f16(<16 x half> %op)
-  store <16 x half> %res, <16 x half>* %a
+  store <16 x half> %res, ptr %a
   ret void
 }
 
@@ -475,7 +475,7 @@ define <4 x float> @frintx_v4f32(<4 x float> %op) #0 {
   ret <4 x float> %res
 }
 
-define void @frintx_v8f32(<8 x float>* %a) #0 {
+define void @frintx_v8f32(ptr %a) #0 {
 ; CHECK-LABEL: frintx_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -484,9 +484,9 @@ define void @frintx_v8f32(<8 x float>* %a) #0 {
 ; CHECK-NEXT:    frintx z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <8 x float>, <8 x float>* %a
+  %op = load <8 x float>, ptr %a
   %res = call <8 x float> @llvm.rint.v8f32(<8 x float> %op)
-  store <8 x float> %res, <8 x float>* %a
+  store <8 x float> %res, ptr %a
   ret void
 }
 
@@ -512,7 +512,7 @@ define <2 x double> @frintx_v2f64(<2 x double> %op) #0 {
   ret <2 x double> %res
 }
 
-define void @frintx_v4f64(<4 x double>* %a) #0 {
+define void @frintx_v4f64(ptr %a) #0 {
 ; CHECK-LABEL: frintx_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -521,9 +521,9 @@ define void @frintx_v4f64(<4 x double>* %a) #0 {
 ; CHECK-NEXT:    frintx z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <4 x double>, <4 x double>* %a
+  %op = load <4 x double>, ptr %a
   %res = call <4 x double> @llvm.rint.v4f64(<4 x double> %op)
-  store <4 x double> %res, <4 x double>* %a
+  store <4 x double> %res, ptr %a
   ret void
 }
 
@@ -567,7 +567,7 @@ define <8 x half> @frinta_v8f16(<8 x half> %op) #0 {
   ret <8 x half> %res
 }
 
-define void @frinta_v16f16(<16 x half>* %a) #0 {
+define void @frinta_v16f16(ptr %a) #0 {
 ; CHECK-LABEL: frinta_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -576,9 +576,9 @@ define void @frinta_v16f16(<16 x half>* %a) #0 {
 ; CHECK-NEXT:    frinta z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <16 x half>, <16 x half>* %a
+  %op = load <16 x half>, ptr %a
   %res = call <16 x half> @llvm.round.v16f16(<16 x half> %op)
-  store <16 x half> %res, <16 x half>* %a
+  store <16 x half> %res, ptr %a
   ret void
 }
 
@@ -606,7 +606,7 @@ define <4 x float> @frinta_v4f32(<4 x float> %op) #0 {
   ret <4 x float> %res
 }
 
-define void @frinta_v8f32(<8 x float>* %a) #0 {
+define void @frinta_v8f32(ptr %a) #0 {
 ; CHECK-LABEL: frinta_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -615,9 +615,9 @@ define void @frinta_v8f32(<8 x float>* %a) #0 {
 ; CHECK-NEXT:    frinta z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <8 x float>, <8 x float>* %a
+  %op = load <8 x float>, ptr %a
   %res = call <8 x float> @llvm.round.v8f32(<8 x float> %op)
-  store <8 x float> %res, <8 x float>* %a
+  store <8 x float> %res, ptr %a
   ret void
 }
 
@@ -643,7 +643,7 @@ define <2 x double> @frinta_v2f64(<2 x double> %op) #0 {
   ret <2 x double> %res
 }
 
-define void @frinta_v4f64(<4 x double>* %a) #0 {
+define void @frinta_v4f64(ptr %a) #0 {
 ; CHECK-LABEL: frinta_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -652,9 +652,9 @@ define void @frinta_v4f64(<4 x double>* %a) #0 {
 ; CHECK-NEXT:    frinta z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <4 x double>, <4 x double>* %a
+  %op = load <4 x double>, ptr %a
   %res = call <4 x double> @llvm.round.v4f64(<4 x double> %op)
-  store <4 x double> %res, <4 x double>* %a
+  store <4 x double> %res, ptr %a
   ret void
 }
 
@@ -698,7 +698,7 @@ define <8 x half> @frintn_v8f16(<8 x half> %op) #0 {
   ret <8 x half> %res
 }
 
-define void @frintn_v16f16(<16 x half>* %a) #0 {
+define void @frintn_v16f16(ptr %a) #0 {
 ; CHECK-LABEL: frintn_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -707,9 +707,9 @@ define void @frintn_v16f16(<16 x half>* %a) #0 {
 ; CHECK-NEXT:    frintn z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <16 x half>, <16 x half>* %a
+  %op = load <16 x half>, ptr %a
   %res = call <16 x half> @llvm.roundeven.v16f16(<16 x half> %op)
-  store <16 x half> %res, <16 x half>* %a
+  store <16 x half> %res, ptr %a
   ret void
 }
 
@@ -737,7 +737,7 @@ define <4 x float> @frintn_v4f32(<4 x float> %op) #0 {
   ret <4 x float> %res
 }
 
-define void @frintn_v8f32(<8 x float>* %a) #0 {
+define void @frintn_v8f32(ptr %a) #0 {
 ; CHECK-LABEL: frintn_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -746,9 +746,9 @@ define void @frintn_v8f32(<8 x float>* %a) #0 {
 ; CHECK-NEXT:    frintn z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <8 x float>, <8 x float>* %a
+  %op = load <8 x float>, ptr %a
   %res = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %op)
-  store <8 x float> %res, <8 x float>* %a
+  store <8 x float> %res, ptr %a
   ret void
 }
 
@@ -774,7 +774,7 @@ define <2 x double> @frintn_v2f64(<2 x double> %op) #0 {
   ret <2 x double> %res
 }
 
-define void @frintn_v4f64(<4 x double>* %a) #0 {
+define void @frintn_v4f64(ptr %a) #0 {
 ; CHECK-LABEL: frintn_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -783,9 +783,9 @@ define void @frintn_v4f64(<4 x double>* %a) #0 {
 ; CHECK-NEXT:    frintn z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <4 x double>, <4 x double>* %a
+  %op = load <4 x double>, ptr %a
   %res = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %op)
-  store <4 x double> %res, <4 x double>* %a
+  store <4 x double> %res, ptr %a
   ret void
 }
 
@@ -829,7 +829,7 @@ define <8 x half> @frintz_v8f16(<8 x half> %op) #0 {
   ret <8 x half> %res
 }
 
-define void @frintz_v16f16(<16 x half>* %a) #0 {
+define void @frintz_v16f16(ptr %a) #0 {
 ; CHECK-LABEL: frintz_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -838,9 +838,9 @@ define void @frintz_v16f16(<16 x half>* %a) #0 {
 ; CHECK-NEXT:    frintz z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <16 x half>, <16 x half>* %a
+  %op = load <16 x half>, ptr %a
   %res = call <16 x half> @llvm.trunc.v16f16(<16 x half> %op)
-  store <16 x half> %res, <16 x half>* %a
+  store <16 x half> %res, ptr %a
   ret void
 }
 
@@ -868,7 +868,7 @@ define <4 x float> @frintz_v4f32(<4 x float> %op) #0 {
   ret <4 x float> %res
 }
 
-define void @frintz_v8f32(<8 x float>* %a) #0 {
+define void @frintz_v8f32(ptr %a) #0 {
 ; CHECK-LABEL: frintz_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -877,9 +877,9 @@ define void @frintz_v8f32(<8 x float>* %a) #0 {
 ; CHECK-NEXT:    frintz z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <8 x float>, <8 x float>* %a
+  %op = load <8 x float>, ptr %a
   %res = call <8 x float> @llvm.trunc.v8f32(<8 x float> %op)
-  store <8 x float> %res, <8 x float>* %a
+  store <8 x float> %res, ptr %a
   ret void
 }
 
@@ -905,7 +905,7 @@ define <2 x double> @frintz_v2f64(<2 x double> %op) #0 {
   ret <2 x double> %res
 }
 
-define void @frintz_v4f64(<4 x double>* %a) #0 {
+define void @frintz_v4f64(ptr %a) #0 {
 ; CHECK-LABEL: frintz_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -914,9 +914,9 @@ define void @frintz_v4f64(<4 x double>* %a) #0 {
 ; CHECK-NEXT:    frintz z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <4 x double>, <4 x double>* %a
+  %op = load <4 x double>, ptr %a
   %res = call <4 x double> @llvm.trunc.v4f64(<4 x double> %op)
-  store <4 x double> %res, <4 x double>* %a
+  store <4 x double> %res, ptr %a
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
index 7289d740160a4..88b4038c33082 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
@@ -19,7 +19,7 @@ define <4 x i16> @fcvtzu_v4f16_v4i16(<4 x half> %op1) #0 {
   ret <4 x i16> %res
 }
 
-define void @fcvtzu_v8f16_v8i16(<8 x half>* %a, <8 x i16>* %b) #0 {
+define void @fcvtzu_v8f16_v8i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzu_v8f16_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -27,13 +27,13 @@ define void @fcvtzu_v8f16_v8i16(<8 x half>* %a, <8 x i16>* %b) #0 {
 ; CHECK-NEXT:    fcvtzu z0.h, p0/m, z0.h
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x half>, <8 x half>* %a
+  %op1 = load <8 x half>, ptr %a
   %res = fptoui <8 x half> %op1 to <8 x i16>
-  store <8 x i16> %res, <8 x i16>* %b
+  store <8 x i16> %res, ptr %b
   ret void
 }
 
-define void @fcvtzu_v16f16_v16i16(<16 x half>* %a, <16 x i16>* %b) #0 {
+define void @fcvtzu_v16f16_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzu_v16f16_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -42,9 +42,9 @@ define void @fcvtzu_v16f16_v16i16(<16 x half>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    fcvtzu z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x half>, <16 x half>* %a
+  %op1 = load <16 x half>, ptr %a
   %res = fptoui <16 x half> %op1 to <16 x i16>
-  store <16 x i16> %res, <16 x i16>* %b
+  store <16 x i16> %res, ptr %b
   ret void
 }
 
@@ -78,7 +78,7 @@ define <4 x i32> @fcvtzu_v4f16_v4i32(<4 x half> %op1) #0 {
   ret <4 x i32> %res
 }
 
-define void @fcvtzu_v8f16_v8i32(<8 x half>* %a, <8 x i32>* %b) #0 {
+define void @fcvtzu_v8f16_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzu_v8f16_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -90,13 +90,13 @@ define void @fcvtzu_v8f16_v8i32(<8 x half>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    fcvtzu z0.s, p0/m, z0.h
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x half>, <8 x half>* %a
+  %op1 = load <8 x half>, ptr %a
   %res = fptoui <8 x half> %op1 to <8 x i32>
-  store <8 x i32> %res, <8 x i32>* %b
+  store <8 x i32> %res, ptr %b
   ret void
 }
 
-define void @fcvtzu_v16f16_v16i32(<16 x half>* %a, <16 x i32>* %b) #0 {
+define void @fcvtzu_v16f16_v16i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzu_v16f16_v16i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -115,9 +115,9 @@ define void @fcvtzu_v16f16_v16i32(<16 x half>* %a, <16 x i32>* %b) #0 {
 ; CHECK-NEXT:    fcvtzu z1.s, p0/m, z2.h
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x half>, <16 x half>* %a
+  %op1 = load <16 x half>, ptr %a
   %res = fptoui <16 x half> %op1 to <16 x i32>
-  store <16 x i32> %res, <16 x i32>* %b
+  store <16 x i32> %res, ptr %b
   ret void
 }
 
@@ -150,7 +150,7 @@ define <2 x i64> @fcvtzu_v2f16_v2i64(<2 x half> %op1) #0 {
   ret <2 x i64> %res
 }
 
-define void @fcvtzu_v4f16_v4i64(<4 x half>* %a, <4 x i64>* %b) #0 {
+define void @fcvtzu_v4f16_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzu_v4f16_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -168,13 +168,13 @@ define void @fcvtzu_v4f16_v4i64(<4 x half>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x half>, <4 x half>* %a
+  %op1 = load <4 x half>, ptr %a
   %res = fptoui <4 x half> %op1 to <4 x i64>
-  store <4 x i64> %res, <4 x i64>* %b
+  store <4 x i64> %res, ptr %b
   ret void
 }
 
-define void @fcvtzu_v8f16_v8i64(<8 x half>* %a, <8 x i64>* %b) #0 {
+define void @fcvtzu_v8f16_v8i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzu_v8f16_v8i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #64
@@ -205,13 +205,13 @@ define void @fcvtzu_v8f16_v8i64(<8 x half>* %a, <8 x i64>* %b) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x1, #32]
 ; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x half>, <8 x half>* %a
+  %op1 = load <8 x half>, ptr %a
   %res = fptoui <8 x half> %op1 to <8 x i64>
-  store <8 x i64> %res, <8 x i64>* %b
+  store <8 x i64> %res, ptr %b
   ret void
 }
 
-define void @fcvtzu_v16f16_v16i64(<16 x half>* %a, <16 x i64>* %b) #0 {
+define void @fcvtzu_v16f16_v16i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzu_v16f16_v16i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #128
@@ -265,9 +265,9 @@ define void @fcvtzu_v16f16_v16i64(<16 x half>* %a, <16 x i64>* %b) #0 {
 ; CHECK-NEXT:    stp q7, q6, [x1, #64]
 ; CHECK-NEXT:    add sp, sp, #128
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x half>, <16 x half>* %a
+  %op1 = load <16 x half>, ptr %a
   %res = fptoui <16 x half> %op1 to <16 x i64>
-  store <16 x i64> %res, <16 x i64>* %b
+  store <16 x i64> %res, ptr %b
   ret void
 }
 
@@ -300,7 +300,7 @@ define <4 x i16> @fcvtzu_v4f32_v4i16(<4 x float> %op1) #0 {
   ret <4 x i16> %res
 }
 
-define <8 x i16> @fcvtzu_v8f32_v8i16(<8 x float>* %a) #0 {
+define <8 x i16> @fcvtzu_v8f32_v8i16(ptr %a) #0 {
 ; CHECK-LABEL: fcvtzu_v8f32_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -313,12 +313,12 @@ define <8 x i16> @fcvtzu_v8f32_v8i16(<8 x float>* %a) #0 {
 ; CHECK-NEXT:    splice z0.h, p0, z0.h, z2.h
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x float>, <8 x float>* %a
+  %op1 = load <8 x float>, ptr %a
   %res = fptoui <8 x float> %op1 to <8 x i16>
   ret <8 x i16> %res
 }
 
-define void @fcvtzu_v16f32_v16i16(<16 x float>* %a, <16 x i16>* %b) #0 {
+define void @fcvtzu_v16f32_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzu_v16f32_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -337,9 +337,9 @@ define void @fcvtzu_v16f32_v16i16(<16 x float>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    splice z3.h, p1, z3.h, z2.h
 ; CHECK-NEXT:    stp q0, q3, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x float>, <16 x float>* %a
+  %op1 = load <16 x float>, ptr %a
   %res = fptoui <16 x float> %op1 to <16 x i16>
-  store <16 x i16> %res, <16 x i16>* %b
+  store <16 x i16> %res, ptr %b
   ret void
 }
 
@@ -371,7 +371,7 @@ define <4 x i32> @fcvtzu_v4f32_v4i32(<4 x float> %op1) #0 {
   ret <4 x i32> %res
 }
 
-define void @fcvtzu_v8f32_v8i32(<8 x float>* %a, <8 x i32>* %b) #0 {
+define void @fcvtzu_v8f32_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzu_v8f32_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -380,9 +380,9 @@ define void @fcvtzu_v8f32_v8i32(<8 x float>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    fcvtzu z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x float>, <8 x float>* %a
+  %op1 = load <8 x float>, ptr %a
   %res = fptoui <8 x float> %op1 to <8 x i32>
-  store <8 x i32> %res, <8 x i32>* %b
+  store <8 x i32> %res, ptr %b
   ret void
 }
 
@@ -416,7 +416,7 @@ define <2 x i64> @fcvtzu_v2f32_v2i64(<2 x float> %op1) #0 {
   ret <2 x i64> %res
 }
 
-define void @fcvtzu_v4f32_v4i64(<4 x float>* %a, <4 x i64>* %b) #0 {
+define void @fcvtzu_v4f32_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzu_v4f32_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -428,13 +428,13 @@ define void @fcvtzu_v4f32_v4i64(<4 x float>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    fcvtzu z0.d, p0/m, z0.s
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x float>, <4 x float>* %a
+  %op1 = load <4 x float>, ptr %a
   %res = fptoui <4 x float> %op1 to <4 x i64>
-  store <4 x i64> %res, <4 x i64>* %b
+  store <4 x i64> %res, ptr %b
   ret void
 }
 
-define void @fcvtzu_v8f32_v8i64(<8 x float>* %a, <8 x i64>* %b) #0 {
+define void @fcvtzu_v8f32_v8i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzu_v8f32_v8i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -453,9 +453,9 @@ define void @fcvtzu_v8f32_v8i64(<8 x float>* %a, <8 x i64>* %b) #0 {
 ; CHECK-NEXT:    fcvtzu z1.d, p0/m, z2.s
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x float>, <8 x float>* %a
+  %op1 = load <8 x float>, ptr %a
   %res = fptoui <8 x float> %op1 to <8 x i64>
-  store <8 x i64> %res, <8 x i64>* %b
+  store <8 x i64> %res, ptr %b
   ret void
 }
 
@@ -488,7 +488,7 @@ define <2 x i16> @fcvtzu_v2f64_v2i16(<2 x double> %op1) #0 {
   ret <2 x i16> %res
 }
 
-define <4 x i16> @fcvtzu_v4f64_v4i16(<4 x double>* %a) #0 {
+define <4 x i16> @fcvtzu_v4f64_v4i16(ptr %a) #0 {
 ; CHECK-LABEL: fcvtzu_v4f64_v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -512,12 +512,12 @@ define <4 x i16> @fcvtzu_v4f64_v4i16(<4 x double>* %a) #0 {
 ; CHECK-NEXT:    ldr d0, [sp, #8]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x double>, <4 x double>* %a
+  %op1 = load <4 x double>, ptr %a
   %res = fptoui <4 x double> %op1 to <4 x i16>
   ret <4 x i16> %res
 }
 
-define <8 x i16> @fcvtzu_v8f64_v8i16(<8 x double>* %a) #0 {
+define <8 x i16> @fcvtzu_v8f64_v8i16(ptr %a) #0 {
 ; CHECK-LABEL: fcvtzu_v8f64_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -555,12 +555,12 @@ define <8 x i16> @fcvtzu_v8f64_v8i16(<8 x double>* %a) #0 {
 ; CHECK-NEXT:    strh w9, [sp, #2]
 ; CHECK-NEXT:    ldr q0, [sp], #16
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x double>, <8 x double>* %a
+  %op1 = load <8 x double>, ptr %a
   %res = fptoui <8 x double> %op1 to <8 x i16>
   ret <8 x i16> %res
 }
 
-define void @fcvtzu_v16f64_v16i16(<16 x double>* %a, <16 x i16>* %b) #0 {
+define void @fcvtzu_v16f64_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzu_v16f64_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
@@ -631,9 +631,9 @@ define void @fcvtzu_v16f64_v16i16(<16 x double>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x double>, <16 x double>* %a
+  %op1 = load <16 x double>, ptr %a
   %res = fptoui <16 x double> %op1 to <16 x i16>
-  store <16 x i16> %res, <16 x i16>* %b
+  store <16 x i16> %res, ptr %b
   ret void
 }
 
@@ -667,7 +667,7 @@ define <2 x i32> @fcvtzu_v2f64_v2i32(<2 x double> %op1) #0 {
   ret <2 x i32> %res
 }
 
-define <4 x i32> @fcvtzu_v4f64_v4i32(<4 x double>* %a) #0 {
+define <4 x i32> @fcvtzu_v4f64_v4i32(ptr %a) #0 {
 ; CHECK-LABEL: fcvtzu_v4f64_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -680,12 +680,12 @@ define <4 x i32> @fcvtzu_v4f64_v4i32(<4 x double>* %a) #0 {
 ; CHECK-NEXT:    splice z0.s, p0, z0.s, z2.s
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x double>, <4 x double>* %a
+  %op1 = load <4 x double>, ptr %a
   %res = fptoui <4 x double> %op1 to <4 x i32>
   ret <4 x i32> %res
 }
 
-define void @fcvtzu_v8f64_v8i32(<8 x double>* %a, <8 x i32>* %b) #0 {
+define void @fcvtzu_v8f64_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzu_v8f64_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -704,9 +704,9 @@ define void @fcvtzu_v8f64_v8i32(<8 x double>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    splice z3.s, p1, z3.s, z2.s
 ; CHECK-NEXT:    stp q0, q3, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x double>, <8 x double>* %a
+  %op1 = load <8 x double>, ptr %a
   %res = fptoui <8 x double> %op1 to <8 x i32>
-  store <8 x i32> %res, <8 x i32>* %b
+  store <8 x i32> %res, ptr %b
   ret void
 }
 
@@ -738,7 +738,7 @@ define <2 x i64> @fcvtzu_v2f64_v2i64(<2 x double> %op1) #0 {
   ret <2 x i64> %res
 }
 
-define void @fcvtzu_v4f64_v4i64(<4 x double>* %a, <4 x i64>* %b) #0 {
+define void @fcvtzu_v4f64_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzu_v4f64_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -747,9 +747,9 @@ define void @fcvtzu_v4f64_v4i64(<4 x double>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    fcvtzu z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x double>, <4 x double>* %a
+  %op1 = load <4 x double>, ptr %a
   %res = fptoui <4 x double> %op1 to <4 x i64>
-  store <4 x i64> %res, <4 x i64>* %b
+  store <4 x i64> %res, ptr %b
   ret void
 }
 
@@ -769,7 +769,7 @@ define <4 x i16> @fcvtzs_v4f16_v4i16(<4 x half> %op1) #0 {
   ret <4 x i16> %res
 }
 
-define void @fcvtzs_v8f16_v8i16(<8 x half>* %a, <8 x i16>* %b) #0 {
+define void @fcvtzs_v8f16_v8i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzs_v8f16_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -777,13 +777,13 @@ define void @fcvtzs_v8f16_v8i16(<8 x half>* %a, <8 x i16>* %b) #0 {
 ; CHECK-NEXT:    fcvtzs z0.h, p0/m, z0.h
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x half>, <8 x half>* %a
+  %op1 = load <8 x half>, ptr %a
   %res = fptosi <8 x half> %op1 to <8 x i16>
-  store <8 x i16> %res, <8 x i16>* %b
+  store <8 x i16> %res, ptr %b
   ret void
 }
 
-define void @fcvtzs_v16f16_v16i16(<16 x half>* %a, <16 x i16>* %b) #0 {
+define void @fcvtzs_v16f16_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzs_v16f16_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -792,9 +792,9 @@ define void @fcvtzs_v16f16_v16i16(<16 x half>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    fcvtzs z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x half>, <16 x half>* %a
+  %op1 = load <16 x half>, ptr %a
   %res = fptosi <16 x half> %op1 to <16 x i16>
-  store <16 x i16> %res, <16 x i16>* %b
+  store <16 x i16> %res, ptr %b
   ret void
 }
 
@@ -828,7 +828,7 @@ define <4 x i32> @fcvtzs_v4f16_v4i32(<4 x half> %op1) #0 {
   ret <4 x i32> %res
 }
 
-define void @fcvtzs_v8f16_v8i32(<8 x half>* %a, <8 x i32>* %b) #0 {
+define void @fcvtzs_v8f16_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzs_v8f16_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -840,13 +840,13 @@ define void @fcvtzs_v8f16_v8i32(<8 x half>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    fcvtzs z0.s, p0/m, z0.h
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x half>, <8 x half>* %a
+  %op1 = load <8 x half>, ptr %a
   %res = fptosi <8 x half> %op1 to <8 x i32>
-  store <8 x i32> %res, <8 x i32>* %b
+  store <8 x i32> %res, ptr %b
   ret void
 }
 
-define void @fcvtzs_v16f16_v16i32(<16 x half>* %a, <16 x i32>* %b) #0 {
+define void @fcvtzs_v16f16_v16i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzs_v16f16_v16i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -865,9 +865,9 @@ define void @fcvtzs_v16f16_v16i32(<16 x half>* %a, <16 x i32>* %b) #0 {
 ; CHECK-NEXT:    fcvtzs z1.s, p0/m, z2.h
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x half>, <16 x half>* %a
+  %op1 = load <16 x half>, ptr %a
   %res = fptosi <16 x half> %op1 to <16 x i32>
-  store <16 x i32> %res, <16 x i32>* %b
+  store <16 x i32> %res, ptr %b
   ret void
 }
 
@@ -901,7 +901,7 @@ define <2 x i64> @fcvtzs_v2f16_v2i64(<2 x half> %op1) #0 {
   ret <2 x i64> %res
 }
 
-define void @fcvtzs_v4f16_v4i64(<4 x half>* %a, <4 x i64>* %b) #0 {
+define void @fcvtzs_v4f16_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzs_v4f16_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -919,13 +919,13 @@ define void @fcvtzs_v4f16_v4i64(<4 x half>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x half>, <4 x half>* %a
+  %op1 = load <4 x half>, ptr %a
   %res = fptosi <4 x half> %op1 to <4 x i64>
-  store <4 x i64> %res, <4 x i64>* %b
+  store <4 x i64> %res, ptr %b
   ret void
 }
 
-define void @fcvtzs_v8f16_v8i64(<8 x half>* %a, <8 x i64>* %b) #0 {
+define void @fcvtzs_v8f16_v8i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzs_v8f16_v8i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #64
@@ -956,13 +956,13 @@ define void @fcvtzs_v8f16_v8i64(<8 x half>* %a, <8 x i64>* %b) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x1, #32]
 ; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x half>, <8 x half>* %a
+  %op1 = load <8 x half>, ptr %a
   %res = fptosi <8 x half> %op1 to <8 x i64>
-  store <8 x i64> %res, <8 x i64>* %b
+  store <8 x i64> %res, ptr %b
   ret void
 }
 
-define void @fcvtzs_v16f16_v16i64(<16 x half>* %a, <16 x i64>* %b) #0 {
+define void @fcvtzs_v16f16_v16i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzs_v16f16_v16i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #128
@@ -1016,9 +1016,9 @@ define void @fcvtzs_v16f16_v16i64(<16 x half>* %a, <16 x i64>* %b) #0 {
 ; CHECK-NEXT:    stp q7, q6, [x1, #64]
 ; CHECK-NEXT:    add sp, sp, #128
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x half>, <16 x half>* %a
+  %op1 = load <16 x half>, ptr %a
   %res = fptosi <16 x half> %op1 to <16 x i64>
-  store <16 x i64> %res, <16 x i64>* %b
+  store <16 x i64> %res, ptr %b
   ret void
 }
 
@@ -1051,7 +1051,7 @@ define <4 x i16> @fcvtzs_v4f32_v4i16(<4 x float> %op1) #0 {
   ret <4 x i16> %res
 }
 
-define <8 x i16> @fcvtzs_v8f32_v8i16(<8 x float>* %a) #0 {
+define <8 x i16> @fcvtzs_v8f32_v8i16(ptr %a) #0 {
 ; CHECK-LABEL: fcvtzs_v8f32_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -1064,12 +1064,12 @@ define <8 x i16> @fcvtzs_v8f32_v8i16(<8 x float>* %a) #0 {
 ; CHECK-NEXT:    splice z0.h, p0, z0.h, z2.h
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x float>, <8 x float>* %a
+  %op1 = load <8 x float>, ptr %a
   %res = fptosi <8 x float> %op1 to <8 x i16>
   ret <8 x i16> %res
 }
 
-define void @fcvtzs_v16f32_v16i16(<16 x float>* %a, <16 x i16>* %b) #0 {
+define void @fcvtzs_v16f32_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzs_v16f32_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -1088,9 +1088,9 @@ define void @fcvtzs_v16f32_v16i16(<16 x float>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    splice z3.h, p1, z3.h, z2.h
 ; CHECK-NEXT:    stp q0, q3, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x float>, <16 x float>* %a
+  %op1 = load <16 x float>, ptr %a
   %res = fptosi <16 x float> %op1 to <16 x i16>
-  store <16 x i16> %res, <16 x i16>* %b
+  store <16 x i16> %res, ptr %b
   ret void
 }
 
@@ -1122,7 +1122,7 @@ define <4 x i32> @fcvtzs_v4f32_v4i32(<4 x float> %op1) #0 {
   ret <4 x i32> %res
 }
 
-define void @fcvtzs_v8f32_v8i32(<8 x float>* %a, <8 x i32>* %b) #0 {
+define void @fcvtzs_v8f32_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzs_v8f32_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -1131,9 +1131,9 @@ define void @fcvtzs_v8f32_v8i32(<8 x float>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    fcvtzs z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x float>, <8 x float>* %a
+  %op1 = load <8 x float>, ptr %a
   %res = fptosi <8 x float> %op1 to <8 x i32>
-  store <8 x i32> %res, <8 x i32>* %b
+  store <8 x i32> %res, ptr %b
   ret void
 }
 
@@ -1167,7 +1167,7 @@ define <2 x i64> @fcvtzs_v2f32_v2i64(<2 x float> %op1) #0 {
   ret <2 x i64> %res
 }
 
-define void @fcvtzs_v4f32_v4i64(<4 x float>* %a, <4 x i64>* %b) #0 {
+define void @fcvtzs_v4f32_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzs_v4f32_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -1179,13 +1179,13 @@ define void @fcvtzs_v4f32_v4i64(<4 x float>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.s
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x float>, <4 x float>* %a
+  %op1 = load <4 x float>, ptr %a
   %res = fptosi <4 x float> %op1 to <4 x i64>
-  store <4 x i64> %res, <4 x i64>* %b
+  store <4 x i64> %res, ptr %b
   ret void
 }
 
-define void @fcvtzs_v8f32_v8i64(<8 x float>* %a, <8 x i64>* %b) #0 {
+define void @fcvtzs_v8f32_v8i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzs_v8f32_v8i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -1204,9 +1204,9 @@ define void @fcvtzs_v8f32_v8i64(<8 x float>* %a, <8 x i64>* %b) #0 {
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z2.s
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x float>, <8 x float>* %a
+  %op1 = load <8 x float>, ptr %a
   %res = fptosi <8 x float> %op1 to <8 x i64>
-  store <8 x i64> %res, <8 x i64>* %b
+  store <8 x i64> %res, ptr %b
   ret void
 }
 
@@ -1241,7 +1241,7 @@ define <2 x i16> @fcvtzs_v2f64_v2i16(<2 x double> %op1) #0 {
   ret <2 x i16> %res
 }
 
-define <4 x i16> @fcvtzs_v4f64_v4i16(<4 x double>* %a) #0 {
+define <4 x i16> @fcvtzs_v4f64_v4i16(ptr %a) #0 {
 ; CHECK-LABEL: fcvtzs_v4f64_v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -1265,12 +1265,12 @@ define <4 x i16> @fcvtzs_v4f64_v4i16(<4 x double>* %a) #0 {
 ; CHECK-NEXT:    ldr d0, [sp, #8]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x double>, <4 x double>* %a
+  %op1 = load <4 x double>, ptr %a
   %res = fptosi <4 x double> %op1 to <4 x i16>
   ret <4 x i16> %res
 }
 
-define <8 x i16> @fcvtzs_v8f64_v8i16(<8 x double>* %a) #0 {
+define <8 x i16> @fcvtzs_v8f64_v8i16(ptr %a) #0 {
 ; CHECK-LABEL: fcvtzs_v8f64_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -1308,12 +1308,12 @@ define <8 x i16> @fcvtzs_v8f64_v8i16(<8 x double>* %a) #0 {
 ; CHECK-NEXT:    strh w9, [sp, #2]
 ; CHECK-NEXT:    ldr q0, [sp], #16
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x double>, <8 x double>* %a
+  %op1 = load <8 x double>, ptr %a
   %res = fptosi <8 x double> %op1 to <8 x i16>
   ret <8 x i16> %res
 }
 
-define void @fcvtzs_v16f64_v16i16(<16 x double>* %a, <16 x i16>* %b) #0 {
+define void @fcvtzs_v16f64_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzs_v16f64_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
@@ -1384,9 +1384,9 @@ define void @fcvtzs_v16f64_v16i16(<16 x double>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x double>, <16 x double>* %a
+  %op1 = load <16 x double>, ptr %a
   %res = fptosi <16 x double> %op1 to <16 x i16>
-  store <16 x i16> %res, <16 x i16>* %b
+  store <16 x i16> %res, ptr %b
   ret void
 }
 
@@ -1420,7 +1420,7 @@ define <2 x i32> @fcvtzs_v2f64_v2i32(<2 x double> %op1) #0 {
   ret <2 x i32> %res
 }
 
-define <4 x i32> @fcvtzs_v4f64_v4i32(<4 x double>* %a) #0 {
+define <4 x i32> @fcvtzs_v4f64_v4i32(ptr %a) #0 {
 ; CHECK-LABEL: fcvtzs_v4f64_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -1433,12 +1433,12 @@ define <4 x i32> @fcvtzs_v4f64_v4i32(<4 x double>* %a) #0 {
 ; CHECK-NEXT:    splice z0.s, p0, z0.s, z2.s
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x double>, <4 x double>* %a
+  %op1 = load <4 x double>, ptr %a
   %res = fptosi <4 x double> %op1 to <4 x i32>
   ret <4 x i32> %res
 }
 
-define void @fcvtzs_v8f64_v8i32(<8 x double>* %a, <8 x i32>* %b) #0 {
+define void @fcvtzs_v8f64_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzs_v8f64_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -1457,9 +1457,9 @@ define void @fcvtzs_v8f64_v8i32(<8 x double>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    splice z3.s, p1, z3.s, z2.s
 ; CHECK-NEXT:    stp q0, q3, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x double>, <8 x double>* %a
+  %op1 = load <8 x double>, ptr %a
   %res = fptosi <8 x double> %op1 to <8 x i32>
-  store <8 x i32> %res, <8 x i32>* %b
+  store <8 x i32> %res, ptr %b
   ret void
 }
 
@@ -1491,7 +1491,7 @@ define <2 x i64> @fcvtzs_v2f64_v2i64(<2 x double> %op1) #0 {
   ret <2 x i64> %res
 }
 
-define void @fcvtzs_v4f64_v4i64(<4 x double>* %a, <4 x i64>* %b) #0 {
+define void @fcvtzs_v4f64_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvtzs_v4f64_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -1500,9 +1500,9 @@ define void @fcvtzs_v4f64_v4i64(<4 x double>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    fcvtzs z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x double>, <4 x double>* %a
+  %op1 = load <4 x double>, ptr %a
   %res = fptosi <4 x double> %op1 to <4 x i64>
-  store <4 x i64> %res, <4 x i64>* %b
+  store <4 x i64> %res, ptr %b
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll
index 093acf0ac8483..820114fc30dc4 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll
@@ -180,7 +180,7 @@ define <4 x i32> @insertelement_v4i32(<4 x i32> %op1) #0 {
     ret <4 x i32> %r
 }
 
-define <8 x i32> @insertelement_v8i32(<8 x i32>* %a) #0 {
+define <8 x i32> @insertelement_v8i32(ptr %a) #0 {
 ; CHECK-LABEL: insertelement_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #3
@@ -193,7 +193,7 @@ define <8 x i32> @insertelement_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-NEXT:    mov z1.s, p0/m, w8
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    ret
-    %op1 = load <8 x i32>, <8 x i32>* %a
+    %op1 = load <8 x i32>, ptr %a
     %r = insertelement <8 x i32> %op1, i32 5, i64 7
     ret <8 x i32> %r
 }
@@ -226,7 +226,7 @@ define <2 x i64> @insertelement_v2i64(<2 x i64> %op1) #0 {
     ret <2 x i64> %r
 }
 
-define <4 x i64> @insertelement_v4i64(<4 x i64>* %a) #0 {
+define <4 x i64> @insertelement_v4i64(ptr %a) #0 {
 ; CHECK-LABEL: insertelement_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1
@@ -239,7 +239,7 @@ define <4 x i64> @insertelement_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-NEXT:    mov z1.d, p0/m, x8
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    ret
-    %op1 = load <4 x i64>, <4 x i64>* %a
+    %op1 = load <4 x i64>, ptr %a
     %r = insertelement <4 x i64> %op1, i64 5, i64 3
     ret <4 x i64> %r
 }
@@ -295,7 +295,7 @@ define <8 x half> @insertelement_v8f16(<8 x half> %op1) #0 {
     ret <8 x half> %r
 }
 
-define <16 x half> @insertelement_v16f16(<16 x half>* %a) #0 {
+define <16 x half> @insertelement_v16f16(ptr %a) #0 {
 ; CHECK-LABEL: insertelement_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -308,7 +308,7 @@ define <16 x half> @insertelement_v16f16(<16 x half>* %a) #0 {
 ; CHECK-NEXT:    mov z1.h, p0/m, h3
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    ret
-    %op1 = load <16 x half>, <16 x half>* %a
+    %op1 = load <16 x half>, ptr %a
     %r = insertelement <16 x half> %op1, half 5.0, i64 15
     ret <16 x half> %r
 }
@@ -348,7 +348,7 @@ define <4 x float> @insertelement_v4f32(<4 x float> %op1) #0 {
     ret <4 x float> %r
 }
 
-define <8 x float> @insertelement_v8f32(<8 x float>* %a) #0 {
+define <8 x float> @insertelement_v8f32(ptr %a) #0 {
 ; CHECK-LABEL: insertelement_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -361,7 +361,7 @@ define <8 x float> @insertelement_v8f32(<8 x float>* %a) #0 {
 ; CHECK-NEXT:    mov z1.s, p0/m, s4
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    ret
-    %op1 = load <8 x float>, <8 x float>* %a
+    %op1 = load <8 x float>, ptr %a
     %r = insertelement <8 x float> %op1, float 5.0, i64 7
     ret <8 x float> %r
 }
@@ -393,7 +393,7 @@ define <2 x double> @insertelement_v2f64(<2 x double> %op1) #0 {
     ret <2 x double> %r
 }
 
-define <4 x double> @insertelement_v4f64(<4 x double>* %a) #0 {
+define <4 x double> @insertelement_v4f64(ptr %a) #0 {
 ; CHECK-LABEL: insertelement_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -406,7 +406,7 @@ define <4 x double> @insertelement_v4f64(<4 x double>* %a) #0 {
 ; CHECK-NEXT:    mov z1.d, p0/m, d3
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    ret
-    %op1 = load <4 x double>, <4 x double>* %a
+    %op1 = load <4 x double>, ptr %a
     %r = insertelement <4 x double> %op1, double 5.0, i64 3
     ret <4 x double> %r
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll
index f90afcd99970a..7a7d175931713 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll
@@ -42,7 +42,7 @@ define <16 x i8> @add_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @add_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @add_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: add_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -51,10 +51,10 @@ define void @add_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    add z1.b, z1.b, z3.b
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = add <32 x i8> %op1, %op2
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -94,7 +94,7 @@ define <8 x i16> @add_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @add_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @add_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: add_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -103,10 +103,10 @@ define void @add_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    add z1.h, z1.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = add <16 x i16> %op1, %op2
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -134,7 +134,7 @@ define <4 x i32> @add_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @add_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+define void @add_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: add_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -143,10 +143,10 @@ define void @add_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    add z1.s, z1.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = add <8 x i32> %op1, %op2
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -174,7 +174,7 @@ define <2 x i64> @add_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @add_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+define void @add_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: add_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -183,10 +183,10 @@ define void @add_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    add z1.d, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = add <4 x i64> %op1, %op2
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 
@@ -233,7 +233,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @mul_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @mul_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: mul_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -243,10 +243,10 @@ define void @mul_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    mul z1.b, p0/m, z1.b, z3.b
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = mul <32 x i8> %op1, %op2
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -289,7 +289,7 @@ define <8 x i16> @mul_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @mul_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @mul_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: mul_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -299,10 +299,10 @@ define void @mul_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    mul z1.h, p0/m, z1.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = mul <16 x i16> %op1, %op2
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -332,7 +332,7 @@ define <4 x i32> @mul_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @mul_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+define void @mul_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: mul_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -342,10 +342,10 @@ define void @mul_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    mul z1.s, p0/m, z1.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = mul <8 x i32> %op1, %op2
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -375,7 +375,7 @@ define <2 x i64> @mul_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @mul_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+define void @mul_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: mul_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -385,10 +385,10 @@ define void @mul_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    mul z1.d, p0/m, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = mul <4 x i64> %op1, %op2
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 
@@ -432,7 +432,7 @@ define <16 x i8> @sub_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @sub_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @sub_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: sub_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -441,10 +441,10 @@ define void @sub_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    sub z1.b, z1.b, z3.b
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = sub <32 x i8> %op1, %op2
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -484,7 +484,7 @@ define <8 x i16> @sub_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @sub_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @sub_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: sub_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -493,10 +493,10 @@ define void @sub_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    sub z1.h, z1.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = sub <16 x i16> %op1, %op2
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -524,7 +524,7 @@ define <4 x i32> @sub_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @sub_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+define void @sub_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: sub_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -533,10 +533,10 @@ define void @sub_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    sub z1.s, z1.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = sub <8 x i32> %op1, %op2
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -564,7 +564,7 @@ define <2 x i64> @sub_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @sub_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+define void @sub_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: sub_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -573,10 +573,10 @@ define void @sub_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    sub z1.d, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = sub <4 x i64> %op1, %op2
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 
@@ -622,7 +622,7 @@ define <16 x i8> @abs_v16i8(<16 x i8> %op1) #0 {
   ret <16 x i8> %res
 }
 
-define void @abs_v32i8(<32 x i8>* %a) #0 {
+define void @abs_v32i8(ptr %a) #0 {
 ; CHECK-LABEL: abs_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -631,9 +631,9 @@ define void @abs_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-NEXT:    abs z1.b, p0/m, z1.b
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
+  %op1 = load <32 x i8>, ptr %a
   %res = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %op1, i1 false)
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -675,7 +675,7 @@ define <8 x i16> @abs_v8i16(<8 x i16> %op1) #0 {
   ret <8 x i16> %res
 }
 
-define void @abs_v16i16(<16 x i16>* %a) #0 {
+define void @abs_v16i16(ptr %a) #0 {
 ; CHECK-LABEL: abs_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -684,9 +684,9 @@ define void @abs_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-NEXT:    abs z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
+  %op1 = load <16 x i16>, ptr %a
   %res = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %op1, i1 false)
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -714,7 +714,7 @@ define <4 x i32> @abs_v4i32(<4 x i32> %op1) #0 {
   ret <4 x i32> %res
 }
 
-define void @abs_v8i32(<8 x i32>* %a) #0 {
+define void @abs_v8i32(ptr %a) #0 {
 ; CHECK-LABEL: abs_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -723,9 +723,9 @@ define void @abs_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-NEXT:    abs z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
+  %op1 = load <8 x i32>, ptr %a
   %res = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %op1, i1 false)
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -753,7 +753,7 @@ define <2 x i64> @abs_v2i64(<2 x i64> %op1) #0 {
   ret <2 x i64> %res
 }
 
-define void @abs_v4i64(<4 x i64>* %a) #0 {
+define void @abs_v4i64(ptr %a) #0 {
 ; CHECK-LABEL: abs_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -762,9 +762,9 @@ define void @abs_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-NEXT:    abs z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
+  %op1 = load <4 x i64>, ptr %a
   %res = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %op1, i1 false)
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll
index b6547cef81b99..c915351fea932 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll
@@ -103,7 +103,7 @@ define <16 x i8> @sdiv_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @sdiv_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @sdiv_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: sdiv_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q2, [x0]
@@ -177,10 +177,10 @@ define void @sdiv_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    splice z2.b, p2, z2.b, z4.b
 ; CHECK-NEXT:    stp q1, q2, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = sdiv <32 x i8> %op1, %op2
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -243,7 +243,7 @@ define <8 x i16> @sdiv_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @sdiv_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @sdiv_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: sdiv_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q3, q0, [x1]
@@ -275,10 +275,10 @@ define void @sdiv_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    splice z3.h, p1, z3.h, z0.h
 ; CHECK-NEXT:    stp q2, q3, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = sdiv <16 x i16> %op1, %op2
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -308,7 +308,7 @@ define <4 x i32> @sdiv_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @sdiv_v8i32(<8 x i32>* %a, <8 x i32>* %b)  #0 {
+define void @sdiv_v8i32(ptr %a, ptr %b)  #0 {
 ; CHECK-LABEL: sdiv_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -318,10 +318,10 @@ define void @sdiv_v8i32(<8 x i32>* %a, <8 x i32>* %b)  #0 {
 ; CHECK-NEXT:    sdiv z1.s, p0/m, z1.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = sdiv <8 x i32> %op1, %op2
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -351,7 +351,7 @@ define <2 x i64> @sdiv_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @sdiv_v4i64(<4 x i64>* %a, <4 x i64>* %b)  #0 {
+define void @sdiv_v4i64(ptr %a, ptr %b)  #0 {
 ; CHECK-LABEL: sdiv_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -361,10 +361,10 @@ define void @sdiv_v4i64(<4 x i64>* %a, <4 x i64>* %b)  #0 {
 ; CHECK-NEXT:    sdiv z1.d, p0/m, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = sdiv <4 x i64> %op1, %op2
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 
@@ -465,7 +465,7 @@ define <16 x i8> @udiv_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @udiv_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @udiv_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: udiv_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q2, [x0]
@@ -539,10 +539,10 @@ define void @udiv_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    splice z2.b, p2, z2.b, z4.b
 ; CHECK-NEXT:    stp q1, q2, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = udiv <32 x i8> %op1, %op2
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -603,7 +603,7 @@ define <8 x i16> @udiv_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @udiv_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @udiv_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: udiv_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q3, q0, [x1]
@@ -635,10 +635,10 @@ define void @udiv_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    splice z3.h, p1, z3.h, z0.h
 ; CHECK-NEXT:    stp q2, q3, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = udiv <16 x i16> %op1, %op2
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -668,7 +668,7 @@ define <4 x i32> @udiv_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @udiv_v8i32(<8 x i32>* %a, <8 x i32>* %b)  #0 {
+define void @udiv_v8i32(ptr %a, ptr %b)  #0 {
 ; CHECK-LABEL: udiv_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -678,10 +678,10 @@ define void @udiv_v8i32(<8 x i32>* %a, <8 x i32>* %b)  #0 {
 ; CHECK-NEXT:    udiv z1.s, p0/m, z1.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = udiv <8 x i32> %op1, %op2
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -711,7 +711,7 @@ define <2 x i64> @udiv_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @udiv_v4i64(<4 x i64>* %a, <4 x i64>* %b)  #0 {
+define void @udiv_v4i64(ptr %a, ptr %b)  #0 {
 ; CHECK-LABEL: udiv_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -721,14 +721,14 @@ define void @udiv_v4i64(<4 x i64>* %a, <4 x i64>* %b)  #0 {
 ; CHECK-NEXT:    udiv z1.d, p0/m, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = udiv <4 x i64> %op1, %op2
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 
-define void @udiv_constantsplat_v8i32(<8 x i32>* %a)  #0 {
+define void @udiv_constantsplat_v8i32(ptr %a)  #0 {
 ; CHECK-LABEL: udiv_constantsplat_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -749,9 +749,9 @@ define void @udiv_constantsplat_v8i32(<8 x i32>* %a)  #0 {
 ; CHECK-NEXT:    lsr z1.s, p0/m, z1.s, #6
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
+  %op1 = load <8 x i32>, ptr %a
   %res = udiv <8 x i32> %op1, <i32 95, i32 95, i32 95, i32 95, i32 95, i32 95, i32 95, i32 95>
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll
index fcbb37b77425c..a59306f3b78a7 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-extends.ll
@@ -10,7 +10,7 @@ target triple = "aarch64-unknown-linux-gnu"
 ; NOTE: Covers the scenario where a SIGN_EXTEND_INREG is required, whose inreg
 ; type's element type is not byte-based and thus cannot be lowered directly to
 ; an SVE instruction.
-define void @sext_v8i1_v8i32(<8 x i1> %a, <8 x i32>* %out) #0 {
+define void @sext_v8i1_v8i32(<8 x i1> %a, ptr %out) #0 {
 ; CHECK-LABEL: sext_v8i1_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -26,7 +26,7 @@ define void @sext_v8i1_v8i32(<8 x i1> %a, <8 x i32>* %out) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = sext <8 x i1> %a to <8 x i32>
-  store <8 x i32> %b, <8 x i32>* %out
+  store <8 x i32> %b, ptr %out
   ret void
 }
 
@@ -37,7 +37,7 @@ define void @sext_v8i1_v8i32(<8 x i1> %a, <8 x i32>* %out) #0 {
 ; NOTE: Covers the scenario where a SIGN_EXTEND_INREG is required, whose inreg
 ; type's element type is not power-of-2 based and thus cannot be lowered
 ; directly to an SVE instruction.
-define void @sext_v4i3_v4i64(<4 x i3> %a, <4 x i64>* %out) #0 {
+define void @sext_v4i3_v4i64(<4 x i3> %a, ptr %out) #0 {
 ; CHECK-LABEL: sext_v4i3_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -53,7 +53,7 @@ define void @sext_v4i3_v4i64(<4 x i3> %a, <4 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = sext <4 x i3> %a to <4 x i64>
-  store <4 x i64> %b, <4 x i64>* %out
+  store <4 x i64> %b, ptr %out
   ret void
 }
 
@@ -61,7 +61,7 @@ define void @sext_v4i3_v4i64(<4 x i3> %a, <4 x i64>* %out) #0 {
 ; sext i8 -> i16
 ;
 
-define void @sext_v16i8_v16i16(<16 x i8> %a, <16 x i16>* %out) #0 {
+define void @sext_v16i8_v16i16(<16 x i8> %a, ptr %out) #0 {
 ; CHECK-LABEL: sext_v16i8_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
@@ -71,12 +71,12 @@ define void @sext_v16i8_v16i16(<16 x i8> %a, <16 x i16>* %out) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = sext <16 x i8> %a to <16 x i16>
-  store <16 x i16>%b, <16 x i16>* %out
+  store <16 x i16>%b, ptr %out
   ret void
 }
 
 ; NOTE: Extra 'add' is to prevent the extend being combined with the load.
-define void @sext_v32i8_v32i16(<32 x i8>* %in, <32 x i16>* %out) #0 {
+define void @sext_v32i8_v32i16(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: sext_v32i8_v32i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -91,10 +91,10 @@ define void @sext_v32i8_v32i16(<32 x i8>* %in, <32 x i16>* %out) #0 {
 ; CHECK-NEXT:    stp q2, q0, [x1]
 ; CHECK-NEXT:    stp q3, q1, [x1, #32]
 ; CHECK-NEXT:    ret
-  %a = load <32 x i8>, <32 x i8>* %in
+  %a = load <32 x i8>, ptr %in
   %b = add <32 x i8> %a, %a
   %c = sext <32 x i8> %b to <32 x i16>
-  store <32 x i16> %c, <32 x i16>* %out
+  store <32 x i16> %c, ptr %out
   ret void
 }
 
@@ -102,7 +102,7 @@ define void @sext_v32i8_v32i16(<32 x i8>* %in, <32 x i16>* %out) #0 {
 ; sext i8 -> i32
 ;
 
-define void @sext_v8i8_v8i32(<8 x i8> %a, <8 x i32>* %out) #0 {
+define void @sext_v8i8_v8i32(<8 x i8> %a, ptr %out) #0 {
 ; CHECK-LABEL: sext_v8i8_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -113,11 +113,11 @@ define void @sext_v8i8_v8i32(<8 x i8> %a, <8 x i32>* %out) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = sext <8 x i8> %a to <8 x i32>
-  store <8 x i32>%b, <8 x i32>* %out
+  store <8 x i32>%b, ptr %out
   ret void
 }
 
-define void @sext_v16i8_v16i32(<16 x i8> %a, <16 x i32>* %out) #0 {
+define void @sext_v16i8_v16i32(<16 x i8> %a, ptr %out) #0 {
 ; CHECK-LABEL: sext_v16i8_v16i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
@@ -134,11 +134,11 @@ define void @sext_v16i8_v16i32(<16 x i8> %a, <16 x i32>* %out) #0 {
 ; CHECK-NEXT:    stp q2, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = sext <16 x i8> %a to <16 x i32>
-  store <16 x i32> %b, <16 x i32>* %out
+  store <16 x i32> %b, ptr %out
   ret void
 }
 
-define void @sext_v32i8_v32i32(<32 x i8>* %in, <32 x i32>* %out) #0 {
+define void @sext_v32i8_v32i32(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: sext_v32i8_v32i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -167,10 +167,10 @@ define void @sext_v32i8_v32i32(<32 x i8>* %in, <32 x i32>* %out) #0 {
 ; CHECK-NEXT:    stp q6, q0, [x1, #32]
 ; CHECK-NEXT:    stp q5, q1, [x1, #96]
 ; CHECK-NEXT:    ret
-  %a = load <32 x i8>, <32 x i8>* %in
+  %a = load <32 x i8>, ptr %in
   %b = add <32 x i8> %a, %a
   %c = sext <32 x i8> %b to <32 x i32>
-  store <32 x i32> %c, <32 x i32>* %out
+  store <32 x i32> %c, ptr %out
   ret void
 }
 
@@ -181,7 +181,7 @@ define void @sext_v32i8_v32i32(<32 x i8>* %in, <32 x i32>* %out) #0 {
 ; NOTE: v4i8 is an unpacked type stored within a v4i16 container. The sign
 ; extend is a two-step process where the container is any_extend'd with the
 ; result feeding an inreg sign extend.
-define void @sext_v4i8_v4i64(<4 x i8> %a, <4 x i64>* %out) #0 {
+define void @sext_v4i8_v4i64(<4 x i8> %a, ptr %out) #0 {
 ; CHECK-LABEL: sext_v4i8_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -197,11 +197,11 @@ define void @sext_v4i8_v4i64(<4 x i8> %a, <4 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = sext <4 x i8> %a to <4 x i64>
-  store <4 x i64>%b, <4 x i64>* %out
+  store <4 x i64>%b, ptr %out
   ret void
 }
 
-define void @sext_v8i8_v8i64(<8 x i8> %a, <8 x i64>* %out) #0 {
+define void @sext_v8i8_v8i64(<8 x i8> %a, ptr %out) #0 {
 ; CHECK-LABEL: sext_v8i8_v8i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -219,11 +219,11 @@ define void @sext_v8i8_v8i64(<8 x i8> %a, <8 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q3, q0, [x0, #32]
 ; CHECK-NEXT:    ret
   %b = sext <8 x i8> %a to <8 x i64>
-  store <8 x i64>%b, <8 x i64>* %out
+  store <8 x i64>%b, ptr %out
   ret void
 }
 
-define void @sext_v16i8_v16i64(<16 x i8> %a, <16 x i64>* %out) #0 {
+define void @sext_v16i8_v16i64(<16 x i8> %a, ptr %out) #0 {
 ; CHECK-LABEL: sext_v16i8_v16i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
@@ -254,11 +254,11 @@ define void @sext_v16i8_v16i64(<16 x i8> %a, <16 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q7, q0, [x0, #32]
 ; CHECK-NEXT:    ret
   %b = sext <16 x i8> %a to <16 x i64>
-  store <16 x i64> %b, <16 x i64>* %out
+  store <16 x i64> %b, ptr %out
   ret void
 }
 
-define void @sext_v32i8_v32i64(<32 x i8>* %in, <32 x i64>* %out) #0 {
+define void @sext_v32i8_v32i64(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: sext_v32i8_v32i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -315,10 +315,10 @@ define void @sext_v32i8_v32i64(<32 x i8>* %in, <32 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q21, q4, [x1, #192]
 ; CHECK-NEXT:    stp q19, q1, [x1, #224]
 ; CHECK-NEXT:    ret
-  %a = load <32 x i8>, <32 x i8>* %in
+  %a = load <32 x i8>, ptr %in
   %b = add <32 x i8> %a, %a
   %c = sext <32 x i8> %b to <32 x i64>
-  store <32 x i64> %c, <32 x i64>* %out
+  store <32 x i64> %c, ptr %out
   ret void
 }
 
@@ -326,7 +326,7 @@ define void @sext_v32i8_v32i64(<32 x i8>* %in, <32 x i64>* %out) #0 {
 ; sext i16 -> i32
 ;
 
-define void @sext_v8i16_v8i32(<8 x i16> %a, <8 x i32>* %out) #0 {
+define void @sext_v8i16_v8i32(<8 x i16> %a, ptr %out) #0 {
 ; CHECK-LABEL: sext_v8i16_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
@@ -336,11 +336,11 @@ define void @sext_v8i16_v8i32(<8 x i16> %a, <8 x i32>* %out) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = sext <8 x i16> %a to <8 x i32>
-  store <8 x i32>%b, <8 x i32>* %out
+  store <8 x i32>%b, ptr %out
   ret void
 }
 
-define void @sext_v16i16_v16i32(<16 x i16>* %in, <16 x i32>* %out) #0 {
+define void @sext_v16i16_v16i32(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: sext_v16i16_v16i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -355,10 +355,10 @@ define void @sext_v16i16_v16i32(<16 x i16>* %in, <16 x i32>* %out) #0 {
 ; CHECK-NEXT:    stp q2, q0, [x1]
 ; CHECK-NEXT:    stp q3, q1, [x1, #32]
 ; CHECK-NEXT:    ret
-  %a = load <16 x i16>, <16 x i16>* %in
+  %a = load <16 x i16>, ptr %in
   %b = add <16 x i16> %a, %a
   %c = sext <16 x i16> %b to <16 x i32>
-  store <16 x i32> %c, <16 x i32>* %out
+  store <16 x i32> %c, ptr %out
   ret void
 }
 
@@ -366,7 +366,7 @@ define void @sext_v16i16_v16i32(<16 x i16>* %in, <16 x i32>* %out) #0 {
 ; sext i16 -> i64
 ;
 
-define void @sext_v4i16_v4i64(<4 x i16> %a, <4 x i64>* %out) #0 {
+define void @sext_v4i16_v4i64(<4 x i16> %a, ptr %out) #0 {
 ; CHECK-LABEL: sext_v4i16_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -377,11 +377,11 @@ define void @sext_v4i16_v4i64(<4 x i16> %a, <4 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = sext <4 x i16> %a to <4 x i64>
-  store <4 x i64>%b, <4 x i64>* %out
+  store <4 x i64>%b, ptr %out
   ret void
 }
 
-define void @sext_v8i16_v8i64(<8 x i16> %a, <8 x i64>* %out) #0 {
+define void @sext_v8i16_v8i64(<8 x i16> %a, ptr %out) #0 {
 ; CHECK-LABEL: sext_v8i16_v8i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
@@ -398,11 +398,11 @@ define void @sext_v8i16_v8i64(<8 x i16> %a, <8 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q2, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = sext <8 x i16> %a to <8 x i64>
-  store <8 x i64>%b, <8 x i64>* %out
+  store <8 x i64>%b, ptr %out
   ret void
 }
 
-define void @sext_v16i16_v16i64(<16 x i16>* %in, <16 x i64>* %out) #0 {
+define void @sext_v16i16_v16i64(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: sext_v16i16_v16i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -431,10 +431,10 @@ define void @sext_v16i16_v16i64(<16 x i16>* %in, <16 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q6, q0, [x1, #32]
 ; CHECK-NEXT:    stp q5, q1, [x1, #96]
 ; CHECK-NEXT:    ret
-  %a = load <16 x i16>, <16 x i16>* %in
+  %a = load <16 x i16>, ptr %in
   %b = add <16 x i16> %a, %a
   %c = sext <16 x i16> %b to <16 x i64>
-  store <16 x i64> %c, <16 x i64>* %out
+  store <16 x i64> %c, ptr %out
   ret void
 }
 
@@ -442,7 +442,7 @@ define void @sext_v16i16_v16i64(<16 x i16>* %in, <16 x i64>* %out) #0 {
 ; sext i32 -> i64
 ;
 
-define void @sext_v4i32_v4i64(<4 x i32> %a, <4 x i64>* %out) #0 {
+define void @sext_v4i32_v4i64(<4 x i32> %a, ptr %out) #0 {
 ; CHECK-LABEL: sext_v4i32_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
@@ -452,11 +452,11 @@ define void @sext_v4i32_v4i64(<4 x i32> %a, <4 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = sext <4 x i32> %a to <4 x i64>
-  store <4 x i64>%b, <4 x i64>* %out
+  store <4 x i64>%b, ptr %out
   ret void
 }
 
-define void @sext_v8i32_v8i64(<8 x i32>* %in, <8 x i64>* %out) #0 {
+define void @sext_v8i32_v8i64(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: sext_v8i32_v8i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -471,10 +471,10 @@ define void @sext_v8i32_v8i64(<8 x i32>* %in, <8 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q2, q0, [x1]
 ; CHECK-NEXT:    stp q3, q1, [x1, #32]
 ; CHECK-NEXT:    ret
-  %a = load <8 x i32>, <8 x i32>* %in
+  %a = load <8 x i32>, ptr %in
   %b = add <8 x i32> %a, %a
   %c = sext <8 x i32> %b to <8 x i64>
-  store <8 x i64> %c, <8 x i64>* %out
+  store <8 x i64> %c, ptr %out
   ret void
 }
 
@@ -482,7 +482,7 @@ define void @sext_v8i32_v8i64(<8 x i32>* %in, <8 x i64>* %out) #0 {
 ; zext i8 -> i16
 ;
 
-define void @zext_v16i8_v16i16(<16 x i8> %a, <16 x i16>* %out) #0 {
+define void @zext_v16i8_v16i16(<16 x i8> %a, ptr %out) #0 {
 ; CHECK-LABEL: zext_v16i8_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
@@ -492,12 +492,12 @@ define void @zext_v16i8_v16i16(<16 x i8> %a, <16 x i16>* %out) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = zext <16 x i8> %a to <16 x i16>
-  store <16 x i16>%b, <16 x i16>* %out
+  store <16 x i16>%b, ptr %out
   ret void
 }
 
 ; NOTE: Extra 'add' is to prevent the extend being combined with the load.
-define void @zext_v32i8_v32i16(<32 x i8>* %in, <32 x i16>* %out) #0 {
+define void @zext_v32i8_v32i16(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: zext_v32i8_v32i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -512,10 +512,10 @@ define void @zext_v32i8_v32i16(<32 x i8>* %in, <32 x i16>* %out) #0 {
 ; CHECK-NEXT:    stp q2, q0, [x1]
 ; CHECK-NEXT:    stp q3, q1, [x1, #32]
 ; CHECK-NEXT:    ret
-  %a = load <32 x i8>, <32 x i8>* %in
+  %a = load <32 x i8>, ptr %in
   %b = add <32 x i8> %a, %a
   %c = zext <32 x i8> %b to <32 x i16>
-  store <32 x i16> %c, <32 x i16>* %out
+  store <32 x i16> %c, ptr %out
   ret void
 }
 
@@ -523,7 +523,7 @@ define void @zext_v32i8_v32i16(<32 x i8>* %in, <32 x i16>* %out) #0 {
 ; zext i8 -> i32
 ;
 
-define void @zext_v8i8_v8i32(<8 x i8> %a, <8 x i32>* %out) #0 {
+define void @zext_v8i8_v8i32(<8 x i8> %a, ptr %out) #0 {
 ; CHECK-LABEL: zext_v8i8_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -534,11 +534,11 @@ define void @zext_v8i8_v8i32(<8 x i8> %a, <8 x i32>* %out) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = zext <8 x i8> %a to <8 x i32>
-  store <8 x i32>%b, <8 x i32>* %out
+  store <8 x i32>%b, ptr %out
   ret void
 }
 
-define void @zext_v16i8_v16i32(<16 x i8> %a, <16 x i32>* %out) #0 {
+define void @zext_v16i8_v16i32(<16 x i8> %a, ptr %out) #0 {
 ; CHECK-LABEL: zext_v16i8_v16i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
@@ -555,11 +555,11 @@ define void @zext_v16i8_v16i32(<16 x i8> %a, <16 x i32>* %out) #0 {
 ; CHECK-NEXT:    stp q2, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = zext <16 x i8> %a to <16 x i32>
-  store <16 x i32> %b, <16 x i32>* %out
+  store <16 x i32> %b, ptr %out
   ret void
 }
 
-define void @zext_v32i8_v32i32(<32 x i8>* %in, <32 x i32>* %out) #0 {
+define void @zext_v32i8_v32i32(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: zext_v32i8_v32i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -588,10 +588,10 @@ define void @zext_v32i8_v32i32(<32 x i8>* %in, <32 x i32>* %out) #0 {
 ; CHECK-NEXT:    stp q6, q0, [x1, #32]
 ; CHECK-NEXT:    stp q5, q1, [x1, #96]
 ; CHECK-NEXT:    ret
-  %a = load <32 x i8>, <32 x i8>* %in
+  %a = load <32 x i8>, ptr %in
   %b = add <32 x i8> %a, %a
   %c = zext <32 x i8> %b to <32 x i32>
-  store <32 x i32> %c, <32 x i32>* %out
+  store <32 x i32> %c, ptr %out
   ret void
 }
 
@@ -602,7 +602,7 @@ define void @zext_v32i8_v32i32(<32 x i8>* %in, <32 x i32>* %out) #0 {
 ; NOTE: v4i8 is an unpacked type stored within a v4i16 container. The zero
 ; extend is a two-step process where the container is zero_extend_inreg'd with
 ; the result feeding a normal zero extend from halfwords to doublewords.
-define void @zext_v4i8_v4i64(<4 x i8> %a, <4 x i64>* %out) #0 {
+define void @zext_v4i8_v4i64(<4 x i8> %a, ptr %out) #0 {
 ; CHECK-LABEL: zext_v4i8_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -614,11 +614,11 @@ define void @zext_v4i8_v4i64(<4 x i8> %a, <4 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = zext <4 x i8> %a to <4 x i64>
-  store <4 x i64>%b, <4 x i64>* %out
+  store <4 x i64>%b, ptr %out
   ret void
 }
 
-define void @zext_v8i8_v8i64(<8 x i8> %a, <8 x i64>* %out) #0 {
+define void @zext_v8i8_v8i64(<8 x i8> %a, ptr %out) #0 {
 ; CHECK-LABEL: zext_v8i8_v8i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -636,11 +636,11 @@ define void @zext_v8i8_v8i64(<8 x i8> %a, <8 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q3, q0, [x0, #32]
 ; CHECK-NEXT:    ret
   %b = zext <8 x i8> %a to <8 x i64>
-  store <8 x i64>%b, <8 x i64>* %out
+  store <8 x i64>%b, ptr %out
   ret void
 }
 
-define void @zext_v16i8_v16i64(<16 x i8> %a, <16 x i64>* %out) #0 {
+define void @zext_v16i8_v16i64(<16 x i8> %a, ptr %out) #0 {
 ; CHECK-LABEL: zext_v16i8_v16i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
@@ -671,11 +671,11 @@ define void @zext_v16i8_v16i64(<16 x i8> %a, <16 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q7, q0, [x0, #32]
 ; CHECK-NEXT:    ret
   %b = zext <16 x i8> %a to <16 x i64>
-  store <16 x i64> %b, <16 x i64>* %out
+  store <16 x i64> %b, ptr %out
   ret void
 }
 
-define void @zext_v32i8_v32i64(<32 x i8>* %in, <32 x i64>* %out) #0 {
+define void @zext_v32i8_v32i64(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: zext_v32i8_v32i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -732,10 +732,10 @@ define void @zext_v32i8_v32i64(<32 x i8>* %in, <32 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q21, q4, [x1, #192]
 ; CHECK-NEXT:    stp q19, q1, [x1, #224]
 ; CHECK-NEXT:    ret
-  %a = load <32 x i8>, <32 x i8>* %in
+  %a = load <32 x i8>, ptr %in
   %b = add <32 x i8> %a, %a
   %c = zext <32 x i8> %b to <32 x i64>
-  store <32 x i64> %c, <32 x i64>* %out
+  store <32 x i64> %c, ptr %out
   ret void
 }
 
@@ -743,7 +743,7 @@ define void @zext_v32i8_v32i64(<32 x i8>* %in, <32 x i64>* %out) #0 {
 ; zext i16 -> i32
 ;
 
-define void @zext_v8i16_v8i32(<8 x i16> %a, <8 x i32>* %out) #0 {
+define void @zext_v8i16_v8i32(<8 x i16> %a, ptr %out) #0 {
 ; CHECK-LABEL: zext_v8i16_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
@@ -753,11 +753,11 @@ define void @zext_v8i16_v8i32(<8 x i16> %a, <8 x i32>* %out) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = zext <8 x i16> %a to <8 x i32>
-  store <8 x i32>%b, <8 x i32>* %out
+  store <8 x i32>%b, ptr %out
   ret void
 }
 
-define void @zext_v16i16_v16i32(<16 x i16>* %in, <16 x i32>* %out) #0 {
+define void @zext_v16i16_v16i32(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: zext_v16i16_v16i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -772,10 +772,10 @@ define void @zext_v16i16_v16i32(<16 x i16>* %in, <16 x i32>* %out) #0 {
 ; CHECK-NEXT:    stp q2, q0, [x1]
 ; CHECK-NEXT:    stp q3, q1, [x1, #32]
 ; CHECK-NEXT:    ret
-  %a = load <16 x i16>, <16 x i16>* %in
+  %a = load <16 x i16>, ptr %in
   %b = add <16 x i16> %a, %a
   %c = zext <16 x i16> %b to <16 x i32>
-  store <16 x i32> %c, <16 x i32>* %out
+  store <16 x i32> %c, ptr %out
   ret void
 }
 
@@ -783,7 +783,7 @@ define void @zext_v16i16_v16i32(<16 x i16>* %in, <16 x i32>* %out) #0 {
 ; zext i16 -> i64
 ;
 
-define void @zext_v4i16_v4i64(<4 x i16> %a, <4 x i64>* %out) #0 {
+define void @zext_v4i16_v4i64(<4 x i16> %a, ptr %out) #0 {
 ; CHECK-LABEL: zext_v4i16_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -794,11 +794,11 @@ define void @zext_v4i16_v4i64(<4 x i16> %a, <4 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = zext <4 x i16> %a to <4 x i64>
-  store <4 x i64>%b, <4 x i64>* %out
+  store <4 x i64>%b, ptr %out
   ret void
 }
 
-define void @zext_v8i16_v8i64(<8 x i16> %a, <8 x i64>* %out) #0 {
+define void @zext_v8i16_v8i64(<8 x i16> %a, ptr %out) #0 {
 ; CHECK-LABEL: zext_v8i16_v8i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
@@ -815,11 +815,11 @@ define void @zext_v8i16_v8i64(<8 x i16> %a, <8 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q2, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = zext <8 x i16> %a to <8 x i64>
-  store <8 x i64>%b, <8 x i64>* %out
+  store <8 x i64>%b, ptr %out
   ret void
 }
 
-define void @zext_v16i16_v16i64(<16 x i16>* %in, <16 x i64>* %out) #0 {
+define void @zext_v16i16_v16i64(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: zext_v16i16_v16i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -848,10 +848,10 @@ define void @zext_v16i16_v16i64(<16 x i16>* %in, <16 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q6, q0, [x1, #32]
 ; CHECK-NEXT:    stp q5, q1, [x1, #96]
 ; CHECK-NEXT:    ret
-  %a = load <16 x i16>, <16 x i16>* %in
+  %a = load <16 x i16>, ptr %in
   %b = add <16 x i16> %a, %a
   %c = zext <16 x i16> %b to <16 x i64>
-  store <16 x i64> %c, <16 x i64>* %out
+  store <16 x i64> %c, ptr %out
   ret void
 }
 
@@ -859,7 +859,7 @@ define void @zext_v16i16_v16i64(<16 x i16>* %in, <16 x i64>* %out) #0 {
 ; zext i32 -> i64
 ;
 
-define void @zext_v4i32_v4i64(<4 x i32> %a, <4 x i64>* %out) #0 {
+define void @zext_v4i32_v4i64(<4 x i32> %a, ptr %out) #0 {
 ; CHECK-LABEL: zext_v4i32_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
@@ -869,11 +869,11 @@ define void @zext_v4i32_v4i64(<4 x i32> %a, <4 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %b = zext <4 x i32> %a to <4 x i64>
-  store <4 x i64>%b, <4 x i64>* %out
+  store <4 x i64>%b, ptr %out
   ret void
 }
 
-define void @zext_v8i32_v8i64(<8 x i32>* %in, <8 x i64>* %out) #0 {
+define void @zext_v8i32_v8i64(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: zext_v8i32_v8i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -888,10 +888,10 @@ define void @zext_v8i32_v8i64(<8 x i32>* %in, <8 x i64>* %out) #0 {
 ; CHECK-NEXT:    stp q2, q0, [x1]
 ; CHECK-NEXT:    stp q3, q1, [x1, #32]
 ; CHECK-NEXT:    ret
-  %a = load <8 x i32>, <8 x i32>* %in
+  %a = load <8 x i32>, ptr %in
   %b = add <8 x i32> %a, %a
   %c = zext <8 x i32> %b to <8 x i64>
-  store <8 x i64> %c, <8 x i64>* %out
+  store <8 x i64> %c, ptr %out
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-log.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-log.ll
index f4af738204a87..d2eea846425ef 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-log.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-log.ll
@@ -31,7 +31,7 @@ define <16 x i8> @and_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @and_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @and_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: and_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -40,10 +40,10 @@ define void @and_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    and z1.d, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = and <32 x i8> %op1, %op2
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -71,7 +71,7 @@ define <8 x i16> @and_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @and_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @and_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: and_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -80,10 +80,10 @@ define void @and_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    and z1.d, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = and <16 x i16> %op1, %op2
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -111,7 +111,7 @@ define <4 x i32> @and_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @and_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+define void @and_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: and_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -120,10 +120,10 @@ define void @and_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    and z1.d, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = and <8 x i32> %op1, %op2
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -151,7 +151,7 @@ define <2 x i64> @and_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @and_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+define void @and_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: and_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -160,10 +160,10 @@ define void @and_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    and z1.d, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = and <4 x i64> %op1, %op2
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 
@@ -195,7 +195,7 @@ define <16 x i8> @or_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @or_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @or_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: or_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -204,10 +204,10 @@ define void @or_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    orr z1.d, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = or <32 x i8> %op1, %op2
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -235,7 +235,7 @@ define <8 x i16> @or_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @or_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @or_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: or_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -244,10 +244,10 @@ define void @or_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    orr z1.d, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = or <16 x i16> %op1, %op2
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -275,7 +275,7 @@ define <4 x i32> @or_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @or_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+define void @or_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: or_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -284,10 +284,10 @@ define void @or_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    orr z1.d, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = or <8 x i32> %op1, %op2
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -315,7 +315,7 @@ define <2 x i64> @or_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @or_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+define void @or_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: or_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -324,10 +324,10 @@ define void @or_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    orr z1.d, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = or <4 x i64> %op1, %op2
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 
@@ -359,7 +359,7 @@ define <16 x i8> @xor_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @xor_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @xor_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: xor_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -368,10 +368,10 @@ define void @xor_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    eor z1.d, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = xor <32 x i8> %op1, %op2
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -399,7 +399,7 @@ define <8 x i16> @xor_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @xor_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @xor_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: xor_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -408,10 +408,10 @@ define void @xor_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    eor z1.d, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = xor <16 x i16> %op1, %op2
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -439,7 +439,7 @@ define <4 x i32> @xor_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @xor_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+define void @xor_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: xor_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -448,10 +448,10 @@ define void @xor_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    eor z1.d, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = xor <8 x i32> %op1, %op2
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -479,7 +479,7 @@ define <2 x i64> @xor_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @xor_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+define void @xor_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: xor_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -488,10 +488,10 @@ define void @xor_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    eor z1.d, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = xor <4 x i64> %op1, %op2
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll
index 96b7b8066be1e..86ea36ca1fb4d 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll
@@ -33,7 +33,7 @@ define <16 x i8> @smax_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @smax_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @smax_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: smax_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -43,10 +43,10 @@ define void @smax_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    smax z1.b, p0/m, z1.b, z3.b
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = call <32 x i8> @llvm.smax.v32i8(<32 x i8> %op1, <32 x i8> %op2)
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -76,7 +76,7 @@ define <8 x i16> @smax_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @smax_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @smax_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: smax_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -86,10 +86,10 @@ define void @smax_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    smax z1.h, p0/m, z1.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = call <16 x i16> @llvm.smax.v16i16(<16 x i16> %op1, <16 x i16> %op2)
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -119,7 +119,7 @@ define <4 x i32> @smax_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @smax_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+define void @smax_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: smax_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -129,10 +129,10 @@ define void @smax_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    smax z1.s, p0/m, z1.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %op1, <8 x i32> %op2)
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -164,7 +164,7 @@ define <2 x i64> @smax_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @smax_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+define void @smax_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: smax_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -174,10 +174,10 @@ define void @smax_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    smax z1.d, p0/m, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %op1, <4 x i64> %op2)
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 
@@ -211,7 +211,7 @@ define <16 x i8> @smin_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @smin_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @smin_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: smin_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -221,10 +221,10 @@ define void @smin_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    smin z1.b, p0/m, z1.b, z3.b
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = call <32 x i8> @llvm.smin.v32i8(<32 x i8> %op1, <32 x i8> %op2)
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -254,7 +254,7 @@ define <8 x i16> @smin_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @smin_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @smin_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: smin_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -264,10 +264,10 @@ define void @smin_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    smin z1.h, p0/m, z1.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = call <16 x i16> @llvm.smin.v16i16(<16 x i16> %op1, <16 x i16> %op2)
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -297,7 +297,7 @@ define <4 x i32> @smin_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @smin_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+define void @smin_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: smin_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -307,10 +307,10 @@ define void @smin_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    smin z1.s, p0/m, z1.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %op1, <8 x i32> %op2)
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -342,7 +342,7 @@ define <2 x i64> @smin_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @smin_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+define void @smin_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: smin_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -352,10 +352,10 @@ define void @smin_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    smin z1.d, p0/m, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %op1, <4 x i64> %op2)
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 
@@ -389,7 +389,7 @@ define <16 x i8> @umax_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @umax_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @umax_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: umax_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -399,10 +399,10 @@ define void @umax_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    umax z1.b, p0/m, z1.b, z3.b
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = call <32 x i8> @llvm.umax.v32i8(<32 x i8> %op1, <32 x i8> %op2)
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -432,7 +432,7 @@ define <8 x i16> @umax_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @umax_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @umax_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: umax_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -442,10 +442,10 @@ define void @umax_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    umax z1.h, p0/m, z1.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = call <16 x i16> @llvm.umax.v16i16(<16 x i16> %op1, <16 x i16> %op2)
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -475,7 +475,7 @@ define <4 x i32> @umax_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @umax_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+define void @umax_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: umax_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -485,10 +485,10 @@ define void @umax_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    umax z1.s, p0/m, z1.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = call <8 x i32> @llvm.umax.v8i32(<8 x i32> %op1, <8 x i32> %op2)
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -520,7 +520,7 @@ define <2 x i64> @umax_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @umax_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+define void @umax_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: umax_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -530,10 +530,10 @@ define void @umax_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    umax z1.d, p0/m, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %op1, <4 x i64> %op2)
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 
@@ -567,7 +567,7 @@ define <16 x i8> @umin_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @umin_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @umin_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: umin_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -577,10 +577,10 @@ define void @umin_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    umin z1.b, p0/m, z1.b, z3.b
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = call <32 x i8> @llvm.umin.v32i8(<32 x i8> %op1, <32 x i8> %op2)
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -610,7 +610,7 @@ define <8 x i16> @umin_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @umin_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @umin_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: umin_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -620,10 +620,10 @@ define void @umin_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    umin z1.h, p0/m, z1.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = call <16 x i16> @llvm.umin.v16i16(<16 x i16> %op1, <16 x i16> %op2)
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -653,7 +653,7 @@ define <4 x i32> @umin_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @umin_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+define void @umin_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: umin_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -663,10 +663,10 @@ define void @umin_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    umin z1.s, p0/m, z1.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %op1, <8 x i32> %op2)
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -698,7 +698,7 @@ define <2 x i64> @umin_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @umin_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+define void @umin_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: umin_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -708,10 +708,10 @@ define void @umin_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    umin z1.d, p0/m, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %op1, <4 x i64> %op2)
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll
index ab6c5f0307cc6..c560516ed3f64 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll
@@ -70,7 +70,7 @@ define <16 x i8> @smulh_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @smulh_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @smulh_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: smulh_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -107,14 +107,14 @@ define void @smulh_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    splice z2.b, p0, z2.b, z0.b
 ; CHECK-NEXT:    stp q3, q2, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %1 = sext <32 x i8> %op1 to <32 x i16>
   %2 = sext <32 x i8> %op2 to <32 x i16>
   %mul = mul <32 x i16> %1, %2
   %shr = lshr <32 x i16> %mul, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
   %res = trunc <32 x i16> %shr to <32 x i8>
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -174,7 +174,7 @@ define <8 x i16> @smulh_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @smulh_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @smulh_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: smulh_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -197,14 +197,14 @@ define void @smulh_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    splice z1.h, p0, z1.h, z2.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %1 = sext <16 x i16> %op1 to <16 x i32>
   %2 = sext <16 x i16> %op2 to <16 x i32>
   %mul = mul <16 x i32> %1, %2
   %shr = lshr <16 x i32> %mul, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
   %res = trunc <16 x i32> %shr to <16 x i16>
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -242,7 +242,7 @@ define <4 x i32> @smulh_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @smulh_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+define void @smulh_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: smulh_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -265,14 +265,14 @@ define void @smulh_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    splice z1.s, p0, z1.s, z2.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %1 = sext <8 x i32> %op1 to <8 x i64>
   %2 = sext <8 x i32> %op2 to <8 x i64>
   %mul = mul <8 x i64> %1, %2
   %shr = lshr <8 x i64> %mul,  <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
   %res = trunc <8 x i64> %shr to <8 x i32>
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -312,7 +312,7 @@ define <2 x i64> @smulh_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @smulh_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+define void @smulh_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: smulh_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -342,14 +342,14 @@ define void @smulh_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    splice z2.d, p0, z2.d, z3.d
 ; CHECK-NEXT:    stp q0, q2, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %1 = sext <4 x i64> %op1 to <4 x i128>
   %2 = sext <4 x i64> %op2 to <4 x i128>
   %mul = mul <4 x i128> %1, %2
   %shr = lshr <4 x i128> %mul, <i128 64, i128 64, i128 64, i128 64>
   %res = trunc <4 x i128> %shr to <4 x i64>
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 
@@ -411,7 +411,7 @@ define <16 x i8> @umulh_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @umulh_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @umulh_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: umulh_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -448,14 +448,14 @@ define void @umulh_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    splice z2.b, p0, z2.b, z0.b
 ; CHECK-NEXT:    stp q3, q2, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %1 = zext <32 x i8> %op1 to <32 x i16>
   %2 = zext <32 x i8> %op2 to <32 x i16>
   %mul = mul <32 x i16> %1, %2
   %shr = lshr <32 x i16> %mul, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
   %res = trunc <32 x i16> %shr to <32 x i8>
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -513,7 +513,7 @@ define <8 x i16> @umulh_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @umulh_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @umulh_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: umulh_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -536,14 +536,14 @@ define void @umulh_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    splice z1.h, p0, z1.h, z2.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %1 = zext <16 x i16> %op1 to <16 x i32>
   %2 = zext <16 x i16> %op2 to <16 x i32>
   %mul = mul <16 x i32> %1, %2
   %shr = lshr <16 x i32> %mul, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
   %res = trunc <16 x i32> %shr to <16 x i16>
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -581,7 +581,7 @@ define <4 x i32> @umulh_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @umulh_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+define void @umulh_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: umulh_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -604,8 +604,8 @@ define void @umulh_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    splice z1.s, p0, z1.s, z2.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %insert = insertelement <8 x i64> undef, i64 32, i64 0
   %splat = shufflevector <8 x i64> %insert, <8 x i64> undef, <8 x i32> zeroinitializer
   %1 = zext <8 x i32> %op1 to <8 x i64>
@@ -613,7 +613,7 @@ define void @umulh_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
   %mul = mul <8 x i64> %1, %2
   %shr = lshr <8 x i64> %mul, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
   %res = trunc <8 x i64> %shr to <8 x i32>
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -651,7 +651,7 @@ define <2 x i64> @umulh_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @umulh_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+define void @umulh_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: umulh_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -681,14 +681,14 @@ define void @umulh_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    splice z2.d, p0, z2.d, z3.d
 ; CHECK-NEXT:    stp q0, q2, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %1 = zext <4 x i64> %op1 to <4 x i128>
   %2 = zext <4 x i64> %op2 to <4 x i128>
   %mul = mul <4 x i128> %1, %2
   %shr = lshr <4 x i128> %mul, <i128 64, i128 64, i128 64, i128 64>
   %res = trunc <4 x i128> %shr to <4 x i64>
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll
index 053047ba3f20f..3980db7b5305b 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-reduce.ll
@@ -33,7 +33,7 @@ define i8 @uaddv_v16i8(<16 x i8> %a) #0 {
   ret i8 %res
 }
 
-define i8 @uaddv_v32i8(<32 x i8>* %a) #0 {
+define i8 @uaddv_v32i8(ptr %a) #0 {
 ; CHECK-LABEL: uaddv_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -43,7 +43,7 @@ define i8 @uaddv_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; CHECK-NEXT:    ret
-  %op = load <32 x i8>, <32 x i8>* %a
+  %op = load <32 x i8>, ptr %a
   %res = call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %op)
   ret i8 %res
 }
@@ -74,7 +74,7 @@ define i16 @uaddv_v8i16(<8 x i16> %a) #0 {
   ret i16 %res
 }
 
-define i16 @uaddv_v16i16(<16 x i16>* %a) #0 {
+define i16 @uaddv_v16i16(ptr %a) #0 {
 ; CHECK-LABEL: uaddv_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -84,7 +84,7 @@ define i16 @uaddv_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; CHECK-NEXT:    ret
-  %op = load <16 x i16>, <16 x i16>* %a
+  %op = load <16 x i16>, ptr %a
   %res = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %op)
   ret i16 %res
 }
@@ -115,7 +115,7 @@ define i32 @uaddv_v4i32(<4 x i32> %a) #0 {
   ret i32 %res
 }
 
-define i32 @uaddv_v8i32(<8 x i32>* %a) #0 {
+define i32 @uaddv_v8i32(ptr %a) #0 {
 ; CHECK-LABEL: uaddv_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -125,7 +125,7 @@ define i32 @uaddv_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; CHECK-NEXT:    ret
-  %op = load <8 x i32>, <8 x i32>* %a
+  %op = load <8 x i32>, ptr %a
   %res = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %op)
   ret i32 %res
 }
@@ -142,7 +142,7 @@ define i64 @uaddv_v2i64(<2 x i64> %a) #0 {
   ret i64 %res
 }
 
-define i64 @uaddv_v4i64(<4 x i64>* %a) #0 {
+define i64 @uaddv_v4i64(ptr %a) #0 {
 ; CHECK-LABEL: uaddv_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -151,7 +151,7 @@ define i64 @uaddv_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-NEXT:    uaddv d0, p0, z0.d
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    ret
-  %op = load <4 x i64>, <4 x i64>* %a
+  %op = load <4 x i64>, ptr %a
   %res = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %op)
   ret i64 %res
 }
@@ -184,7 +184,7 @@ define i8 @smaxv_v16i8(<16 x i8> %a) #0 {
   ret i8 %res
 }
 
-define i8 @smaxv_v32i8(<32 x i8>* %a) #0 {
+define i8 @smaxv_v32i8(ptr %a) #0 {
 ; CHECK-LABEL: smaxv_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -193,7 +193,7 @@ define i8 @smaxv_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-NEXT:    smaxv b0, p0, z0.b
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %op = load <32 x i8>, <32 x i8>* %a
+  %op = load <32 x i8>, ptr %a
   %res = call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %op)
   ret i8 %res
 }
@@ -222,7 +222,7 @@ define i16 @smaxv_v8i16(<8 x i16> %a) #0 {
   ret i16 %res
 }
 
-define i16 @smaxv_v16i16(<16 x i16>* %a) #0 {
+define i16 @smaxv_v16i16(ptr %a) #0 {
 ; CHECK-LABEL: smaxv_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -231,7 +231,7 @@ define i16 @smaxv_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-NEXT:    smaxv h0, p0, z0.h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %op = load <16 x i16>, <16 x i16>* %a
+  %op = load <16 x i16>, ptr %a
   %res = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %op)
   ret i16 %res
 }
@@ -260,7 +260,7 @@ define i32 @smaxv_v4i32(<4 x i32> %a) #0 {
   ret i32 %res
 }
 
-define i32 @smaxv_v8i32(<8 x i32>* %a) #0 {
+define i32 @smaxv_v8i32(ptr %a) #0 {
 ; CHECK-LABEL: smaxv_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -269,7 +269,7 @@ define i32 @smaxv_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-NEXT:    smaxv s0, p0, z0.s
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %op = load <8 x i32>, <8 x i32>* %a
+  %op = load <8 x i32>, ptr %a
   %res = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> %op)
   ret i32 %res
 }
@@ -287,7 +287,7 @@ define i64 @smaxv_v2i64(<2 x i64> %a) #0 {
   ret i64 %res
 }
 
-define i64 @smaxv_v4i64(<4 x i64>* %a) #0 {
+define i64 @smaxv_v4i64(ptr %a) #0 {
 ; CHECK-LABEL: smaxv_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -296,7 +296,7 @@ define i64 @smaxv_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-NEXT:    smaxv d0, p0, z0.d
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    ret
-  %op = load <4 x i64>, <4 x i64>* %a
+  %op = load <4 x i64>, ptr %a
   %res = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> %op)
   ret i64 %res
 }
@@ -329,7 +329,7 @@ define i8 @sminv_v16i8(<16 x i8> %a) #0 {
   ret i8 %res
 }
 
-define i8 @sminv_v32i8(<32 x i8>* %a) #0 {
+define i8 @sminv_v32i8(ptr %a) #0 {
 ; CHECK-LABEL: sminv_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -338,7 +338,7 @@ define i8 @sminv_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-NEXT:    sminv b0, p0, z0.b
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %op = load <32 x i8>, <32 x i8>* %a
+  %op = load <32 x i8>, ptr %a
   %res = call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %op)
   ret i8 %res
 }
@@ -367,7 +367,7 @@ define i16 @sminv_v8i16(<8 x i16> %a) #0 {
   ret i16 %res
 }
 
-define i16 @sminv_v16i16(<16 x i16>* %a) #0 {
+define i16 @sminv_v16i16(ptr %a) #0 {
 ; CHECK-LABEL: sminv_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -376,7 +376,7 @@ define i16 @sminv_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-NEXT:    sminv h0, p0, z0.h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %op = load <16 x i16>, <16 x i16>* %a
+  %op = load <16 x i16>, ptr %a
   %res = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %op)
   ret i16 %res
 }
@@ -405,7 +405,7 @@ define i32 @sminv_v4i32(<4 x i32> %a) #0 {
   ret i32 %res
 }
 
-define i32 @sminv_v8i32(<8 x i32>* %a) #0 {
+define i32 @sminv_v8i32(ptr %a) #0 {
 ; CHECK-LABEL: sminv_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -414,7 +414,7 @@ define i32 @sminv_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-NEXT:    sminv s0, p0, z0.s
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %op = load <8 x i32>, <8 x i32>* %a
+  %op = load <8 x i32>, ptr %a
   %res = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %op)
   ret i32 %res
 }
@@ -432,7 +432,7 @@ define i64 @sminv_v2i64(<2 x i64> %a) #0 {
   ret i64 %res
 }
 
-define i64 @sminv_v4i64(<4 x i64>* %a) #0 {
+define i64 @sminv_v4i64(ptr %a) #0 {
 ; CHECK-LABEL: sminv_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -441,7 +441,7 @@ define i64 @sminv_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-NEXT:    sminv d0, p0, z0.d
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    ret
-  %op = load <4 x i64>, <4 x i64>* %a
+  %op = load <4 x i64>, ptr %a
   %res = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> %op)
   ret i64 %res
 }
@@ -474,7 +474,7 @@ define i8 @umaxv_v16i8(<16 x i8> %a) #0 {
   ret i8 %res
 }
 
-define i8 @umaxv_v32i8(<32 x i8>* %a) #0 {
+define i8 @umaxv_v32i8(ptr %a) #0 {
 ; CHECK-LABEL: umaxv_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -483,7 +483,7 @@ define i8 @umaxv_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-NEXT:    umaxv b0, p0, z0.b
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %op = load <32 x i8>, <32 x i8>* %a
+  %op = load <32 x i8>, ptr %a
   %res = call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %op)
   ret i8 %res
 }
@@ -512,7 +512,7 @@ define i16 @umaxv_v8i16(<8 x i16> %a) #0 {
   ret i16 %res
 }
 
-define i16 @umaxv_v16i16(<16 x i16>* %a) #0 {
+define i16 @umaxv_v16i16(ptr %a) #0 {
 ; CHECK-LABEL: umaxv_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -521,7 +521,7 @@ define i16 @umaxv_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-NEXT:    umaxv h0, p0, z0.h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %op = load <16 x i16>, <16 x i16>* %a
+  %op = load <16 x i16>, ptr %a
   %res = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %op)
   ret i16 %res
 }
@@ -550,7 +550,7 @@ define i32 @umaxv_v4i32(<4 x i32> %a) #0 {
   ret i32 %res
 }
 
-define i32 @umaxv_v8i32(<8 x i32>* %a) #0 {
+define i32 @umaxv_v8i32(ptr %a) #0 {
 ; CHECK-LABEL: umaxv_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -559,7 +559,7 @@ define i32 @umaxv_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-NEXT:    umaxv s0, p0, z0.s
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %op = load <8 x i32>, <8 x i32>* %a
+  %op = load <8 x i32>, ptr %a
   %res = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %op)
   ret i32 %res
 }
@@ -577,7 +577,7 @@ define i64 @umaxv_v2i64(<2 x i64> %a) #0 {
   ret i64 %res
 }
 
-define i64 @umaxv_v4i64(<4 x i64>* %a) #0 {
+define i64 @umaxv_v4i64(ptr %a) #0 {
 ; CHECK-LABEL: umaxv_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -586,7 +586,7 @@ define i64 @umaxv_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-NEXT:    umaxv d0, p0, z0.d
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    ret
-  %op = load <4 x i64>, <4 x i64>* %a
+  %op = load <4 x i64>, ptr %a
   %res = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> %op)
   ret i64 %res
 }
@@ -619,7 +619,7 @@ define i8 @uminv_v16i8(<16 x i8> %a) #0 {
   ret i8 %res
 }
 
-define i8 @uminv_v32i8(<32 x i8>* %a) #0 {
+define i8 @uminv_v32i8(ptr %a) #0 {
 ; CHECK-LABEL: uminv_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -628,7 +628,7 @@ define i8 @uminv_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-NEXT:    uminv b0, p0, z0.b
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %op = load <32 x i8>, <32 x i8>* %a
+  %op = load <32 x i8>, ptr %a
   %res = call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %op)
   ret i8 %res
 }
@@ -657,7 +657,7 @@ define i16 @uminv_v8i16(<8 x i16> %a) #0 {
   ret i16 %res
 }
 
-define i16 @uminv_v16i16(<16 x i16>* %a) #0 {
+define i16 @uminv_v16i16(ptr %a) #0 {
 ; CHECK-LABEL: uminv_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -666,7 +666,7 @@ define i16 @uminv_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-NEXT:    uminv h0, p0, z0.h
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %op = load <16 x i16>, <16 x i16>* %a
+  %op = load <16 x i16>, ptr %a
   %res = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %op)
   ret i16 %res
 }
@@ -695,7 +695,7 @@ define i32 @uminv_v4i32(<4 x i32> %a) #0 {
   ret i32 %res
 }
 
-define i32 @uminv_v8i32(<8 x i32>* %a) #0 {
+define i32 @uminv_v8i32(ptr %a) #0 {
 ; CHECK-LABEL: uminv_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -704,7 +704,7 @@ define i32 @uminv_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-NEXT:    uminv s0, p0, z0.s
 ; CHECK-NEXT:    fmov w0, s0
 ; CHECK-NEXT:    ret
-  %op = load <8 x i32>, <8 x i32>* %a
+  %op = load <8 x i32>, ptr %a
   %res = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %op)
   ret i32 %res
 }
@@ -722,7 +722,7 @@ define i64 @uminv_v2i64(<2 x i64> %a) #0 {
   ret i64 %res
 }
 
-define i64 @uminv_v4i64(<4 x i64>* %a) #0 {
+define i64 @uminv_v4i64(ptr %a) #0 {
 ; CHECK-LABEL: uminv_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -731,7 +731,7 @@ define i64 @uminv_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-NEXT:    uminv d0, p0, z0.d
 ; CHECK-NEXT:    fmov x0, d0
 ; CHECK-NEXT:    ret
-  %op = load <4 x i64>, <4 x i64>* %a
+  %op = load <4 x i64>, ptr %a
   %res = call i64 @llvm.vector.reduce.umin.v4i64(<4 x i64> %op)
   ret i64 %res
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
index f0f74e277ca47..21b42302845ed 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
@@ -108,7 +108,7 @@ define <16 x i8> @srem_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @srem_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @srem_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: srem_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -187,10 +187,10 @@ define void @srem_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    mls z0.b, p1/m, z5.b, z2.b
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = srem <32 x i8> %op1, %op2
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -240,7 +240,7 @@ define <8 x i16> @srem_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @srem_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @srem_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: srem_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q2, q0, [x0]
@@ -279,10 +279,10 @@ define void @srem_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    mls z0.h, p1/m, z5.h, z1.h
 ; CHECK-NEXT:    stp q2, q0, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = srem <16 x i16> %op1, %op2
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -316,7 +316,7 @@ define <4 x i32> @srem_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @srem_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+define void @srem_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: srem_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -330,10 +330,10 @@ define void @srem_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    mls z1.s, p0/m, z5.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = srem <8 x i32> %op1, %op2
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -367,7 +367,7 @@ define <2 x i64> @srem_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @srem_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+define void @srem_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: srem_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -381,10 +381,10 @@ define void @srem_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    mls z1.d, p0/m, z5.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = srem <4 x i64> %op1, %op2
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 
@@ -491,7 +491,7 @@ define <16 x i8> @urem_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @urem_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @urem_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: urem_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -570,10 +570,10 @@ define void @urem_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    mls z0.b, p1/m, z5.b, z2.b
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = urem <32 x i8> %op1, %op2
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -623,7 +623,7 @@ define <8 x i16> @urem_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @urem_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @urem_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: urem_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q2, q0, [x0]
@@ -662,10 +662,10 @@ define void @urem_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    mls z0.h, p1/m, z5.h, z1.h
 ; CHECK-NEXT:    stp q2, q0, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = urem <16 x i16> %op1, %op2
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -699,7 +699,7 @@ define <4 x i32> @urem_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @urem_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+define void @urem_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: urem_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -713,10 +713,10 @@ define void @urem_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    mls z1.s, p0/m, z5.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = urem <8 x i32> %op1, %op2
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -750,7 +750,7 @@ define <2 x i64> @urem_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @urem_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+define void @urem_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: urem_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -764,10 +764,10 @@ define void @urem_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    mls z1.d, p0/m, z5.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = urem <4 x i64> %op1, %op2
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll
index 4d02fa70e06c5..6ae31cdec3ddf 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll
@@ -49,7 +49,7 @@ define <16 x i8> @ashr_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @ashr_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @ashr_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ashr_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -59,10 +59,10 @@ define void @ashr_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    asr z1.b, p0/m, z1.b, z3.b
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = ashr <32 x i8> %op1, %op2
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -108,7 +108,7 @@ define <8 x i16> @ashr_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @ashr_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @ashr_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ashr_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -118,10 +118,10 @@ define void @ashr_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    asr z1.h, p0/m, z1.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = ashr <16 x i16> %op1, %op2
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -151,7 +151,7 @@ define <4 x i32> @ashr_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @ashr_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+define void @ashr_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ashr_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -161,10 +161,10 @@ define void @ashr_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    asr z1.s, p0/m, z1.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = ashr <8 x i32> %op1, %op2
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -194,7 +194,7 @@ define <2 x i64> @ashr_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @ashr_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+define void @ashr_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ashr_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -204,10 +204,10 @@ define void @ashr_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    asr z1.d, p0/m, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = ashr <4 x i64> %op1, %op2
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 
@@ -256,7 +256,7 @@ define <16 x i8> @lshr_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @lshr_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @lshr_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: lshr_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -266,10 +266,10 @@ define void @lshr_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    lsr z1.b, p0/m, z1.b, z3.b
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = lshr <32 x i8> %op1, %op2
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -314,7 +314,7 @@ define <8 x i16> @lshr_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @lshr_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @lshr_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: lshr_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -324,10 +324,10 @@ define void @lshr_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    lsr z1.h, p0/m, z1.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = lshr <16 x i16> %op1, %op2
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -357,7 +357,7 @@ define <4 x i32> @lshr_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @lshr_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+define void @lshr_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: lshr_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -367,10 +367,10 @@ define void @lshr_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    lsr z1.s, p0/m, z1.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = lshr <8 x i32> %op1, %op2
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -400,7 +400,7 @@ define <2 x i64> @lshr_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @lshr_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+define void @lshr_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: lshr_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -410,10 +410,10 @@ define void @lshr_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    lsr z1.d, p0/m, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = lshr <4 x i64> %op1, %op2
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 
@@ -475,7 +475,7 @@ define <16 x i8> @shl_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
   ret <16 x i8> %res
 }
 
-define void @shl_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+define void @shl_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: shl_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -485,10 +485,10 @@ define void @shl_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    lsl z1.b, p0/m, z1.b, z3.b
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
-  %op2 = load <32 x i8>, <32 x i8>* %b
+  %op1 = load <32 x i8>, ptr %a
+  %op2 = load <32 x i8>, ptr %b
   %res = shl <32 x i8> %op1, %op2
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -518,7 +518,7 @@ define <8 x i16> @shl_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
   ret <8 x i16> %res
 }
 
-define void @shl_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+define void @shl_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: shl_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -528,10 +528,10 @@ define void @shl_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    lsl z1.h, p0/m, z1.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
-  %op2 = load <16 x i16>, <16 x i16>* %b
+  %op1 = load <16 x i16>, ptr %a
+  %op2 = load <16 x i16>, ptr %b
   %res = shl <16 x i16> %op1, %op2
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -561,7 +561,7 @@ define <4 x i32> @shl_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
   ret <4 x i32> %res
 }
 
-define void @shl_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+define void @shl_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: shl_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -571,10 +571,10 @@ define void @shl_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    lsl z1.s, p0/m, z1.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
-  %op2 = load <8 x i32>, <8 x i32>* %b
+  %op1 = load <8 x i32>, ptr %a
+  %op2 = load <8 x i32>, ptr %b
   %res = shl <8 x i32> %op1, %op2
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -604,7 +604,7 @@ define <2 x i64> @shl_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
   ret <2 x i64> %res
 }
 
-define void @shl_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+define void @shl_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: shl_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -614,10 +614,10 @@ define void @shl_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    lsl z1.d, p0/m, z1.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
-  %op2 = load <4 x i64>, <4 x i64>* %b
+  %op1 = load <4 x i64>, ptr %a
+  %op2 = load <4 x i64>, ptr %b
   %res = shl <4 x i64> %op1, %op2
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
index f0bd11500c3d4..493778e17f7cd 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-to-fp.ll
@@ -19,7 +19,7 @@ define <4 x half> @ucvtf_v4i16_v4f16(<4 x i16> %op1) #0 {
   ret <4 x half> %res
 }
 
-define void @ucvtf_v8i16_v8f16(<8 x i16>* %a, <8 x half>* %b) #0 {
+define void @ucvtf_v8i16_v8f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ucvtf_v8i16_v8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -27,13 +27,13 @@ define void @ucvtf_v8i16_v8f16(<8 x i16>* %a, <8 x half>* %b) #0 {
 ; CHECK-NEXT:    ucvtf z0.h, p0/m, z0.h
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i16>, <8 x i16>* %a
+  %op1 = load <8 x i16>, ptr %a
   %res = uitofp <8 x i16> %op1 to <8 x half>
-  store <8 x half> %res, <8 x half>* %b
+  store <8 x half> %res, ptr %b
   ret void
 }
 
-define void @ucvtf_v16i16_v16f16(<16 x i16>* %a, <16 x half>* %b) #0 {
+define void @ucvtf_v16i16_v16f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ucvtf_v16i16_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -42,9 +42,9 @@ define void @ucvtf_v16i16_v16f16(<16 x i16>* %a, <16 x half>* %b) #0 {
 ; CHECK-NEXT:    ucvtf z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
+  %op1 = load <16 x i16>, ptr %a
   %res = uitofp <16 x i16> %op1 to <16 x half>
-  store <16 x half> %res, <16 x half>* %b
+  store <16 x half> %res, ptr %b
   ret void
 }
 
@@ -78,7 +78,7 @@ define <4 x float> @ucvtf_v4i16_v4f32(<4 x i16> %op1) #0 {
   ret <4 x float> %res
 }
 
-define void @ucvtf_v8i16_v8f32(<8 x i16>* %a, <8 x float>* %b) #0 {
+define void @ucvtf_v8i16_v8f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ucvtf_v8i16_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -90,13 +90,13 @@ define void @ucvtf_v8i16_v8f32(<8 x i16>* %a, <8 x float>* %b) #0 {
 ; CHECK-NEXT:    ucvtf z0.s, p0/m, z0.s
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i16>, <8 x i16>* %a
+  %op1 = load <8 x i16>, ptr %a
   %res = uitofp <8 x i16> %op1 to <8 x float>
-  store <8 x float> %res, <8 x float>* %b
+  store <8 x float> %res, ptr %b
   ret void
 }
 
-define void @ucvtf_v16i16_v16f32(<16 x i16>* %a, <16 x float>* %b) #0 {
+define void @ucvtf_v16i16_v16f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ucvtf_v16i16_v16f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -115,9 +115,9 @@ define void @ucvtf_v16i16_v16f32(<16 x i16>* %a, <16 x float>* %b) #0 {
 ; CHECK-NEXT:    ucvtf z1.s, p0/m, z2.s
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
+  %op1 = load <16 x i16>, ptr %a
   %res = uitofp <16 x i16> %op1 to <16 x float>
-  store <16 x float> %res, <16 x float>* %b
+  store <16 x float> %res, ptr %b
   ret void
 }
 
@@ -151,7 +151,7 @@ define <2 x double> @ucvtf_v2i16_v2f64(<2 x i16> %op1) #0 {
   ret <2 x double> %res
 }
 
-define void @ucvtf_v4i16_v4f64(<4 x i16>* %a, <4 x double>* %b) #0 {
+define void @ucvtf_v4i16_v4f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ucvtf_v4i16_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -164,13 +164,13 @@ define void @ucvtf_v4i16_v4f64(<4 x i16>* %a, <4 x double>* %b) #0 {
 ; CHECK-NEXT:    ucvtf z0.d, p0/m, z0.d
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i16>, <4 x i16>* %a
+  %op1 = load <4 x i16>, ptr %a
   %res = uitofp <4 x i16> %op1 to <4 x double>
-  store <4 x double> %res, <4 x double>* %b
+  store <4 x double> %res, ptr %b
   ret void
 }
 
-define void @ucvtf_v8i16_v8f64(<8 x i16>* %a, <8 x double>* %b) #0 {
+define void @ucvtf_v8i16_v8f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ucvtf_v8i16_v8f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -192,13 +192,13 @@ define void @ucvtf_v8i16_v8f64(<8 x i16>* %a, <8 x double>* %b) #0 {
 ; CHECK-NEXT:    ucvtf z0.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q2, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i16>, <8 x i16>* %a
+  %op1 = load <8 x i16>, ptr %a
   %res = uitofp <8 x i16> %op1 to <8 x double>
-  store <8 x double> %res, <8 x double>* %b
+  store <8 x double> %res, ptr %b
   ret void
 }
 
-define void @ucvtf_v16i16_v16f64(<16 x i16>* %a, <16 x double>* %b) #0 {
+define void @ucvtf_v16i16_v16f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ucvtf_v16i16_v16f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -238,9 +238,9 @@ define void @ucvtf_v16i16_v16f64(<16 x i16>* %a, <16 x double>* %b) #0 {
 ; CHECK-NEXT:    ucvtf z0.d, p0/m, z2.d
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
+  %op1 = load <16 x i16>, ptr %a
   %res = uitofp <16 x i16> %op1 to <16 x double>
-  store <16 x double> %res, <16 x double>* %b
+  store <16 x double> %res, ptr %b
   ret void
 }
 
@@ -274,7 +274,7 @@ define <4 x half> @ucvtf_v4i32_v4f16(<4 x i32> %op1) #0 {
   ret <4 x half> %res
 }
 
-define <8 x half> @ucvtf_v8i32_v8f16(<8 x i32>* %a) #0 {
+define <8 x half> @ucvtf_v8i32_v8f16(ptr %a) #0 {
 ; CHECK-LABEL: ucvtf_v8i32_v8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -287,12 +287,12 @@ define <8 x half> @ucvtf_v8i32_v8f16(<8 x i32>* %a) #0 {
 ; CHECK-NEXT:    splice z0.h, p0, z0.h, z2.h
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
+  %op1 = load <8 x i32>, ptr %a
   %res = uitofp <8 x i32> %op1 to <8 x half>
   ret <8 x half> %res
 }
 
-define void @ucvtf_v16i32_v16f16(<16 x i32>* %a, <16 x half>* %b) #0 {
+define void @ucvtf_v16i32_v16f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ucvtf_v16i32_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -311,9 +311,9 @@ define void @ucvtf_v16i32_v16f16(<16 x i32>* %a, <16 x half>* %b) #0 {
 ; CHECK-NEXT:    splice z3.h, p1, z3.h, z2.h
 ; CHECK-NEXT:    stp q0, q3, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i32>, <16 x i32>* %a
+  %op1 = load <16 x i32>, ptr %a
   %res = uitofp <16 x i32> %op1 to <16 x half>
-  store <16 x half> %res, <16 x half>* %b
+  store <16 x half> %res, ptr %b
   ret void
 }
 
@@ -345,7 +345,7 @@ define <4 x float> @ucvtf_v4i32_v4f32(<4 x i32> %op1) #0 {
   ret <4 x float> %res
 }
 
-define void @ucvtf_v8i32_v8f32(<8 x i32>* %a, <8 x float>* %b) #0 {
+define void @ucvtf_v8i32_v8f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ucvtf_v8i32_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -354,9 +354,9 @@ define void @ucvtf_v8i32_v8f32(<8 x i32>* %a, <8 x float>* %b) #0 {
 ; CHECK-NEXT:    ucvtf z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
+  %op1 = load <8 x i32>, ptr %a
   %res = uitofp <8 x i32> %op1 to <8 x float>
-  store <8 x float> %res, <8 x float>* %b
+  store <8 x float> %res, ptr %b
   ret void
 }
 
@@ -377,7 +377,7 @@ define <2 x double> @ucvtf_v2i32_v2f64(<2 x i32> %op1) #0 {
   ret <2 x double> %res
 }
 
-define void @ucvtf_v4i32_v4f64(<4 x i32>* %a, <4 x double>* %b) #0 {
+define void @ucvtf_v4i32_v4f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ucvtf_v4i32_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -389,13 +389,13 @@ define void @ucvtf_v4i32_v4f64(<4 x i32>* %a, <4 x double>* %b) #0 {
 ; CHECK-NEXT:    ucvtf z0.d, p0/m, z0.d
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i32>, <4 x i32>* %a
+  %op1 = load <4 x i32>, ptr %a
   %res = uitofp <4 x i32> %op1 to <4 x double>
-  store <4 x double> %res, <4 x double>* %b
+  store <4 x double> %res, ptr %b
   ret void
 }
 
-define void @ucvtf_v8i32_v8f64(<8 x i32>* %a, <8 x double>* %b) #0 {
+define void @ucvtf_v8i32_v8f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ucvtf_v8i32_v8f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -414,9 +414,9 @@ define void @ucvtf_v8i32_v8f64(<8 x i32>* %a, <8 x double>* %b) #0 {
 ; CHECK-NEXT:    ucvtf z1.d, p0/m, z2.d
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
+  %op1 = load <8 x i32>, ptr %a
   %res = uitofp <8 x i32> %op1 to <8 x double>
-  store <8 x double> %res, <8 x double>* %b
+  store <8 x double> %res, ptr %b
   ret void
 }
 
@@ -444,7 +444,7 @@ define <2 x half> @ucvtf_v2i64_v2f16(<2 x i64> %op1) #0 {
   ret <2 x half> %res
 }
 
-define <4 x half> @ucvtf_v4i64_v4f16(<4 x i64>* %a) #0 {
+define <4 x half> @ucvtf_v4i64_v4f16(ptr %a) #0 {
 ; CHECK-LABEL: ucvtf_v4i64_v4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -461,12 +461,12 @@ define <4 x half> @ucvtf_v4i64_v4f16(<4 x i64>* %a) #0 {
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
+  %op1 = load <4 x i64>, ptr %a
   %res = uitofp <4 x i64> %op1 to <4 x half>
   ret <4 x half> %res
 }
 
-define <8 x half> @ucvtf_v8i64_v8f16(<8 x i64>* %a) #0 {
+define <8 x half> @ucvtf_v8i64_v8f16(ptr %a) #0 {
 ; CHECK-LABEL: ucvtf_v8i64_v8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #32]
@@ -493,7 +493,7 @@ define <8 x half> @ucvtf_v8i64_v8f16(<8 x i64>* %a) #0 {
 ; CHECK-NEXT:    splice z0.h, p0, z0.h, z2.h
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i64>, <8 x i64>* %a
+  %op1 = load <8 x i64>, ptr %a
   %res = uitofp <8 x i64> %op1 to <8 x half>
   ret <8 x half> %res
 }
@@ -515,7 +515,7 @@ define <2 x float> @ucvtf_v2i64_v2f32(<2 x i64> %op1) #0 {
   ret <2 x float> %res
 }
 
-define <4 x float> @ucvtf_v4i64_v4f32(<4 x i64>* %a) #0 {
+define <4 x float> @ucvtf_v4i64_v4f32(ptr %a) #0 {
 ; CHECK-LABEL: ucvtf_v4i64_v4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -528,12 +528,12 @@ define <4 x float> @ucvtf_v4i64_v4f32(<4 x i64>* %a) #0 {
 ; CHECK-NEXT:    splice z0.s, p0, z0.s, z2.s
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
+  %op1 = load <4 x i64>, ptr %a
   %res = uitofp <4 x i64> %op1 to <4 x float>
   ret <4 x float> %res
 }
 
-define void @ucvtf_v8i64_v8f32(<8 x i64>* %a, <8 x float>* %b) #0 {
+define void @ucvtf_v8i64_v8f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ucvtf_v8i64_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -552,9 +552,9 @@ define void @ucvtf_v8i64_v8f32(<8 x i64>* %a, <8 x float>* %b) #0 {
 ; CHECK-NEXT:    splice z3.s, p1, z3.s, z2.s
 ; CHECK-NEXT:    stp q0, q3, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i64>, <8 x i64>* %a
+  %op1 = load <8 x i64>, ptr %a
   %res = uitofp <8 x i64> %op1 to <8 x float>
-  store <8 x float> %res, <8 x float>* %b
+  store <8 x float> %res, ptr %b
   ret void
 }
 
@@ -574,7 +574,7 @@ define <2 x double> @ucvtf_v2i64_v2f64(<2 x i64> %op1) #0 {
   ret <2 x double> %res
 }
 
-define void @ucvtf_v4i64_v4f64(<4 x i64>* %a, <4 x double>* %b) #0 {
+define void @ucvtf_v4i64_v4f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: ucvtf_v4i64_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -583,9 +583,9 @@ define void @ucvtf_v4i64_v4f64(<4 x i64>* %a, <4 x double>* %b) #0 {
 ; CHECK-NEXT:    ucvtf z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
+  %op1 = load <4 x i64>, ptr %a
   %res = uitofp <4 x i64> %op1 to <4 x double>
-  store <4 x double> %res, <4 x double>* %b
+  store <4 x double> %res, ptr %b
   ret void
 }
 
@@ -605,7 +605,7 @@ define <4 x half> @scvtf_v4i16_v4f16(<4 x i16> %op1) #0 {
   ret <4 x half> %res
 }
 
-define void @scvtf_v8i16_v8f16(<8 x i16>* %a, <8 x half>* %b) #0 {
+define void @scvtf_v8i16_v8f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: scvtf_v8i16_v8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -613,13 +613,13 @@ define void @scvtf_v8i16_v8f16(<8 x i16>* %a, <8 x half>* %b) #0 {
 ; CHECK-NEXT:    scvtf z0.h, p0/m, z0.h
 ; CHECK-NEXT:    str q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i16>, <8 x i16>* %a
+  %op1 = load <8 x i16>, ptr %a
   %res = sitofp <8 x i16> %op1 to <8 x half>
-  store <8 x half> %res, <8 x half>* %b
+  store <8 x half> %res, ptr %b
   ret void
 }
 
-define void @scvtf_v16i16_v16f16(<16 x i16>* %a, <16 x half>* %b) #0 {
+define void @scvtf_v16i16_v16f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: scvtf_v16i16_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -628,9 +628,9 @@ define void @scvtf_v16i16_v16f16(<16 x i16>* %a, <16 x half>* %b) #0 {
 ; CHECK-NEXT:    scvtf z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
+  %op1 = load <16 x i16>, ptr %a
   %res = sitofp <16 x i16> %op1 to <16 x half>
-  store <16 x half> %res, <16 x half>* %b
+  store <16 x half> %res, ptr %b
   ret void
 }
 
@@ -664,7 +664,7 @@ define <4 x float> @scvtf_v4i16_v4f32(<4 x i16> %op1) #0 {
   ret <4 x float> %res
 }
 
-define void @scvtf_v8i16_v8f32(<8 x i16>* %a, <8 x float>* %b) #0 {
+define void @scvtf_v8i16_v8f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: scvtf_v8i16_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -676,13 +676,13 @@ define void @scvtf_v8i16_v8f32(<8 x i16>* %a, <8 x float>* %b) #0 {
 ; CHECK-NEXT:    scvtf z0.s, p0/m, z0.s
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i16>, <8 x i16>* %a
+  %op1 = load <8 x i16>, ptr %a
   %res = sitofp <8 x i16> %op1 to <8 x float>
-  store <8 x float> %res, <8 x float>* %b
+  store <8 x float> %res, ptr %b
   ret void
 }
 
-define void @scvtf_v16i16_v16f32(<16 x i16>* %a, <16 x float>* %b) #0 {
+define void @scvtf_v16i16_v16f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: scvtf_v16i16_v16f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -701,9 +701,9 @@ define void @scvtf_v16i16_v16f32(<16 x i16>* %a, <16 x float>* %b) #0 {
 ; CHECK-NEXT:    scvtf z1.s, p0/m, z2.s
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
+  %op1 = load <16 x i16>, ptr %a
   %res = sitofp <16 x i16> %op1 to <16 x float>
-  store <16 x float> %res, <16 x float>* %b
+  store <16 x float> %res, ptr %b
   ret void
 }
 
@@ -727,7 +727,7 @@ define <2 x double> @scvtf_v2i16_v2f64(<2 x i16> %op1) #0 {
   ret <2 x double> %res
 }
 
-define void @scvtf_v4i16_v4f64(<4 x i16>* %a, <4 x double>* %b) #0 {
+define void @scvtf_v4i16_v4f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: scvtf_v4i16_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
@@ -740,13 +740,13 @@ define void @scvtf_v4i16_v4f64(<4 x i16>* %a, <4 x double>* %b) #0 {
 ; CHECK-NEXT:    scvtf z0.d, p0/m, z0.d
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i16>, <4 x i16>* %a
+  %op1 = load <4 x i16>, ptr %a
   %res = sitofp <4 x i16> %op1 to <4 x double>
-  store <4 x double> %res, <4 x double>* %b
+  store <4 x double> %res, ptr %b
   ret void
 }
 
-define void @scvtf_v8i16_v8f64(<8 x i16>* %a, <8 x double>* %b) #0 {
+define void @scvtf_v8i16_v8f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: scvtf_v8i16_v8f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -768,13 +768,13 @@ define void @scvtf_v8i16_v8f64(<8 x i16>* %a, <8 x double>* %b) #0 {
 ; CHECK-NEXT:    scvtf z0.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q2, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i16>, <8 x i16>* %a
+  %op1 = load <8 x i16>, ptr %a
   %res = sitofp <8 x i16> %op1 to <8 x double>
-  store <8 x double> %res, <8 x double>* %b
+  store <8 x double> %res, ptr %b
   ret void
 }
 
-define void @scvtf_v16i16_v16f64(<16 x i16>* %a, <16 x double>* %b) #0 {
+define void @scvtf_v16i16_v16f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: scvtf_v16i16_v16f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -814,9 +814,9 @@ define void @scvtf_v16i16_v16f64(<16 x i16>* %a, <16 x double>* %b) #0 {
 ; CHECK-NEXT:    scvtf z0.d, p0/m, z2.d
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
+  %op1 = load <16 x i16>, ptr %a
   %res = sitofp <16 x i16> %op1 to <16 x double>
-  store <16 x double> %res, <16 x double>* %b
+  store <16 x double> %res, ptr %b
   ret void
 }
 
@@ -850,7 +850,7 @@ define <4 x half> @scvtf_v4i32_v4f16(<4 x i32> %op1) #0 {
   ret <4 x half> %res
 }
 
-define <8 x half> @scvtf_v8i32_v8f16(<8 x i32>* %a) #0 {
+define <8 x half> @scvtf_v8i32_v8f16(ptr %a) #0 {
 ; CHECK-LABEL: scvtf_v8i32_v8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -863,7 +863,7 @@ define <8 x half> @scvtf_v8i32_v8f16(<8 x i32>* %a) #0 {
 ; CHECK-NEXT:    splice z0.h, p0, z0.h, z2.h
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
+  %op1 = load <8 x i32>, ptr %a
   %res = sitofp <8 x i32> %op1 to <8 x half>
   ret <8 x half> %res
 }
@@ -896,7 +896,7 @@ define <4 x float> @scvtf_v4i32_v4f32(<4 x i32> %op1) #0 {
   ret <4 x float> %res
 }
 
-define void @scvtf_v8i32_v8f32(<8 x i32>* %a, <8 x float>* %b) #0 {
+define void @scvtf_v8i32_v8f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: scvtf_v8i32_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -905,9 +905,9 @@ define void @scvtf_v8i32_v8f32(<8 x i32>* %a, <8 x float>* %b) #0 {
 ; CHECK-NEXT:    scvtf z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
+  %op1 = load <8 x i32>, ptr %a
   %res = sitofp <8 x i32> %op1 to <8 x float>
-  store <8 x float> %res, <8 x float>* %b
+  store <8 x float> %res, ptr %b
   ret void
 }
 
@@ -928,7 +928,7 @@ define <2 x double> @scvtf_v2i32_v2f64(<2 x i32> %op1) #0 {
   ret <2 x double> %res
 }
 
-define void @scvtf_v4i32_v4f64(<4 x i32>* %a, <4 x double>* %b) #0 {
+define void @scvtf_v4i32_v4f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: scvtf_v4i32_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -940,13 +940,13 @@ define void @scvtf_v4i32_v4f64(<4 x i32>* %a, <4 x double>* %b) #0 {
 ; CHECK-NEXT:    scvtf z0.d, p0/m, z0.d
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i32>, <4 x i32>* %a
+  %op1 = load <4 x i32>, ptr %a
   %res = sitofp <4 x i32> %op1 to <4 x double>
-  store <4 x double> %res, <4 x double>* %b
+  store <4 x double> %res, ptr %b
   ret void
 }
 
-define void @scvtf_v8i32_v8f64(<8 x i32>* %a, <8 x double>* %b) #0 {
+define void @scvtf_v8i32_v8f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: scvtf_v8i32_v8f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -965,13 +965,13 @@ define void @scvtf_v8i32_v8f64(<8 x i32>* %a, <8 x double>* %b) #0 {
 ; CHECK-NEXT:    scvtf z1.d, p0/m, z2.d
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
+  %op1 = load <8 x i32>, ptr %a
   %res = sitofp <8 x i32> %op1 to <8 x double>
-  store <8 x double> %res, <8 x double>* %b
+  store <8 x double> %res, ptr %b
   ret void
 }
 
-define void @scvtf_v16i32_v16f64(<16 x i32>* %a, <16 x double>* %b) #0 {
+define void @scvtf_v16i32_v16f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: scvtf_v16i32_v16f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q2, q3, [x0, #32]
@@ -1007,9 +1007,9 @@ define void @scvtf_v16i32_v16f64(<16 x i32>* %a, <16 x double>* %b) #0 {
 ; CHECK-NEXT:    scvtf z2.d, p0/m, z4.d
 ; CHECK-NEXT:    stp q2, q0, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i32>, <16 x i32>* %a
+  %op1 = load <16 x i32>, ptr %a
   %res = sitofp <16 x i32> %op1 to <16 x double>
-  store <16 x double> %res, <16 x double>* %b
+  store <16 x double> %res, ptr %b
   ret void
 }
 
@@ -1037,7 +1037,7 @@ define <2 x half> @scvtf_v2i64_v2f16(<2 x i64> %op1) #0 {
   ret <2 x half> %res
 }
 
-define <4 x half> @scvtf_v4i64_v4f16(<4 x i64>* %a) #0 {
+define <4 x half> @scvtf_v4i64_v4f16(ptr %a) #0 {
 ; CHECK-LABEL: scvtf_v4i64_v4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -1054,7 +1054,7 @@ define <4 x half> @scvtf_v4i64_v4f16(<4 x i64>* %a) #0 {
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
+  %op1 = load <4 x i64>, ptr %a
   %res = sitofp <4 x i64> %op1 to <4 x half>
   ret <4 x half> %res
 }
@@ -1076,7 +1076,7 @@ define <2 x float> @scvtf_v2i64_v2f32(<2 x i64> %op1) #0 {
   ret <2 x float> %res
 }
 
-define <4 x float> @scvtf_v4i64_v4f32(<4 x i64>* %a) #0 {
+define <4 x float> @scvtf_v4i64_v4f32(ptr %a) #0 {
 ; CHECK-LABEL: scvtf_v4i64_v4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -1089,7 +1089,7 @@ define <4 x float> @scvtf_v4i64_v4f32(<4 x i64>* %a) #0 {
 ; CHECK-NEXT:    splice z0.s, p0, z0.s, z2.s
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
+  %op1 = load <4 x i64>, ptr %a
   %res = sitofp <4 x i64> %op1 to <4 x float>
   ret <4 x float> %res
 }
@@ -1110,7 +1110,7 @@ define <2 x double> @scvtf_v2i64_v2f64(<2 x i64> %op1) #0 {
   ret <2 x double> %res
 }
 
-define void @scvtf_v4i64_v4f64(<4 x i64>* %a, <4 x double>* %b) #0 {
+define void @scvtf_v4i64_v4f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: scvtf_v4i64_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -1119,9 +1119,9 @@ define void @scvtf_v4i64_v4f64(<4 x i64>* %a, <4 x double>* %b) #0 {
 ; CHECK-NEXT:    scvtf z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
+  %op1 = load <4 x i64>, ptr %a
   %res = sitofp <4 x i64> %op1 to <4 x double>
-  store <4 x double> %res, <4 x double>* %b
+  store <4 x double> %res, ptr %b
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
index 953429836eb94..96abfcda50b8a 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
@@ -3,45 +3,45 @@
 
 target triple = "aarch64-unknown-linux-gnu"
 
-define <4 x i8> @load_v4i8(<4 x i8>* %a) #0 {
+define <4 x i8> @load_v4i8(ptr %a) #0 {
 ; CHECK-LABEL: load_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
 ; CHECK-NEXT:    uunpklo z0.h, z0.b
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
-  %load = load <4 x i8>, <4 x i8>* %a
+  %load = load <4 x i8>, ptr %a
   ret <4 x i8> %load
 }
 
-define <8 x i8> @load_v8i8(<8 x i8>* %a) #0 {
+define <8 x i8> @load_v8i8(ptr %a) #0 {
 ; CHECK-LABEL: load_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <8 x i8>, <8 x i8>* %a
+  %load = load <8 x i8>, ptr %a
   ret <8 x i8> %load
 }
 
-define <16 x i8> @load_v16i8(<16 x i8>* %a) #0 {
+define <16 x i8> @load_v16i8(ptr %a) #0 {
 ; CHECK-LABEL: load_v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <16 x i8>, <16 x i8>* %a
+  %load = load <16 x i8>, ptr %a
   ret <16 x i8> %load
 }
 
-define <32 x i8> @load_v32i8(<32 x i8>* %a) #0 {
+define <32 x i8> @load_v32i8(ptr %a) #0 {
 ; CHECK-LABEL: load_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <32 x i8>, <32 x i8>* %a
+  %load = load <32 x i8>, ptr %a
   ret <32 x i8> %load
 }
 
-define <2 x i16> @load_v2i16(<2 x i16>* %a) #0 {
+define <2 x i16> @load_v2i16(ptr %a) #0 {
 ; CHECK-LABEL: load_v2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -53,178 +53,178 @@ define <2 x i16> @load_v2i16(<2 x i16>* %a) #0 {
 ; CHECK-NEXT:    ldr d0, [sp, #8]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
-  %load = load <2 x i16>, <2 x i16>* %a
+  %load = load <2 x i16>, ptr %a
   ret <2 x i16> %load
 }
 
-define <2 x half> @load_v2f16(<2 x half>* %a) #0 {
+define <2 x half> @load_v2f16(ptr %a) #0 {
 ; CHECK-LABEL: load_v2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <2 x half>, <2 x half>* %a
+  %load = load <2 x half>, ptr %a
   ret <2 x half> %load
 }
 
-define <4 x i16> @load_v4i16(<4 x i16>* %a) #0 {
+define <4 x i16> @load_v4i16(ptr %a) #0 {
 ; CHECK-LABEL: load_v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <4 x i16>, <4 x i16>* %a
+  %load = load <4 x i16>, ptr %a
   ret <4 x i16> %load
 }
 
-define <4 x half> @load_v4f16(<4 x half>* %a) #0 {
+define <4 x half> @load_v4f16(ptr %a) #0 {
 ; CHECK-LABEL: load_v4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <4 x half>, <4 x half>* %a
+  %load = load <4 x half>, ptr %a
   ret <4 x half> %load
 }
 
-define <8 x i16> @load_v8i16(<8 x i16>* %a) #0 {
+define <8 x i16> @load_v8i16(ptr %a) #0 {
 ; CHECK-LABEL: load_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <8 x i16>, <8 x i16>* %a
+  %load = load <8 x i16>, ptr %a
   ret <8 x i16> %load
 }
 
-define <8 x half> @load_v8f16(<8 x half>* %a) #0 {
+define <8 x half> @load_v8f16(ptr %a) #0 {
 ; CHECK-LABEL: load_v8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <8 x half>, <8 x half>* %a
+  %load = load <8 x half>, ptr %a
   ret <8 x half> %load
 }
 
-define <16 x i16> @load_v16i16(<16 x i16>* %a) #0 {
+define <16 x i16> @load_v16i16(ptr %a) #0 {
 ; CHECK-LABEL: load_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <16 x i16>, <16 x i16>* %a
+  %load = load <16 x i16>, ptr %a
   ret <16 x i16> %load
 }
 
-define <16 x half> @load_v16f16(<16 x half>* %a) #0 {
+define <16 x half> @load_v16f16(ptr %a) #0 {
 ; CHECK-LABEL: load_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <16 x half>, <16 x half>* %a
+  %load = load <16 x half>, ptr %a
   ret <16 x half> %load
 }
 
-define <2 x i32> @load_v2i32(<2 x i32>* %a) #0 {
+define <2 x i32> @load_v2i32(ptr %a) #0 {
 ; CHECK-LABEL: load_v2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <2 x i32>, <2 x i32>* %a
+  %load = load <2 x i32>, ptr %a
   ret <2 x i32> %load
 }
 
-define <2 x float> @load_v2f32(<2 x float>* %a) #0 {
+define <2 x float> @load_v2f32(ptr %a) #0 {
 ; CHECK-LABEL: load_v2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <2 x float>, <2 x float>* %a
+  %load = load <2 x float>, ptr %a
   ret <2 x float> %load
 }
 
-define <4 x i32> @load_v4i32(<4 x i32>* %a) #0 {
+define <4 x i32> @load_v4i32(ptr %a) #0 {
 ; CHECK-LABEL: load_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <4 x i32>, <4 x i32>* %a
+  %load = load <4 x i32>, ptr %a
   ret <4 x i32> %load
 }
 
-define <4 x float> @load_v4f32(<4 x float>* %a) #0 {
+define <4 x float> @load_v4f32(ptr %a) #0 {
 ; CHECK-LABEL: load_v4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <4 x float>, <4 x float>* %a
+  %load = load <4 x float>, ptr %a
   ret <4 x float> %load
 }
 
-define <8 x i32> @load_v8i32(<8 x i32>* %a) #0 {
+define <8 x i32> @load_v8i32(ptr %a) #0 {
 ; CHECK-LABEL: load_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <8 x i32>, <8 x i32>* %a
+  %load = load <8 x i32>, ptr %a
   ret <8 x i32> %load
 }
 
-define <8 x float> @load_v8f32(<8 x float>* %a) #0 {
+define <8 x float> @load_v8f32(ptr %a) #0 {
 ; CHECK-LABEL: load_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <8 x float>, <8 x float>* %a
+  %load = load <8 x float>, ptr %a
   ret <8 x float> %load
 }
 
-define <1 x i64> @load_v1i64(<1 x i64>* %a) #0 {
+define <1 x i64> @load_v1i64(ptr %a) #0 {
 ; CHECK-LABEL: load_v1i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <1 x i64>, <1 x i64>* %a
+  %load = load <1 x i64>, ptr %a
   ret <1 x i64> %load
 }
 
-define <1 x double> @load_v1f64(<1 x double>* %a) #0 {
+define <1 x double> @load_v1f64(ptr %a) #0 {
 ; CHECK-LABEL: load_v1f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <1 x double>, <1 x double>* %a
+  %load = load <1 x double>, ptr %a
   ret <1 x double> %load
 }
 
-define <2 x i64> @load_v2i64(<2 x i64>* %a) #0 {
+define <2 x i64> @load_v2i64(ptr %a) #0 {
 ; CHECK-LABEL: load_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <2 x i64>, <2 x i64>* %a
+  %load = load <2 x i64>, ptr %a
   ret <2 x i64> %load
 }
 
-define <2 x double> @load_v2f64(<2 x double>* %a) #0 {
+define <2 x double> @load_v2f64(ptr %a) #0 {
 ; CHECK-LABEL: load_v2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <2 x double>, <2 x double>* %a
+  %load = load <2 x double>, ptr %a
   ret <2 x double> %load
 }
 
-define <4 x i64> @load_v4i64(<4 x i64>* %a) #0 {
+define <4 x i64> @load_v4i64(ptr %a) #0 {
 ; CHECK-LABEL: load_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <4 x i64>, <4 x i64>* %a
+  %load = load <4 x i64>, ptr %a
   ret <4 x i64> %load
 }
 
-define <4 x double> @load_v4f64(<4 x double>* %a) #0 {
+define <4 x double> @load_v4f64(ptr %a) #0 {
 ; CHECK-LABEL: load_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %load = load <4 x double>, <4 x double>* %a
+  %load = load <4 x double>, ptr %a
   ret <4 x double> %load
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll
index 34c7d792e8dd6..b417276173a49 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-load.ll
@@ -7,7 +7,7 @@ target triple = "aarch64-unknown-linux-gnu"
 ; Masked Load
 ;
 
-define <4 x i8> @masked_load_v4i8(<4 x i8>* %src, <4 x i1> %mask) #0 {
+define <4 x i8> @masked_load_v4i8(ptr %src, <4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_load_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -18,11 +18,11 @@ define <4 x i8> @masked_load_v4i8(<4 x i8>* %src, <4 x i1> %mask) #0 {
 ; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
-  %load = call <4 x i8> @llvm.masked.load.v4i8(<4 x i8>* %src, i32 8, <4 x i1> %mask, <4 x i8> zeroinitializer)
+  %load = call <4 x i8> @llvm.masked.load.v4i8(ptr %src, i32 8, <4 x i1> %mask, <4 x i8> zeroinitializer)
   ret <4 x i8> %load
 }
 
-define <8 x i8> @masked_load_v8i8(<8 x i8>* %src, <8 x i1> %mask) #0 {
+define <8 x i8> @masked_load_v8i8(ptr %src, <8 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_load_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -33,11 +33,11 @@ define <8 x i8> @masked_load_v8i8(<8 x i8>* %src, <8 x i1> %mask) #0 {
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
-  %load = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %src, i32 8, <8 x i1> %mask, <8 x i8> zeroinitializer)
+  %load = call <8 x i8> @llvm.masked.load.v8i8(ptr %src, i32 8, <8 x i1> %mask, <8 x i8> zeroinitializer)
   ret <8 x i8> %load
 }
 
-define <16 x i8> @masked_load_v16i8(<16 x i8>* %src, <16 x i1> %mask) #0 {
+define <16 x i8> @masked_load_v16i8(ptr %src, <16 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_load_v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
@@ -48,11 +48,11 @@ define <16 x i8> @masked_load_v16i8(<16 x i8>* %src, <16 x i1> %mask) #0 {
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %load = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %src, i32 8, <16 x i1> %mask, <16 x i8> zeroinitializer)
+  %load = call <16 x i8> @llvm.masked.load.v16i8(ptr %src, i32 8, <16 x i1> %mask, <16 x i8> zeroinitializer)
   ret <16 x i8> %load
 }
 
-define <32 x i8> @masked_load_v32i8(<32 x i8>* %src, <32 x i1> %mask) #0 {
+define <32 x i8> @masked_load_v32i8(ptr %src, <32 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_load_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
@@ -129,11 +129,11 @@ define <32 x i8> @masked_load_v32i8(<32 x i8>* %src, <32 x i1> %mask) #0 {
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
-  %load = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %src, i32 8, <32 x i1> %mask, <32 x i8> zeroinitializer)
+  %load = call <32 x i8> @llvm.masked.load.v32i8(ptr %src, i32 8, <32 x i1> %mask, <32 x i8> zeroinitializer)
   ret <32 x i8> %load
 }
 
-define <2 x half> @masked_load_v2f16(<2 x half>* %src, <2 x i1> %mask) #0 {
+define <2 x half> @masked_load_v2f16(ptr %src, <2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_load_v2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -154,11 +154,11 @@ define <2 x half> @masked_load_v2f16(<2 x half>* %src, <2 x i1> %mask) #0 {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
-  %load = call <2 x half> @llvm.masked.load.v2f16(<2 x half>* %src, i32 8, <2 x i1> %mask, <2 x half> zeroinitializer)
+  %load = call <2 x half> @llvm.masked.load.v2f16(ptr %src, i32 8, <2 x i1> %mask, <2 x half> zeroinitializer)
   ret <2 x half> %load
 }
 
-define <4 x half> @masked_load_v4f16(<4 x half>* %src, <4 x i1> %mask) #0 {
+define <4 x half> @masked_load_v4f16(ptr %src, <4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_load_v4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -169,11 +169,11 @@ define <4 x half> @masked_load_v4f16(<4 x half>* %src, <4 x i1> %mask) #0 {
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
-  %load = call <4 x half> @llvm.masked.load.v4f16(<4 x half>* %src, i32 8, <4 x i1> %mask, <4 x half> zeroinitializer)
+  %load = call <4 x half> @llvm.masked.load.v4f16(ptr %src, i32 8, <4 x i1> %mask, <4 x half> zeroinitializer)
   ret <4 x half> %load
 }
 
-define <8 x half> @masked_load_v8f16(<8 x half>* %src, <8 x i1> %mask) #0 {
+define <8 x half> @masked_load_v8f16(ptr %src, <8 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_load_v8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -185,11 +185,11 @@ define <8 x half> @masked_load_v8f16(<8 x half>* %src, <8 x i1> %mask) #0 {
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %load = call <8 x half> @llvm.masked.load.v8f16(<8 x half>* %src, i32 8, <8 x i1> %mask, <8 x half> zeroinitializer)
+  %load = call <8 x half> @llvm.masked.load.v8f16(ptr %src, i32 8, <8 x i1> %mask, <8 x half> zeroinitializer)
   ret <8 x half> %load
 }
 
-define <16 x half> @masked_load_v16f16(<16 x half>* %src, <16 x i1> %mask) #0 {
+define <16 x half> @masked_load_v16f16(ptr %src, <16 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_load_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
@@ -209,11 +209,11 @@ define <16 x half> @masked_load_v16f16(<16 x half>* %src, <16 x i1> %mask) #0 {
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    ret
-  %load = call <16 x half> @llvm.masked.load.v16f16(<16 x half>* %src, i32 8, <16 x i1> %mask, <16 x half> zeroinitializer)
+  %load = call <16 x half> @llvm.masked.load.v16f16(ptr %src, i32 8, <16 x i1> %mask, <16 x half> zeroinitializer)
   ret <16 x half> %load
 }
 
-define <2 x float> @masked_load_v2f32(<2 x float>* %src, <2 x i1> %mask) #0 {
+define <2 x float> @masked_load_v2f32(ptr %src, <2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_load_v2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -224,11 +224,11 @@ define <2 x float> @masked_load_v2f32(<2 x float>* %src, <2 x i1> %mask) #0 {
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
-  %load = call <2 x float> @llvm.masked.load.v2f32(<2 x float>* %src, i32 8, <2 x i1> %mask, <2 x float> zeroinitializer)
+  %load = call <2 x float> @llvm.masked.load.v2f32(ptr %src, i32 8, <2 x i1> %mask, <2 x float> zeroinitializer)
   ret <2 x float> %load
 }
 
-define <4 x float> @masked_load_v4f32(<4 x float>* %src, <4 x i1> %mask) #0 {
+define <4 x float> @masked_load_v4f32(ptr %src, <4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_load_v4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -240,11 +240,11 @@ define <4 x float> @masked_load_v4f32(<4 x float>* %src, <4 x i1> %mask) #0 {
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %load = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %src, i32 8, <4 x i1> %mask, <4 x float> zeroinitializer)
+  %load = call <4 x float> @llvm.masked.load.v4f32(ptr %src, i32 8, <4 x i1> %mask, <4 x float> zeroinitializer)
   ret <4 x float> %load
 }
 
-define <8 x float> @masked_load_v8f32(<8 x float>* %src, <8 x i1> %mask) #0 {
+define <8 x float> @masked_load_v8f32(ptr %src, <8 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_load_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -289,11 +289,11 @@ define <8 x float> @masked_load_v8f32(<8 x float>* %src, <8 x i1> %mask) #0 {
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
-  %load = call <8 x float> @llvm.masked.load.v8f32(<8 x float>* %src, i32 8, <8 x i1> %mask, <8 x float> zeroinitializer)
+  %load = call <8 x float> @llvm.masked.load.v8f32(ptr %src, i32 8, <8 x i1> %mask, <8 x float> zeroinitializer)
   ret <8 x float> %load
 }
 
-define <2 x double> @masked_load_v2f64(<2 x double>* %src, <2 x i1> %mask) #0 {
+define <2 x double> @masked_load_v2f64(ptr %src, <2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_load_v2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -305,11 +305,11 @@ define <2 x double> @masked_load_v2f64(<2 x double>* %src, <2 x i1> %mask) #0 {
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %load = call <2 x double> @llvm.masked.load.v2f64(<2 x double>* %src, i32 8, <2 x i1> %mask, <2 x double> zeroinitializer)
+  %load = call <2 x double> @llvm.masked.load.v2f64(ptr %src, i32 8, <2 x i1> %mask, <2 x double> zeroinitializer)
   ret <2 x double> %load
 }
 
-define <4 x double> @masked_load_v4f64(<4 x double>* %src, <4 x i1> %mask) #0 {
+define <4 x double> @masked_load_v4f64(ptr %src, <4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_load_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -330,25 +330,25 @@ define <4 x double> @masked_load_v4f64(<4 x double>* %src, <4 x i1> %mask) #0 {
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    ret
-  %load = call <4 x double> @llvm.masked.load.v4f64(<4 x double>* %src, i32 8, <4 x i1> %mask, <4 x double> zeroinitializer)
+  %load = call <4 x double> @llvm.masked.load.v4f64(ptr %src, i32 8, <4 x i1> %mask, <4 x double> zeroinitializer)
   ret <4 x double> %load
 }
 
-declare <4 x i8> @llvm.masked.load.v4i8(<4 x i8>*, i32, <4 x i1>, <4 x i8>)
-declare <8 x i8> @llvm.masked.load.v8i8(<8 x i8>*, i32, <8 x i1>, <8 x i8>)
-declare <16 x i8> @llvm.masked.load.v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
-declare <32 x i8> @llvm.masked.load.v32i8(<32 x i8>*, i32, <32 x i1>, <32 x i8>)
+declare <4 x i8> @llvm.masked.load.v4i8(ptr, i32, <4 x i1>, <4 x i8>)
+declare <8 x i8> @llvm.masked.load.v8i8(ptr, i32, <8 x i1>, <8 x i8>)
+declare <16 x i8> @llvm.masked.load.v16i8(ptr, i32, <16 x i1>, <16 x i8>)
+declare <32 x i8> @llvm.masked.load.v32i8(ptr, i32, <32 x i1>, <32 x i8>)
 
-declare <2 x half> @llvm.masked.load.v2f16(<2 x half>*, i32, <2 x i1>, <2 x half>)
-declare <4 x half> @llvm.masked.load.v4f16(<4 x half>*, i32, <4 x i1>, <4 x half>)
-declare <8 x half> @llvm.masked.load.v8f16(<8 x half>*, i32, <8 x i1>, <8 x half>)
-declare <16 x half> @llvm.masked.load.v16f16(<16 x half>*, i32, <16 x i1>, <16 x half>)
+declare <2 x half> @llvm.masked.load.v2f16(ptr, i32, <2 x i1>, <2 x half>)
+declare <4 x half> @llvm.masked.load.v4f16(ptr, i32, <4 x i1>, <4 x half>)
+declare <8 x half> @llvm.masked.load.v8f16(ptr, i32, <8 x i1>, <8 x half>)
+declare <16 x half> @llvm.masked.load.v16f16(ptr, i32, <16 x i1>, <16 x half>)
 
-declare <2 x float> @llvm.masked.load.v2f32(<2 x float>*, i32, <2 x i1>, <2 x float>)
-declare <4 x float> @llvm.masked.load.v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
-declare <8 x float> @llvm.masked.load.v8f32(<8 x float>*, i32, <8 x i1>, <8 x float>)
+declare <2 x float> @llvm.masked.load.v2f32(ptr, i32, <2 x i1>, <2 x float>)
+declare <4 x float> @llvm.masked.load.v4f32(ptr, i32, <4 x i1>, <4 x float>)
+declare <8 x float> @llvm.masked.load.v8f32(ptr, i32, <8 x i1>, <8 x float>)
 
-declare <2 x double> @llvm.masked.load.v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)
-declare <4 x double> @llvm.masked.load.v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>)
+declare <2 x double> @llvm.masked.load.v2f64(ptr, i32, <2 x i1>, <2 x double>)
+declare <4 x double> @llvm.masked.load.v4f64(ptr, i32, <4 x i1>, <4 x double>)
 
 attributes #0 = { "target-features"="+sve" }

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll
index 930376ec4f6c9..32b8112b1eb9a 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-masked-store.ll
@@ -7,7 +7,7 @@ target triple = "aarch64-unknown-linux-gnu"
 ; Masked Store
 ;
 
-define void @masked_store_v4i8(<4 x i8>* %dst, <4 x i1> %mask) #0 {
+define void @masked_store_v4i8(ptr %dst, <4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_store_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -18,11 +18,11 @@ define void @masked_store_v4i8(<4 x i8>* %dst, <4 x i1> %mask) #0 {
 ; CHECK-NEXT:    mov z0.h, #0 // =0x0
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v4i8(<4 x i8> zeroinitializer, <4 x i8>* %dst, i32 8, <4 x i1> %mask)
+  call void @llvm.masked.store.v4i8(<4 x i8> zeroinitializer, ptr %dst, i32 8, <4 x i1> %mask)
   ret void
 }
 
-define void @masked_store_v8i8(<8 x i8>* %dst, <8 x i1> %mask) #0 {
+define void @masked_store_v8i8(ptr %dst, <8 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_store_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -33,11 +33,11 @@ define void @masked_store_v8i8(<8 x i8>* %dst, <8 x i1> %mask) #0 {
 ; CHECK-NEXT:    mov z0.b, #0 // =0x0
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v8i8(<8 x i8> zeroinitializer, <8 x i8>* %dst, i32 8, <8 x i1> %mask)
+  call void @llvm.masked.store.v8i8(<8 x i8> zeroinitializer, ptr %dst, i32 8, <8 x i1> %mask)
   ret void
 }
 
-define void @masked_store_v16i8(<16 x i8>* %dst, <16 x i1> %mask) #0 {
+define void @masked_store_v16i8(ptr %dst, <16 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_store_v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
@@ -48,11 +48,11 @@ define void @masked_store_v16i8(<16 x i8>* %dst, <16 x i1> %mask) #0 {
 ; CHECK-NEXT:    mov z0.b, #0 // =0x0
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v16i8(<16 x i8> zeroinitializer, <16 x i8>* %dst, i32 8, <16 x i1> %mask)
+  call void @llvm.masked.store.v16i8(<16 x i8> zeroinitializer, ptr %dst, i32 8, <16 x i1> %mask)
   ret void
 }
 
-define void @masked_store_v32i8(<32 x i8>* %dst, <32 x i1> %mask) #0 {
+define void @masked_store_v32i8(ptr %dst, <32 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_store_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #32
@@ -129,11 +129,11 @@ define void @masked_store_v32i8(<32 x i8>* %dst, <32 x i1> %mask) #0 {
 ; CHECK-NEXT:    st1b { z0.b }, p1, [x0]
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v32i8(<32 x i8> zeroinitializer, <32 x i8>* %dst, i32 8, <32 x i1> %mask)
+  call void @llvm.masked.store.v32i8(<32 x i8> zeroinitializer, ptr %dst, i32 8, <32 x i1> %mask)
   ret void
 }
 
-define void @masked_store_v2f16(<2 x half>* %dst, <2 x i1> %mask) #0 {
+define void @masked_store_v2f16(ptr %dst, <2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_store_v2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -154,11 +154,11 @@ define void @masked_store_v2f16(<2 x half>* %dst, <2 x i1> %mask) #0 {
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v2f16(<2 x half> zeroinitializer, <2 x half>* %dst, i32 8, <2 x i1> %mask)
+  call void @llvm.masked.store.v2f16(<2 x half> zeroinitializer, ptr %dst, i32 8, <2 x i1> %mask)
   ret void
 }
 
-define void @masked_store_v4f16(<4 x half>* %dst, <4 x i1> %mask) #0 {
+define void @masked_store_v4f16(ptr %dst, <4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_store_v4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -169,11 +169,11 @@ define void @masked_store_v4f16(<4 x half>* %dst, <4 x i1> %mask) #0 {
 ; CHECK-NEXT:    mov z0.h, #0 // =0x0
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v4f16(<4 x half> zeroinitializer, <4 x half>* %dst, i32 8, <4 x i1> %mask)
+  call void @llvm.masked.store.v4f16(<4 x half> zeroinitializer, ptr %dst, i32 8, <4 x i1> %mask)
   ret void
 }
 
-define void @masked_store_v8f16(<8 x half>* %dst, <8 x i1> %mask) #0 {
+define void @masked_store_v8f16(ptr %dst, <8 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_store_v8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -185,11 +185,11 @@ define void @masked_store_v8f16(<8 x half>* %dst, <8 x i1> %mask) #0 {
 ; CHECK-NEXT:    mov z0.h, #0 // =0x0
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v8f16(<8 x half> zeroinitializer, <8 x half>* %dst, i32 8, <8 x i1> %mask)
+  call void @llvm.masked.store.v8f16(<8 x half> zeroinitializer, ptr %dst, i32 8, <8 x i1> %mask)
   ret void
 }
 
-define void @masked_store_v16f16(<16 x half>* %dst, <16 x i1> %mask) #0 {
+define void @masked_store_v16f16(ptr %dst, <16 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_store_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
@@ -209,11 +209,11 @@ define void @masked_store_v16f16(<16 x half>* %dst, <16 x i1> %mask) #0 {
 ; CHECK-NEXT:    st1h { z1.h }, p1, [x0, x8, lsl #1]
 ; CHECK-NEXT:    st1h { z1.h }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v16f16(<16 x half> zeroinitializer, <16 x half>* %dst, i32 8, <16 x i1> %mask)
+  call void @llvm.masked.store.v16f16(<16 x half> zeroinitializer, ptr %dst, i32 8, <16 x i1> %mask)
   ret void
 }
 
-define void @masked_store_v4f32(<4 x float>* %dst, <4 x i1> %mask) #0 {
+define void @masked_store_v4f32(ptr %dst, <4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_store_v4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -225,11 +225,11 @@ define void @masked_store_v4f32(<4 x float>* %dst, <4 x i1> %mask) #0 {
 ; CHECK-NEXT:    mov z0.s, #0 // =0x0
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v4f32(<4 x float> zeroinitializer, <4 x float>* %dst, i32 8, <4 x i1> %mask)
+  call void @llvm.masked.store.v4f32(<4 x float> zeroinitializer, ptr %dst, i32 8, <4 x i1> %mask)
   ret void
 }
 
-define void @masked_store_v8f32(<8 x float>* %dst, <8 x i1> %mask) #0 {
+define void @masked_store_v8f32(ptr %dst, <8 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_store_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -275,11 +275,11 @@ define void @masked_store_v8f32(<8 x float>* %dst, <8 x i1> %mask) #0 {
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v8f32(<8 x float> zeroinitializer, <8 x float>* %dst, i32 8, <8 x i1> %mask)
+  call void @llvm.masked.store.v8f32(<8 x float> zeroinitializer, ptr %dst, i32 8, <8 x i1> %mask)
   ret void
 }
 
-define void @masked_store_v2f64(<2 x double>* %dst, <2 x i1> %mask) #0 {
+define void @masked_store_v2f64(ptr %dst, <2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_store_v2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -291,11 +291,11 @@ define void @masked_store_v2f64(<2 x double>* %dst, <2 x i1> %mask) #0 {
 ; CHECK-NEXT:    mov z0.d, #0 // =0x0
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v2f64(<2 x double> zeroinitializer, <2 x double>* %dst, i32 8, <2 x i1> %mask)
+  call void @llvm.masked.store.v2f64(<2 x double> zeroinitializer, ptr %dst, i32 8, <2 x i1> %mask)
   ret void
 }
 
-define void @masked_store_v4f64(<4 x double>* %dst, <4 x i1> %mask) #0 {
+define void @masked_store_v4f64(ptr %dst, <4 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_store_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -315,21 +315,21 @@ define void @masked_store_v4f64(<4 x double>* %dst, <4 x i1> %mask) #0 {
 ; CHECK-NEXT:    st1d { z0.d }, p1, [x0, x8, lsl #3]
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
-  call void @llvm.masked.store.v4f64(<4 x double> zeroinitializer, <4 x double>* %dst, i32 8, <4 x i1> %mask)
+  call void @llvm.masked.store.v4f64(<4 x double> zeroinitializer, ptr %dst, i32 8, <4 x i1> %mask)
   ret void
 }
 
-declare void @llvm.masked.store.v4i8(<4 x i8>, <4 x i8>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v8i8(<8 x i8>, <8 x i8>*, i32, <8 x i1>)
-declare void @llvm.masked.store.v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
-declare void @llvm.masked.store.v32i8(<32 x i8>, <32 x i8>*, i32, <32 x i1>)
-declare void @llvm.masked.store.v2f16(<2 x half>, <2 x half>*, i32, <2 x i1>)
-declare void @llvm.masked.store.v4f16(<4 x half>, <4 x half>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v8f16(<8 x half>, <8 x half>*, i32, <8 x i1>)
-declare void @llvm.masked.store.v16f16(<16 x half>, <16 x half>*, i32, <16 x i1>)
-declare void @llvm.masked.store.v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)
-declare void @llvm.masked.store.v8f32(<8 x float>, <8 x float>*, i32, <8 x i1>)
-declare void @llvm.masked.store.v2f64(<2 x double>, <2 x double>*, i32, <2 x i1>)
-declare void @llvm.masked.store.v4f64(<4 x double>, <4 x double>*, i32, <4 x i1>)
+declare void @llvm.masked.store.v4i8(<4 x i8>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v8i8(<8 x i8>, ptr, i32, <8 x i1>)
+declare void @llvm.masked.store.v16i8(<16 x i8>, ptr, i32, <16 x i1>)
+declare void @llvm.masked.store.v32i8(<32 x i8>, ptr, i32, <32 x i1>)
+declare void @llvm.masked.store.v2f16(<2 x half>, ptr, i32, <2 x i1>)
+declare void @llvm.masked.store.v4f16(<4 x half>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v8f16(<8 x half>, ptr, i32, <8 x i1>)
+declare void @llvm.masked.store.v16f16(<16 x half>, ptr, i32, <16 x i1>)
+declare void @llvm.masked.store.v4f32(<4 x float>, ptr, i32, <4 x i1>)
+declare void @llvm.masked.store.v8f32(<8 x float>, ptr, i32, <8 x i1>)
+declare void @llvm.masked.store.v2f64(<2 x double>, ptr, i32, <2 x i1>)
+declare void @llvm.masked.store.v4f64(<4 x double>, ptr, i32, <4 x i1>)
 
 attributes #0 = { "target-features"="+sve" }

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll
index f41e2ebb0a6e7..6b48ead00dcdb 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-rev.ll
@@ -44,7 +44,7 @@ define <16 x i8> @bitreverse_v16i8(<16 x i8> %op) #0 {
   ret <16 x i8> %res
 }
 
-define void @bitreverse_v32i8(<32 x i8>* %a) #0 {
+define void @bitreverse_v32i8(ptr %a) #0 {
 ; CHECK-LABEL: bitreverse_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -53,9 +53,9 @@ define void @bitreverse_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-NEXT:    rbit z1.b, p0/m, z1.b
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <32 x i8>, <32 x i8>* %a
+  %op = load <32 x i8>, ptr %a
   %res = call <32 x i8> @llvm.bitreverse.v32i8(<32 x i8> %op)
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -96,7 +96,7 @@ define <8 x i16> @bitreverse_v8i16(<8 x i16> %op) #0 {
   ret <8 x i16> %res
 }
 
-define void @bitreverse_v16i16(<16 x i16>* %a) #0 {
+define void @bitreverse_v16i16(ptr %a) #0 {
 ; CHECK-LABEL: bitreverse_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -105,9 +105,9 @@ define void @bitreverse_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-NEXT:    rbit z1.h, p0/m, z1.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <16 x i16>, <16 x i16>* %a
+  %op = load <16 x i16>, ptr %a
   %res = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> %op)
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -135,7 +135,7 @@ define <4 x i32> @bitreverse_v4i32(<4 x i32> %op) #0 {
   ret <4 x i32> %res
 }
 
-define void @bitreverse_v8i32(<8 x i32>* %a) #0 {
+define void @bitreverse_v8i32(ptr %a) #0 {
 ; CHECK-LABEL: bitreverse_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -144,9 +144,9 @@ define void @bitreverse_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-NEXT:    rbit z1.s, p0/m, z1.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <8 x i32>, <8 x i32>* %a
+  %op = load <8 x i32>, ptr %a
   %res = call <8 x i32> @llvm.bitreverse.v8i32(<8 x i32> %op)
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -174,7 +174,7 @@ define <2 x i64> @bitreverse_v2i64(<2 x i64> %op) #0 {
   ret <2 x i64> %res
 }
 
-define void @bitreverse_v4i64(<4 x i64>* %a) #0 {
+define void @bitreverse_v4i64(ptr %a) #0 {
 ; CHECK-LABEL: bitreverse_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -183,9 +183,9 @@ define void @bitreverse_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-NEXT:    rbit z1.d, p0/m, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <4 x i64>, <4 x i64>* %a
+  %op = load <4 x i64>, ptr %a
   %res = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> %op)
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 
@@ -247,7 +247,7 @@ define <8 x i16> @bswap_v8i16(<8 x i16> %op) #0 {
   ret <8 x i16> %res
 }
 
-define void @bswap_v16i16(<16 x i16>* %a) #0 {
+define void @bswap_v16i16(ptr %a) #0 {
 ; CHECK-LABEL: bswap_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -262,9 +262,9 @@ define void @bswap_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-NEXT:    orr z0.d, z0.d, z2.d
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <16 x i16>, <16 x i16>* %a
+  %op = load <16 x i16>, ptr %a
   %res = call <16 x i16> @llvm.bswap.v16i16(<16 x i16> %op)
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -314,7 +314,7 @@ define <4 x i32> @bswap_v4i32(<4 x i32> %op) #0 {
   ret <4 x i32> %res
 }
 
-define void @bswap_v8i32(<8 x i32>* %a) #0 {
+define void @bswap_v8i32(ptr %a) #0 {
 ; CHECK-LABEL: bswap_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -345,9 +345,9 @@ define void @bswap_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-NEXT:    orr z0.d, z0.d, z2.d
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <8 x i32>, <8 x i32>* %a
+  %op = load <8 x i32>, ptr %a
   %res = call <8 x i32> @llvm.bswap.v8i32(<8 x i32> %op)
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -431,7 +431,7 @@ define <2 x i64> @bswap_v2i64(<2 x i64> %op) #0 {
   ret <2 x i64> %res
 }
 
-define void @bswap_v4i64(<4 x i64>* %a) #0 {
+define void @bswap_v4i64(ptr %a) #0 {
 ; CHECK-LABEL: bswap_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
@@ -496,9 +496,9 @@ define void @bswap_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-NEXT:    orr z0.d, z0.d, z2.d
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
-  %op = load <4 x i64>, <4 x i64>* %a
+  %op = load <4 x i64>, ptr %a
   %res = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> %op)
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll
index 9045a383f2dc2..173e3b44db3fb 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-sdiv-pow2.ll
@@ -41,7 +41,7 @@ define <16 x i8> @sdiv_v16i8(<16 x i8> %op1) #0 {
   ret <16 x i8> %res
 }
 
-define void @sdiv_v32i8(<32 x i8>* %a) #0 {
+define void @sdiv_v32i8(ptr %a) #0 {
 ; CHECK-LABEL: sdiv_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -50,9 +50,9 @@ define void @sdiv_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-NEXT:    asrd z1.b, p0/m, z1.b, #5
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <32 x i8>, <32 x i8>* %a
+  %op1 = load <32 x i8>, ptr %a
   %res = sdiv <32 x i8> %op1, shufflevector (<32 x i8> insertelement (<32 x i8> poison, i8 32, i32 0), <32 x i8> poison, <32 x i32> zeroinitializer)
-  store <32 x i8> %res, <32 x i8>* %a
+  store <32 x i8> %res, ptr %a
   ret void
 }
 
@@ -94,7 +94,7 @@ define <8 x i16> @sdiv_v8i16(<8 x i16> %op1) #0 {
   ret <8 x i16> %res
 }
 
-define void @sdiv_v16i16(<16 x i16>* %a) #0 {
+define void @sdiv_v16i16(ptr %a) #0 {
 ; CHECK-LABEL: sdiv_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -103,9 +103,9 @@ define void @sdiv_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-NEXT:    asrd z1.h, p0/m, z1.h, #5
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i16>, <16 x i16>* %a
+  %op1 = load <16 x i16>, ptr %a
   %res = sdiv <16 x i16> %op1, shufflevector (<16 x i16> insertelement (<16 x i16> poison, i16 32, i32 0), <16 x i16> poison, <16 x i32> zeroinitializer)
-  store <16 x i16> %res, <16 x i16>* %a
+  store <16 x i16> %res, ptr %a
   ret void
 }
 
@@ -133,7 +133,7 @@ define <4 x i32> @sdiv_v4i32(<4 x i32> %op1) #0 {
   ret <4 x i32> %res
 }
 
-define void @sdiv_v8i32(<8 x i32>* %a) #0 {
+define void @sdiv_v8i32(ptr %a) #0 {
 ; CHECK-LABEL: sdiv_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -142,9 +142,9 @@ define void @sdiv_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-NEXT:    asrd z1.s, p0/m, z1.s, #5
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <8 x i32>, <8 x i32>* %a
+  %op1 = load <8 x i32>, ptr %a
   %res = sdiv <8 x i32> %op1, shufflevector (<8 x i32> insertelement (<8 x i32> poison, i32 32, i32 0), <8 x i32> poison, <8 x i32> zeroinitializer)
-  store <8 x i32> %res, <8 x i32>* %a
+  store <8 x i32> %res, ptr %a
   ret void
 }
 
@@ -173,7 +173,7 @@ define <2 x i64> @sdiv_v2i64(<2 x i64> %op1) #0 {
   ret <2 x i64> %res
 }
 
-define void @sdiv_v4i64(<4 x i64>* %a) #0 {
+define void @sdiv_v4i64(ptr %a) #0 {
 ; CHECK-LABEL: sdiv_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -182,9 +182,9 @@ define void @sdiv_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-NEXT:    asrd z1.d, p0/m, z1.d, #5
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <4 x i64>, <4 x i64>* %a
+  %op1 = load <4 x i64>, ptr %a
   %res = sdiv <4 x i64> %op1, shufflevector (<4 x i64> insertelement (<4 x i64> poison, i64 32, i32 0), <4 x i64> poison, <4 x i32> zeroinitializer)
-  store <4 x i64> %res, <4 x i64>* %a
+  store <4 x i64> %res, ptr %a
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll
index ffea4b4c50072..83894cff8259a 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll
@@ -7,7 +7,7 @@ target triple = "aarch64-unknown-linux-gnu"
 ; bigger than NEON. However, having no support opens us up to a code generator
 ; hang when expanding BUILD_VECTOR. Here we just validate that the problematic case
 ; successfully exits code generation.
-define void @hang_when_merging_stores_after_legalisation(<8 x i32>* %a, <2 x i32> %b) #0 {
+define void @hang_when_merging_stores_after_legalisation(ptr %a, <2 x i32> %b) #0 {
 ; CHECK-LABEL: hang_when_merging_stores_after_legalisation:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -16,12 +16,12 @@ define void @hang_when_merging_stores_after_legalisation(<8 x i32>* %a, <2 x i32
 ; CHECK-NEXT:    ret
   %splat = shufflevector <2 x i32> %b, <2 x i32> undef, <8 x i32> zeroinitializer
   %interleaved.vec = shufflevector <8 x i32> %splat, <8 x i32> undef, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  store <8 x i32> %interleaved.vec, <8 x i32>* %a, align 4
+  store <8 x i32> %interleaved.vec, ptr %a, align 4
   ret void
 }
 
 ; Ensure we don't crash when trying to lower a shuffle via an extract
-define void @crash_when_lowering_extract_shuffle(<32 x i32>* %dst, i1 %cond) #0 {
+define void @crash_when_lowering_extract_shuffle(ptr %dst, i1 %cond) #0 {
 ; CHECK-LABEL: crash_when_lowering_extract_shuffle:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ret
@@ -29,9 +29,9 @@ define void @crash_when_lowering_extract_shuffle(<32 x i32>* %dst, i1 %cond) #0
   br i1 %cond, label %exit, label %vector.body
 
 vector.body:
-  %1 = load <32 x i32>, <32 x i32>* %dst, align 16
+  %1 = load <32 x i32>, ptr %dst, align 16
   %predphi = select <32 x i1> %broadcast.splat, <32 x i32> zeroinitializer, <32 x i32> %1
-  store <32 x i32> %predphi, <32 x i32>* %dst, align 16
+  store <32 x i32> %predphi, ptr %dst, align 16
   br label %exit
 
 exit:

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll
index 9c32877584241..a8203cd639af1 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-splat-vector.ll
@@ -41,7 +41,7 @@ define <16 x i8> @splat_v16i8(i8 %a) #0 {
   ret <16 x i8> %splat
 }
 
-define void @splat_v32i8(i8 %a, <32 x i8>* %b) #0 {
+define void @splat_v32i8(i8 %a, ptr %b) #0 {
 ; CHECK-LABEL: splat_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.b, w0
@@ -49,7 +49,7 @@ define void @splat_v32i8(i8 %a, <32 x i8>* %b) #0 {
 ; CHECK-NEXT:    ret
   %insert = insertelement <32 x i8> undef, i8 %a, i64 0
   %splat = shufflevector <32 x i8> %insert, <32 x i8> undef, <32 x i32> zeroinitializer
-  store <32 x i8> %splat, <32 x i8>* %b
+  store <32 x i8> %splat, ptr %b
   ret void
 }
 
@@ -86,7 +86,7 @@ define <8 x i16> @splat_v8i16(i16 %a) #0 {
   ret <8 x i16> %splat
 }
 
-define void @splat_v16i16(i16 %a, <16 x i16>* %b) #0 {
+define void @splat_v16i16(i16 %a, ptr %b) #0 {
 ; CHECK-LABEL: splat_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.h, w0
@@ -94,7 +94,7 @@ define void @splat_v16i16(i16 %a, <16 x i16>* %b) #0 {
 ; CHECK-NEXT:    ret
   %insert = insertelement <16 x i16> undef, i16 %a, i64 0
   %splat = shufflevector <16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer
-  store <16 x i16> %splat, <16 x i16>* %b
+  store <16 x i16> %splat, ptr %b
   ret void
 }
 
@@ -120,7 +120,7 @@ define <4 x i32> @splat_v4i32(i32 %a) #0 {
   ret <4 x i32> %splat
 }
 
-define void @splat_v8i32(i32 %a, <8 x i32>* %b) #0 {
+define void @splat_v8i32(i32 %a, ptr %b) #0 {
 ; CHECK-LABEL: splat_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.s, w0
@@ -128,7 +128,7 @@ define void @splat_v8i32(i32 %a, <8 x i32>* %b) #0 {
 ; CHECK-NEXT:    ret
   %insert = insertelement <8 x i32> undef, i32 %a, i64 0
   %splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer
-  store <8 x i32> %splat, <8 x i32>* %b
+  store <8 x i32> %splat, ptr %b
   ret void
 }
 
@@ -154,7 +154,7 @@ define <2 x i64> @splat_v2i64(i64 %a) #0 {
   ret <2 x i64> %splat
 }
 
-define void @splat_v4i64(i64 %a, <4 x i64>* %b) #0 {
+define void @splat_v4i64(i64 %a, ptr %b) #0 {
 ; CHECK-LABEL: splat_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.d, x0
@@ -162,7 +162,7 @@ define void @splat_v4i64(i64 %a, <4 x i64>* %b) #0 {
 ; CHECK-NEXT:    ret
   %insert = insertelement <4 x i64> undef, i64 %a, i64 0
   %splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer
-  store <4 x i64> %splat, <4 x i64>* %b
+  store <4 x i64> %splat, ptr %b
   ret void
 }
 
@@ -206,7 +206,7 @@ define <8 x half> @splat_v8f16(half %a) #0 {
   ret <8 x half> %splat
 }
 
-define void @splat_v16f16(half %a, <16 x half>* %b) #0 {
+define void @splat_v16f16(half %a, ptr %b) #0 {
 ; CHECK-LABEL: splat_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $h0 killed $h0 def $z0
@@ -215,7 +215,7 @@ define void @splat_v16f16(half %a, <16 x half>* %b) #0 {
 ; CHECK-NEXT:    ret
   %insert = insertelement <16 x half> undef, half %a, i64 0
   %splat = shufflevector <16 x half> %insert, <16 x half> undef, <16 x i32> zeroinitializer
-  store <16 x half> %splat, <16 x half>* %b
+  store <16 x half> %splat, ptr %b
   ret void
 }
 
@@ -243,7 +243,7 @@ define <4 x float> @splat_v4f32(float %a, <4 x float> %op2) #0 {
   ret <4 x float> %splat
 }
 
-define void @splat_v8f32(float %a, <8 x float>* %b) #0 {
+define void @splat_v8f32(float %a, ptr %b) #0 {
 ; CHECK-LABEL: splat_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $z0
@@ -252,7 +252,7 @@ define void @splat_v8f32(float %a, <8 x float>* %b) #0 {
 ; CHECK-NEXT:    ret
   %insert = insertelement <8 x float> undef, float %a, i64 0
   %splat = shufflevector <8 x float> %insert, <8 x float> undef, <8 x i32> zeroinitializer
-  store <8 x float> %splat, <8 x float>* %b
+  store <8 x float> %splat, ptr %b
   ret void
 }
 
@@ -277,7 +277,7 @@ define <2 x double> @splat_v2f64(double %a, <2 x double> %op2) #0 {
   ret <2 x double> %splat
 }
 
-define void @splat_v4f64(double %a, <4 x double>* %b) #0 {
+define void @splat_v4f64(double %a, ptr %b) #0 {
 ; CHECK-LABEL: splat_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
@@ -286,7 +286,7 @@ define void @splat_v4f64(double %a, <4 x double>* %b) #0 {
 ; CHECK-NEXT:    ret
   %insert = insertelement <4 x double> undef, double %a, i64 0
   %splat = shufflevector <4 x double> %insert, <4 x double> undef, <4 x i32> zeroinitializer
-  store <4 x double> %splat, <4 x double>* %b
+  store <4 x double> %splat, ptr %b
   ret void
 }
 
@@ -294,7 +294,7 @@ define void @splat_v4f64(double %a, <4 x double>* %b) #0 {
 ; DUP (integer immediate)
 ;
 
-define void @splat_imm_v32i8(<32 x i8>* %a) #0 {
+define void @splat_imm_v32i8(ptr %a) #0 {
 ; CHECK-LABEL: splat_imm_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.b, #1 // =0x1
@@ -302,11 +302,11 @@ define void @splat_imm_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-NEXT:    ret
   %insert = insertelement <32 x i8> undef, i8 1, i64 0
   %splat = shufflevector <32 x i8> %insert, <32 x i8> undef, <32 x i32> zeroinitializer
-  store <32 x i8> %splat, <32 x i8>* %a
+  store <32 x i8> %splat, ptr %a
   ret void
 }
 
-define void @splat_imm_v16i16(<16 x i16>* %a) #0 {
+define void @splat_imm_v16i16(ptr %a) #0 {
 ; CHECK-LABEL: splat_imm_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.h, #2 // =0x2
@@ -314,11 +314,11 @@ define void @splat_imm_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-NEXT:    ret
   %insert = insertelement <16 x i16> undef, i16 2, i64 0
   %splat = shufflevector <16 x i16> %insert, <16 x i16> undef, <16 x i32> zeroinitializer
-  store <16 x i16> %splat, <16 x i16>* %a
+  store <16 x i16> %splat, ptr %a
   ret void
 }
 
-define void @splat_imm_v8i32(<8 x i32>* %a) #0 {
+define void @splat_imm_v8i32(ptr %a) #0 {
 ; CHECK-LABEL: splat_imm_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.s, #3 // =0x3
@@ -326,11 +326,11 @@ define void @splat_imm_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-NEXT:    ret
   %insert = insertelement <8 x i32> undef, i32 3, i64 0
   %splat = shufflevector <8 x i32> %insert, <8 x i32> undef, <8 x i32> zeroinitializer
-  store <8 x i32> %splat, <8 x i32>* %a
+  store <8 x i32> %splat, ptr %a
   ret void
 }
 
-define void @splat_imm_v4i64(<4 x i64>* %a) #0 {
+define void @splat_imm_v4i64(ptr %a) #0 {
 ; CHECK-LABEL: splat_imm_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.d, #4 // =0x4
@@ -338,7 +338,7 @@ define void @splat_imm_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-NEXT:    ret
   %insert = insertelement <4 x i64> undef, i64 4, i64 0
   %splat = shufflevector <4 x i64> %insert, <4 x i64> undef, <4 x i32> zeroinitializer
-  store <4 x i64> %splat, <4 x i64>* %a
+  store <4 x i64> %splat, ptr %a
   ret void
 }
 
@@ -346,7 +346,7 @@ define void @splat_imm_v4i64(<4 x i64>* %a) #0 {
 ; DUP (floating-point immediate)
 ;
 
-define void @splat_imm_v16f16(<16 x half>* %a) #0 {
+define void @splat_imm_v16f16(ptr %a) #0 {
 ; CHECK-LABEL: splat_imm_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov z0.h, #5.00000000
@@ -354,11 +354,11 @@ define void @splat_imm_v16f16(<16 x half>* %a) #0 {
 ; CHECK-NEXT:    ret
   %insert = insertelement <16 x half> undef, half 5.0, i64 0
   %splat = shufflevector <16 x half> %insert, <16 x half> undef, <16 x i32> zeroinitializer
-  store <16 x half> %splat, <16 x half>* %a
+  store <16 x half> %splat, ptr %a
   ret void
 }
 
-define void @splat_imm_v8f32(<8 x float>* %a) #0 {
+define void @splat_imm_v8f32(ptr %a) #0 {
 ; CHECK-LABEL: splat_imm_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov z0.s, #6.00000000
@@ -366,11 +366,11 @@ define void @splat_imm_v8f32(<8 x float>* %a) #0 {
 ; CHECK-NEXT:    ret
   %insert = insertelement <8 x float> undef, float 6.0, i64 0
   %splat = shufflevector <8 x float> %insert, <8 x float> undef, <8 x i32> zeroinitializer
-  store <8 x float> %splat, <8 x float>* %a
+  store <8 x float> %splat, ptr %a
   ret void
 }
 
-define void @splat_imm_v4f64(<4 x double>* %a) #0 {
+define void @splat_imm_v4f64(ptr %a) #0 {
 ; CHECK-LABEL: splat_imm_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov z0.d, #7.00000000
@@ -378,7 +378,7 @@ define void @splat_imm_v4f64(<4 x double>* %a) #0 {
 ; CHECK-NEXT:    ret
   %insert = insertelement <4 x double> undef, double 7.0, i64 0
   %splat = shufflevector <4 x double> %insert, <4 x double> undef, <4 x i32> zeroinitializer
-  store <4 x double> %splat, <4 x double>* %a
+  store <4 x double> %splat, ptr %a
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll
index 8298281d6d8db..ca08895b9df27 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll
@@ -3,240 +3,240 @@
 
 target triple = "aarch64-unknown-linux-gnu"
 
-define void @store_v4i8(<4 x i8>* %a) #0 {
+define void @store_v4i8(ptr %a) #0 {
 ; CHECK-LABEL: store_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h, vl4
 ; CHECK-NEXT:    mov z0.h, #0 // =0x0
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
-  store <4 x i8> zeroinitializer, <4 x i8>* %a
+  store <4 x i8> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v8i8(<8 x i8>* %a) #0 {
+define void @store_v8i8(ptr %a) #0 {
 ; CHECK-LABEL: store_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.b, #0 // =0x0
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
-  store <8 x i8> zeroinitializer, <8 x i8>* %a
+  store <8 x i8> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v16i8(<16 x i8>* %a) #0 {
+define void @store_v16i8(ptr %a) #0 {
 ; CHECK-LABEL: store_v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.b, #0 // =0x0
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
-  store <16 x i8> zeroinitializer, <16 x i8>* %a
+  store <16 x i8> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v32i8(<32 x i8>* %a) #0 {
+define void @store_v32i8(ptr %a) #0 {
 ; CHECK-LABEL: store_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.b, #0 // =0x0
 ; CHECK-NEXT:    stp q0, q0, [x0]
 ; CHECK-NEXT:    ret
-  store <32 x i8> zeroinitializer, <32 x i8>* %a
+  store <32 x i8> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v2i16(<2 x i16>* %a) #0 {
+define void @store_v2i16(ptr %a) #0 {
 ; CHECK-LABEL: store_v2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s, vl2
 ; CHECK-NEXT:    mov z0.s, #0 // =0x0
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
-  store <2 x i16> zeroinitializer, <2 x i16>* %a
+  store <2 x i16> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v2f16(<2 x half>* %a) #0 {
+define void @store_v2f16(ptr %a) #0 {
 ; CHECK-LABEL: store_v2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.h, #0 // =0x0
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
-  store <2 x half> zeroinitializer, <2 x half>* %a
+  store <2 x half> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v4i16(<4 x i16>* %a) #0 {
+define void @store_v4i16(ptr %a) #0 {
 ; CHECK-LABEL: store_v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.h, #0 // =0x0
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
-  store <4 x i16> zeroinitializer, <4 x i16>* %a
+  store <4 x i16> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v4f16(<4 x half>* %a) #0 {
+define void @store_v4f16(ptr %a) #0 {
 ; CHECK-LABEL: store_v4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.h, #0 // =0x0
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
-  store <4 x half> zeroinitializer, <4 x half>* %a
+  store <4 x half> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v8i16(<8 x i16>* %a) #0 {
+define void @store_v8i16(ptr %a) #0 {
 ; CHECK-LABEL: store_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.h, #0 // =0x0
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
-  store <8 x i16> zeroinitializer, <8 x i16>* %a
+  store <8 x i16> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v8f16(<8 x half>* %a) #0 {
+define void @store_v8f16(ptr %a) #0 {
 ; CHECK-LABEL: store_v8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.h, #0 // =0x0
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
-  store <8 x half> zeroinitializer, <8 x half>* %a
+  store <8 x half> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v16i16(<16 x i16>* %a) #0 {
+define void @store_v16i16(ptr %a) #0 {
 ; CHECK-LABEL: store_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.h, #0 // =0x0
 ; CHECK-NEXT:    stp q0, q0, [x0]
 ; CHECK-NEXT:    ret
-  store <16 x i16> zeroinitializer, <16 x i16>* %a
+  store <16 x i16> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v16f16(<16 x half>* %a) #0 {
+define void @store_v16f16(ptr %a) #0 {
 ; CHECK-LABEL: store_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.h, #0 // =0x0
 ; CHECK-NEXT:    stp q0, q0, [x0]
 ; CHECK-NEXT:    ret
-  store <16 x half> zeroinitializer, <16 x half>* %a
+  store <16 x half> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v2i32(<2 x i32>* %a) #0 {
+define void @store_v2i32(ptr %a) #0 {
 ; CHECK-LABEL: store_v2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str xzr, [x0]
 ; CHECK-NEXT:    ret
-  store <2 x i32> zeroinitializer, <2 x i32>* %a
+  store <2 x i32> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v2f32(<2 x float>* %a) #0 {
+define void @store_v2f32(ptr %a) #0 {
 ; CHECK-LABEL: store_v2f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str xzr, [x0]
 ; CHECK-NEXT:    ret
-  store <2 x float> zeroinitializer, <2 x float>* %a
+  store <2 x float> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v4i32(<4 x i32>* %a) #0 {
+define void @store_v4i32(ptr %a) #0 {
 ; CHECK-LABEL: store_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp xzr, xzr, [x0]
 ; CHECK-NEXT:    ret
-  store <4 x i32> zeroinitializer, <4 x i32>* %a
+  store <4 x i32> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v4f32(<4 x float>* %a) #0 {
+define void @store_v4f32(ptr %a) #0 {
 ; CHECK-LABEL: store_v4f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp xzr, xzr, [x0]
 ; CHECK-NEXT:    ret
-  store <4 x float> zeroinitializer, <4 x float>* %a
+  store <4 x float> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v8i32(<8 x i32>* %a) #0 {
+define void @store_v8i32(ptr %a) #0 {
 ; CHECK-LABEL: store_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.s, #0 // =0x0
 ; CHECK-NEXT:    stp q0, q0, [x0]
 ; CHECK-NEXT:    ret
-  store <8 x i32> zeroinitializer, <8 x i32>* %a
+  store <8 x i32> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v8f32(<8 x float>* %a) #0 {
+define void @store_v8f32(ptr %a) #0 {
 ; CHECK-LABEL: store_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.s, #0 // =0x0
 ; CHECK-NEXT:    stp q0, q0, [x0]
 ; CHECK-NEXT:    ret
-  store <8 x float> zeroinitializer, <8 x float>* %a
+  store <8 x float> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v1i64(<1 x i64>* %a) #0 {
+define void @store_v1i64(ptr %a) #0 {
 ; CHECK-LABEL: store_v1i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.d, #0 // =0x0
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
-  store <1 x i64> zeroinitializer, <1 x i64>* %a
+  store <1 x i64> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v1f64(<1 x double>* %a) #0 {
+define void @store_v1f64(ptr %a) #0 {
 ; CHECK-LABEL: store_v1f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi d0, #0000000000000000
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
-  store <1 x double> zeroinitializer, <1 x double>* %a
+  store <1 x double> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v2i64(<2 x i64>* %a) #0 {
+define void @store_v2i64(ptr %a) #0 {
 ; CHECK-LABEL: store_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp xzr, xzr, [x0]
 ; CHECK-NEXT:    ret
-  store <2 x i64> zeroinitializer, <2 x i64>* %a
+  store <2 x i64> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v2f64(<2 x double>* %a) #0 {
+define void @store_v2f64(ptr %a) #0 {
 ; CHECK-LABEL: store_v2f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stp xzr, xzr, [x0]
 ; CHECK-NEXT:    ret
-  store <2 x double> zeroinitializer, <2 x double>* %a
+  store <2 x double> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v4i64(<4 x i64>* %a) #0 {
+define void @store_v4i64(ptr %a) #0 {
 ; CHECK-LABEL: store_v4i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.d, #0 // =0x0
 ; CHECK-NEXT:    stp q0, q0, [x0]
 ; CHECK-NEXT:    ret
-  store <4 x i64> zeroinitializer, <4 x i64>* %a
+  store <4 x i64> zeroinitializer, ptr %a
   ret void
 }
 
-define void @store_v4f64(<4 x double>* %a) #0 {
+define void @store_v4f64(ptr %a) #0 {
 ; CHECK-LABEL: store_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.d, #0 // =0x0
 ; CHECK-NEXT:    stp q0, q0, [x0]
 ; CHECK-NEXT:    ret
-  store <4 x double> zeroinitializer, <4 x double>* %a
+  store <4 x double> zeroinitializer, ptr %a
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll
index 73c8e2aee5a04..8aae6a70181db 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll
@@ -3,20 +3,20 @@
 
 target triple = "aarch64-unknown-linux-gnu"
 
-define void @store_trunc_v8i16i8(<8 x i16>* %ap, <8 x i8>* %dest) #0 {
+define void @store_trunc_v8i16i8(ptr %ap, ptr %dest) #0 {
 ; CHECK-LABEL: store_trunc_v8i16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
 ; CHECK-NEXT:    str d0, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <8 x i16>, <8 x i16>* %ap
+  %a = load <8 x i16>, ptr %ap
   %val = trunc <8 x i16> %a to <8 x i8>
-  store <8 x i8> %val, <8 x i8>* %dest
+  store <8 x i8> %val, ptr %dest
   ret void
 }
 
-define void @store_trunc_v4i32i8(<4 x i32>* %ap, <4 x i8>* %dest) #0 {
+define void @store_trunc_v4i32i8(ptr %ap, ptr %dest) #0 {
 ; CHECK-LABEL: store_trunc_v4i32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
@@ -24,39 +24,39 @@ define void @store_trunc_v4i32i8(<4 x i32>* %ap, <4 x i8>* %dest) #0 {
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <4 x i32>, <4 x i32>* %ap
+  %a = load <4 x i32>, ptr %ap
   %val = trunc <4 x i32> %a to <4 x i8>
-  store <4 x i8> %val, <4 x i8>* %dest
+  store <4 x i8> %val, ptr %dest
   ret void
 }
 
-define void @store_trunc_v4i32i16(<4 x i32>* %ap, <4 x i16>* %dest) #0 {
+define void @store_trunc_v4i32i16(ptr %ap, ptr %dest) #0 {
 ; CHECK-LABEL: store_trunc_v4i32i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; CHECK-NEXT:    str d0, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <4 x i32>, <4 x i32>* %ap
+  %a = load <4 x i32>, ptr %ap
   %val = trunc <4 x i32> %a to <4 x i16>
-  store <4 x i16> %val, <4 x i16>* %dest
+  store <4 x i16> %val, ptr %dest
   ret void
 }
 
-define void @store_trunc_v2i64i8(<2 x i64>* %ap, <2 x i32>* %dest) #0 {
+define void @store_trunc_v2i64i8(ptr %ap, ptr %dest) #0 {
 ; CHECK-LABEL: store_trunc_v2i64i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <2 x i64>, <2 x i64>* %ap
+  %a = load <2 x i64>, ptr %ap
   %val = trunc <2 x i64> %a to <2 x i32>
-  store <2 x i32> %val, <2 x i32>* %dest
+  store <2 x i32> %val, ptr %dest
   ret void
 }
 
-define void @store_trunc_v2i256i64(<2 x i256>* %ap, <2 x i64>* %dest) #0 {
+define void @store_trunc_v2i256i64(ptr %ap, ptr %dest) #0 {
 ; CHECK-LABEL: store_trunc_v2i256i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0, #32]
@@ -65,9 +65,9 @@ define void @store_trunc_v2i256i64(<2 x i256>* %ap, <2 x i64>* %dest) #0 {
 ; CHECK-NEXT:    splice z1.d, p0, z1.d, z0.d
 ; CHECK-NEXT:    str q1, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <2 x i256>, <2 x i256>* %ap
+  %a = load <2 x i256>, ptr %ap
   %val = trunc <2 x i256> %a to <2 x i64>
-  store <2 x i64> %val, <2 x i64>* %dest
+  store <2 x i64> %val, ptr %dest
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll
index 503f800a88dbb..4200605c66350 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc.ll
@@ -7,7 +7,7 @@ target triple = "aarch64-unknown-linux-gnu"
 ; truncate i16 -> i8
 ;
 
-define <16 x i8> @trunc_v16i16_v16i8(<16 x i16>* %in) #0 {
+define <16 x i8> @trunc_v16i16_v16i8(ptr %in) #0 {
 ; CHECK-LABEL: trunc_v16i16_v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -17,13 +17,13 @@ define <16 x i8> @trunc_v16i16_v16i8(<16 x i16>* %in) #0 {
 ; CHECK-NEXT:    splice z0.b, p0, z0.b, z1.b
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %a = load <16 x i16>, <16 x i16>* %in
+  %a = load <16 x i16>, ptr %in
   %b = trunc <16 x i16> %a to <16 x i8>
   ret <16 x i8> %b
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v32i16_v32i8(<32 x i16>* %in, <32 x i8>* %out) #0 {
+define void @trunc_v32i16_v32i8(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: trunc_v32i16_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #32]
@@ -39,15 +39,15 @@ define void @trunc_v32i16_v32i8(<32 x i16>* %in, <32 x i8>* %out) #0 {
 ; CHECK-NEXT:    add z1.b, z3.b, z3.b
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <32 x i16>, <32 x i16>* %in
+  %a = load <32 x i16>, ptr %in
   %b = trunc <32 x i16> %a to <32 x i8>
   %c = add <32 x i8> %b, %b
-  store <32 x i8> %c, <32 x i8>* %out
+  store <32 x i8> %c, ptr %out
   ret void
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v64i16_v64i8(<64 x i16>* %in, <64 x i8>* %out) #0 {
+define void @trunc_v64i16_v64i8(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: trunc_v64i16_v64i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #64]
@@ -74,15 +74,15 @@ define void @trunc_v64i16_v64i8(<64 x i16>* %in, <64 x i8>* %out) #0 {
 ; CHECK-NEXT:    add z1.b, z3.b, z3.b
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <64 x i16>, <64 x i16>* %in
+  %a = load <64 x i16>, ptr %in
   %b = trunc <64 x i16> %a to <64 x i8>
   %c = add <64 x i8> %b, %b
-  store <64 x i8> %c, <64 x i8>* %out
+  store <64 x i8> %c, ptr %out
   ret void
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v128i16_v128i8(<128 x i16>* %in, <128 x i8>* %out) #0 {
+define void @trunc_v128i16_v128i8(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: trunc_v128i16_v128i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #192]
@@ -131,10 +131,10 @@ define void @trunc_v128i16_v128i8(<128 x i16>* %in, <128 x i8>* %out) #0 {
 ; CHECK-NEXT:    add z0.b, z18.b, z18.b
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <128 x i16>, <128 x i16>* %in
+  %a = load <128 x i16>, ptr %in
   %b = trunc <128 x i16> %a to <128 x i8>
   %c = add <128 x i8> %b, %b
-  store <128 x i8> %c, <128 x i8>* %out
+  store <128 x i8> %c, ptr %out
   ret void
 }
 
@@ -142,7 +142,7 @@ define void @trunc_v128i16_v128i8(<128 x i16>* %in, <128 x i8>* %out) #0 {
 ; truncate i32 -> i8
 ;
 
-define <8 x i8> @trunc_v8i32_v8i8(<8 x i32>* %in) #0 {
+define <8 x i8> @trunc_v8i32_v8i8(ptr %in) #0 {
 ; CHECK-LABEL: trunc_v8i32_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -153,12 +153,12 @@ define <8 x i8> @trunc_v8i32_v8i8(<8 x i32>* %in) #0 {
 ; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
-  %a = load <8 x i32>, <8 x i32>* %in
+  %a = load <8 x i32>, ptr %in
   %b = trunc <8 x i32> %a to <8 x i8>
   ret <8 x i8> %b
 }
 
-define <16 x i8> @trunc_v16i32_v16i8(<16 x i32>* %in) #0 {
+define <16 x i8> @trunc_v16i32_v16i8(ptr %in) #0 {
 ; CHECK-LABEL: trunc_v16i32_v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #32]
@@ -176,13 +176,13 @@ define <16 x i8> @trunc_v16i32_v16i8(<16 x i32>* %in) #0 {
 ; CHECK-NEXT:    splice z0.b, p0, z0.b, z1.b
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %a = load <16 x i32>, <16 x i32>* %in
+  %a = load <16 x i32>, ptr %in
   %b = trunc <16 x i32> %a to <16 x i8>
   ret <16 x i8> %b
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v32i32_v32i8(<32 x i32>* %in, <32 x i8>* %out) #0 {
+define void @trunc_v32i32_v32i8(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: trunc_v32i32_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #96]
@@ -213,15 +213,15 @@ define void @trunc_v32i32_v32i8(<32 x i32>* %in, <32 x i8>* %out) #0 {
 ; CHECK-NEXT:    add z1.b, z2.b, z2.b
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <32 x i32>, <32 x i32>* %in
+  %a = load <32 x i32>, ptr %in
   %b = trunc <32 x i32> %a to <32 x i8>
   %c = add <32 x i8> %b, %b
-  store <32 x i8> %c, <32 x i8>* %out
+  store <32 x i8> %c, ptr %out
   ret void
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v64i32_v64i8(<64 x i32>* %in, <64 x i8>* %out) #0 {
+define void @trunc_v64i32_v64i8(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: trunc_v64i32_v64i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #128]
@@ -277,10 +277,10 @@ define void @trunc_v64i32_v64i8(<64 x i32>* %in, <64 x i8>* %out) #0 {
 ; CHECK-NEXT:    add z0.b, z3.b, z3.b
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <64 x i32>, <64 x i32>* %in
+  %a = load <64 x i32>, ptr %in
   %b = trunc <64 x i32> %a to <64 x i8>
   %c = add <64 x i8> %b, %b
-  store <64 x i8> %c, <64 x i8>* %out
+  store <64 x i8> %c, ptr %out
   ret void
 }
 
@@ -288,7 +288,7 @@ define void @trunc_v64i32_v64i8(<64 x i32>* %in, <64 x i8>* %out) #0 {
 ; truncate i32 -> i16
 ;
 
-define <8 x i16> @trunc_v8i32_v8i16(<8 x i32>* %in) #0 {
+define <8 x i16> @trunc_v8i32_v8i16(ptr %in) #0 {
 ; CHECK-LABEL: trunc_v8i32_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -298,13 +298,13 @@ define <8 x i16> @trunc_v8i32_v8i16(<8 x i32>* %in) #0 {
 ; CHECK-NEXT:    splice z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %a = load <8 x i32>, <8 x i32>* %in
+  %a = load <8 x i32>, ptr %in
   %b = trunc <8 x i32> %a to <8 x i16>
   ret <8 x i16> %b
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v16i32_v16i16(<16 x i32>* %in, <16 x i16>* %out) #0 {
+define void @trunc_v16i32_v16i16(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: trunc_v16i32_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #32]
@@ -320,15 +320,15 @@ define void @trunc_v16i32_v16i16(<16 x i32>* %in, <16 x i16>* %out) #0 {
 ; CHECK-NEXT:    add z1.h, z3.h, z3.h
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <16 x i32>, <16 x i32>* %in
+  %a = load <16 x i32>, ptr %in
   %b = trunc <16 x i32> %a to <16 x i16>
   %c = add <16 x i16> %b, %b
-  store <16 x i16> %c, <16 x i16>* %out
+  store <16 x i16> %c, ptr %out
   ret void
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v32i32_v32i16(<32 x i32>* %in, <32 x i16>* %out) #0 {
+define void @trunc_v32i32_v32i16(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: trunc_v32i32_v32i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #64]
@@ -355,15 +355,15 @@ define void @trunc_v32i32_v32i16(<32 x i32>* %in, <32 x i16>* %out) #0 {
 ; CHECK-NEXT:    add z1.h, z3.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <32 x i32>, <32 x i32>* %in
+  %a = load <32 x i32>, ptr %in
   %b = trunc <32 x i32> %a to <32 x i16>
   %c = add <32 x i16> %b, %b
-  store <32 x i16> %c, <32 x i16>* %out
+  store <32 x i16> %c, ptr %out
   ret void
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v64i32_v64i16(<64 x i32>* %in, <64 x i16>* %out) #0 {
+define void @trunc_v64i32_v64i16(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: trunc_v64i32_v64i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #192]
@@ -412,10 +412,10 @@ define void @trunc_v64i32_v64i16(<64 x i32>* %in, <64 x i16>* %out) #0 {
 ; CHECK-NEXT:    add z0.h, z18.h, z18.h
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <64 x i32>, <64 x i32>* %in
+  %a = load <64 x i32>, ptr %in
   %b = trunc <64 x i32> %a to <64 x i16>
   %c = add <64 x i16> %b, %b
-  store <64 x i16> %c, <64 x i16>* %out
+  store <64 x i16> %c, ptr %out
   ret void
 }
 
@@ -424,7 +424,7 @@ define void @trunc_v64i32_v64i16(<64 x i32>* %in, <64 x i16>* %out) #0 {
 ;
 
 ; NOTE: v4i8 is not legal so result i8 elements are held within i16 containers.
-define <4 x i8> @trunc_v4i64_v4i8(<4 x i64>* %in) #0 {
+define <4 x i8> @trunc_v4i64_v4i8(ptr %in) #0 {
 ; CHECK-LABEL: trunc_v4i64_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -435,12 +435,12 @@ define <4 x i8> @trunc_v4i64_v4i8(<4 x i64>* %in) #0 {
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
-  %a = load <4 x i64>, <4 x i64>* %in
+  %a = load <4 x i64>, ptr %in
   %b = trunc <4 x i64> %a to <4 x i8>
   ret <4 x i8> %b
 }
 
-define <8 x i8> @trunc_v8i64_v8i8(<8 x i64>* %in) #0 {
+define <8 x i8> @trunc_v8i64_v8i8(ptr %in) #0 {
 ; CHECK-LABEL: trunc_v8i64_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #32]
@@ -459,12 +459,12 @@ define <8 x i8> @trunc_v8i64_v8i8(<8 x i64>* %in) #0 {
 ; CHECK-NEXT:    uzp1 z0.b, z1.b, z1.b
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
-  %a = load <8 x i64>, <8 x i64>* %in
+  %a = load <8 x i64>, ptr %in
   %b = trunc <8 x i64> %a to <8 x i8>
   ret <8 x i8> %b
 }
 
-define <16 x i8> @trunc_v16i64_v16i8(<16 x i64>* %in) #0 {
+define <16 x i8> @trunc_v16i64_v16i8(ptr %in) #0 {
 ; CHECK-LABEL: trunc_v16i64_v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #96]
@@ -497,13 +497,13 @@ define <16 x i8> @trunc_v16i64_v16i8(<16 x i64>* %in) #0 {
 ; CHECK-NEXT:    splice z0.b, p0, z0.b, z1.b
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %a = load <16 x i64>, <16 x i64>* %in
+  %a = load <16 x i64>, ptr %in
   %b = trunc <16 x i64> %a to <16 x i8>
   ret <16 x i8> %b
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v32i64_v32i8(<32 x i64>* %in, <32 x i8>* %out) #0 {
+define void @trunc_v32i64_v32i8(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: trunc_v32i64_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #224]
@@ -563,10 +563,10 @@ define void @trunc_v32i64_v32i8(<32 x i64>* %in, <32 x i8>* %out) #0 {
 ; CHECK-NEXT:    add z1.b, z2.b, z2.b
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <32 x i64>, <32 x i64>* %in
+  %a = load <32 x i64>, ptr %in
   %b = trunc <32 x i64> %a to <32 x i8>
   %c = add <32 x i8> %b, %b
-  store <32 x i8> %c, <32 x i8>* %out
+  store <32 x i8> %c, ptr %out
   ret void
 }
 
@@ -574,7 +574,7 @@ define void @trunc_v32i64_v32i8(<32 x i64>* %in, <32 x i8>* %out) #0 {
 ; truncate i64 -> i16
 ;
 
-define <4 x i16> @trunc_v4i64_v4i16(<4 x i64>* %in) #0 {
+define <4 x i16> @trunc_v4i64_v4i16(ptr %in) #0 {
 ; CHECK-LABEL: trunc_v4i64_v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -585,12 +585,12 @@ define <4 x i16> @trunc_v4i64_v4i16(<4 x i64>* %in) #0 {
 ; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
-  %a = load <4 x i64>, <4 x i64>* %in
+  %a = load <4 x i64>, ptr %in
   %b = trunc <4 x i64> %a to <4 x i16>
   ret <4 x i16> %b
 }
 
-define <8 x i16> @trunc_v8i64_v8i16(<8 x i64>* %in) #0 {
+define <8 x i16> @trunc_v8i64_v8i16(ptr %in) #0 {
 ; CHECK-LABEL: trunc_v8i64_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #32]
@@ -608,13 +608,13 @@ define <8 x i16> @trunc_v8i64_v8i16(<8 x i64>* %in) #0 {
 ; CHECK-NEXT:    splice z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %a = load <8 x i64>, <8 x i64>* %in
+  %a = load <8 x i64>, ptr %in
   %b = trunc <8 x i64> %a to <8 x i16>
   ret <8 x i16> %b
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v16i64_v16i16(<16 x i64>* %in, <16 x i16>* %out) #0 {
+define void @trunc_v16i64_v16i16(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: trunc_v16i64_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #96]
@@ -645,15 +645,15 @@ define void @trunc_v16i64_v16i16(<16 x i64>* %in, <16 x i16>* %out) #0 {
 ; CHECK-NEXT:    add z1.h, z2.h, z2.h
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <16 x i64>, <16 x i64>* %in
+  %a = load <16 x i64>, ptr %in
   %b = trunc <16 x i64> %a to <16 x i16>
   %c = add <16 x i16> %b, %b
-  store <16 x i16> %c, <16 x i16>* %out
+  store <16 x i16> %c, ptr %out
   ret void
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v32i64_v32i16(<32 x i64>* %in, <32 x i16>* %out) #0 {
+define void @trunc_v32i64_v32i16(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: trunc_v32i64_v32i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #128]
@@ -709,10 +709,10 @@ define void @trunc_v32i64_v32i16(<32 x i64>* %in, <32 x i16>* %out) #0 {
 ; CHECK-NEXT:    add z0.h, z3.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <32 x i64>, <32 x i64>* %in
+  %a = load <32 x i64>, ptr %in
   %b = trunc <32 x i64> %a to <32 x i16>
   %c = add <32 x i16> %b, %b
-  store <32 x i16> %c, <32 x i16>* %out
+  store <32 x i16> %c, ptr %out
   ret void
 }
 
@@ -720,7 +720,7 @@ define void @trunc_v32i64_v32i16(<32 x i64>* %in, <32 x i16>* %out) #0 {
 ; truncate i64 -> i32
 ;
 
-define <4 x i32> @trunc_v4i64_v4i32(<4 x i64>* %in) #0 {
+define <4 x i32> @trunc_v4i64_v4i32(ptr %in) #0 {
 ; CHECK-LABEL: trunc_v4i64_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -730,13 +730,13 @@ define <4 x i32> @trunc_v4i64_v4i32(<4 x i64>* %in) #0 {
 ; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
-  %a = load <4 x i64>, <4 x i64>* %in
+  %a = load <4 x i64>, ptr %in
   %b = trunc <4 x i64> %a to <4 x i32>
   ret <4 x i32> %b
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v8i64_v8i32(<8 x i64>* %in, <8 x i32>* %out) #0 {
+define void @trunc_v8i64_v8i32(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: trunc_v8i64_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #32]
@@ -752,15 +752,15 @@ define void @trunc_v8i64_v8i32(<8 x i64>* %in, <8 x i32>* %out) #0 {
 ; CHECK-NEXT:    add z1.s, z3.s, z3.s
 ; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <8 x i64>, <8 x i64>* %in
+  %a = load <8 x i64>, ptr %in
   %b = trunc <8 x i64> %a to <8 x i32>
   %c = add <8 x i32> %b, %b
-  store <8 x i32> %c, <8 x i32>* %out
+  store <8 x i32> %c, ptr %out
   ret void
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v16i64_v16i32(<16 x i64>* %in, <16 x i32>* %out) #0 {
+define void @trunc_v16i64_v16i32(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: trunc_v16i64_v16i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #64]
@@ -787,15 +787,15 @@ define void @trunc_v16i64_v16i32(<16 x i64>* %in, <16 x i32>* %out) #0 {
 ; CHECK-NEXT:    add z1.s, z3.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <16 x i64>, <16 x i64>* %in
+  %a = load <16 x i64>, ptr %in
   %b = trunc <16 x i64> %a to <16 x i32>
   %c = add <16 x i32> %b, %b
-  store <16 x i32> %c, <16 x i32>* %out
+  store <16 x i32> %c, ptr %out
   ret void
 }
 
 ; NOTE: Extra 'add' is to prevent the truncate being combined with the store.
-define void @trunc_v32i64_v32i32(<32 x i64>* %in, <32 x i32>* %out) #0 {
+define void @trunc_v32i64_v32i32(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: trunc_v32i64_v32i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #192]
@@ -844,10 +844,10 @@ define void @trunc_v32i64_v32i32(<32 x i64>* %in, <32 x i32>* %out) #0 {
 ; CHECK-NEXT:    add z0.s, z18.s, z18.s
 ; CHECK-NEXT:    stp q0, q1, [x1]
 ; CHECK-NEXT:    ret
-  %a = load <32 x i64>, <32 x i64>* %in
+  %a = load <32 x i64>, ptr %in
   %b = trunc <32 x i64> %a to <32 x i32>
   %c = add <32 x i32> %b, %b
-  store <32 x i32> %c, <32 x i32>* %out
+  store <32 x i32> %c, ptr %out
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-trunc.ll b/llvm/test/CodeGen/AArch64/sve-trunc.ll
index 17af92271b215..07a55ebb3cb74 100644
--- a/llvm/test/CodeGen/AArch64/sve-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-trunc.ll
@@ -200,7 +200,7 @@ define <vscale x 1 x i1> @trunc_nxv1i32_to_nxv1i1(<vscale x 1 x i32> %in) {
   ret <vscale x 1 x i1> %out
 }
 
-define void @trunc_promoteIntRes(<vscale x 4 x i64> %0, i16* %ptr) {
+define void @trunc_promoteIntRes(<vscale x 4 x i64> %0, ptr %ptr) {
 ; CHECK-LABEL: trunc_promoteIntRes:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ptrue p0.s
@@ -209,8 +209,7 @@ define void @trunc_promoteIntRes(<vscale x 4 x i64> %0, i16* %ptr) {
 ; CHECK-NEXT:    ret
 entry:
   %1 = trunc <vscale x 4 x i64> %0 to <vscale x 4 x i16>
-  %2 = bitcast i16* %ptr to <vscale x 4 x i16>*
-  store <vscale x 4 x i16> %1, <vscale x 4 x i16>* %2, align 2
+  store <vscale x 4 x i16> %1, <vscale x 4 x i16>* %ptr, align 2
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-varargs-callee-broken.ll b/llvm/test/CodeGen/AArch64/sve-varargs-callee-broken.ll
index cd097d5cbb1da..0f42fa10caef0 100644
--- a/llvm/test/CodeGen/AArch64/sve-varargs-callee-broken.ll
+++ b/llvm/test/CodeGen/AArch64/sve-varargs-callee-broken.ll
@@ -3,20 +3,19 @@
 ; CHECK: Passing SVE types to variadic functions is currently not supported
 
 @.str = private unnamed_addr constant [4 x i8] c"fmt\00", align 1
-define void @foo(i8* %fmt, ...) nounwind {
+define void @foo(ptr %fmt, ...) nounwind {
 entry:
-  %fmt.addr = alloca i8*, align 8
-  %args = alloca i8*, align 8
+  %fmt.addr = alloca ptr, align 8
+  %args = alloca ptr, align 8
   %vc = alloca i32, align 4
   %vv = alloca <vscale x 4 x i32>, align 16
-  store i8* %fmt, i8** %fmt.addr, align 8
-  %args1 = bitcast i8** %args to i8*
-  call void @llvm.va_start(i8* %args1)
-  %0 = va_arg i8** %args, i32
-  store i32 %0, i32* %vc, align 4
-  %1 = va_arg i8** %args, <vscale x 4 x i32>
+  store ptr %fmt, ptr %fmt.addr, align 8
+  call void @llvm.va_start(ptr %args)
+  %0 = va_arg ptr %args, i32
+  store i32 %0, ptr %vc, align 4
+  %1 = va_arg ptr %args, <vscale x 4 x i32>
   store <vscale x 4 x i32> %1, <vscale x 4 x i32>* %vv, align 16
   ret void
 }
 
-declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind

diff --git a/llvm/test/CodeGen/AArch64/sve-varargs-caller-broken.ll b/llvm/test/CodeGen/AArch64/sve-varargs-caller-broken.ll
index 0f26728b26cb8..1ecdd2ff43781 100644
--- a/llvm/test/CodeGen/AArch64/sve-varargs-caller-broken.ll
+++ b/llvm/test/CodeGen/AArch64/sve-varargs-caller-broken.ll
@@ -1,12 +1,11 @@
 ; RUN: not --crash llc -mtriple aarch64-linux-gnu -mattr=+sve <%s 2>&1 | FileCheck %s
 
-declare i32 @sve_printf(i8*, <vscale x 4 x i32>, ...)
+declare i32 @sve_printf(ptr, <vscale x 4 x i32>, ...)
 
 @.str_1 = internal constant [6 x i8] c"boo!\0A\00"
 
 ; CHECK: Passing SVE types to variadic functions is currently not supported
 define void @foo(<vscale x 4 x i32> %x) {
-  %f = getelementptr [6 x i8], [6 x i8]* @.str_1, i64 0, i64 0
-  call i32 (i8*, <vscale x 4 x i32>, ...) @sve_printf(i8* %f, <vscale x 4 x i32> %x, <vscale x 4 x i32> %x)
+  call i32 (ptr, <vscale x 4 x i32>, ...) @sve_printf(ptr @.str_1, <vscale x 4 x i32> %x, <vscale x 4 x i32> %x)
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-varargs.ll b/llvm/test/CodeGen/AArch64/sve-varargs.ll
index 4ba5ad8a1008a..c63491f445b9c 100644
--- a/llvm/test/CodeGen/AArch64/sve-varargs.ll
+++ b/llvm/test/CodeGen/AArch64/sve-varargs.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
 
-declare i32 @sve_printf(i8*, <vscale x 4 x i32>, ...)
+declare i32 @sve_printf(ptr, <vscale x 4 x i32>, ...)
 
 @.str_1 = internal constant [6 x i8] c"boo!\0A\00"
 
@@ -18,7 +18,6 @@ define void @foo(<vscale x 4 x i32> %x) uwtable {
 ; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
-  %f = getelementptr [6 x i8], [6 x i8]* @.str_1, i64 0, i64 0
-  call i32 (i8*, <vscale x 4 x i32>, ...) @sve_printf(i8* %f, <vscale x 4 x i32> %x)
+  call i32 (ptr, <vscale x 4 x i32>, ...) @sve_printf(ptr @.str_1, <vscale x 4 x i32> %x)
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-vscale-attr.ll b/llvm/test/CodeGen/AArch64/sve-vscale-attr.ll
index 19ebd4265bd61..3e6236a149ff3 100644
--- a/llvm/test/CodeGen/AArch64/sve-vscale-attr.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vscale-attr.ll
@@ -4,7 +4,7 @@
 
 target triple = "aarch64-unknown-linux-gnu"
 
-define void @func_vscale_none(<16 x i32>* %a, <16 x i32>* %b) #0 {
+define void @func_vscale_none(ptr %a, ptr %b) #0 {
 ; CHECK-NOARG-LABEL: func_vscale_none:
 ; CHECK-NOARG:       // %bb.0:
 ; CHECK-NOARG-NEXT:    ldp q0, q1, [x0, #32]
@@ -27,16 +27,16 @@ define void @func_vscale_none(<16 x i32>* %a, <16 x i32>* %b) #0 {
 ; CHECK-ARG-NEXT:    add z0.s, z0.s, z1.s
 ; CHECK-ARG-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-ARG-NEXT:    ret
-  %op1 = load <16 x i32>, <16 x i32>* %a
-  %op2 = load <16 x i32>, <16 x i32>* %b
+  %op1 = load <16 x i32>, ptr %a
+  %op2 = load <16 x i32>, ptr %b
   %res = add <16 x i32> %op1, %op2
-  store <16 x i32> %res, <16 x i32>* %a
+  store <16 x i32> %res, ptr %a
   ret void
 }
 
 attributes #0 = { "target-features"="+sve" }
 
-define void @func_vscale1_1(<16 x i32>* %a, <16 x i32>* %b) #1 {
+define void @func_vscale1_1(ptr %a, ptr %b) #1 {
 ; CHECK-LABEL: func_vscale1_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0, #32]
@@ -50,16 +50,16 @@ define void @func_vscale1_1(<16 x i32>* %a, <16 x i32>* %b) #1 {
 ; CHECK-NEXT:    add v0.4s, v3.4s, v4.4s
 ; CHECK-NEXT:    stp q2, q0, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i32>, <16 x i32>* %a
-  %op2 = load <16 x i32>, <16 x i32>* %b
+  %op1 = load <16 x i32>, ptr %a
+  %op2 = load <16 x i32>, ptr %b
   %res = add <16 x i32> %op1, %op2
-  store <16 x i32> %res, <16 x i32>* %a
+  store <16 x i32> %res, ptr %a
   ret void
 }
 
 attributes #1 = { "target-features"="+sve" vscale_range(1,1) }
 
-define void @func_vscale2_2(<16 x i32>* %a, <16 x i32>* %b) #2 {
+define void @func_vscale2_2(ptr %a, ptr %b) #2 {
 ; CHECK-LABEL: func_vscale2_2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #8
@@ -73,16 +73,16 @@ define void @func_vscale2_2(<16 x i32>* %a, <16 x i32>* %b) #2 {
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i32>, <16 x i32>* %a
-  %op2 = load <16 x i32>, <16 x i32>* %b
+  %op1 = load <16 x i32>, ptr %a
+  %op2 = load <16 x i32>, ptr %b
   %res = add <16 x i32> %op1, %op2
-  store <16 x i32> %res, <16 x i32>* %a
+  store <16 x i32> %res, ptr %a
   ret void
 }
 
 attributes #2 = { "target-features"="+sve" vscale_range(2,2) }
 
-define void @func_vscale2_4(<16 x i32>* %a, <16 x i32>* %b) #3 {
+define void @func_vscale2_4(ptr %a, ptr %b) #3 {
 ; CHECK-LABEL: func_vscale2_4:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #8
@@ -96,16 +96,16 @@ define void @func_vscale2_4(<16 x i32>* %a, <16 x i32>* %b) #3 {
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i32>, <16 x i32>* %a
-  %op2 = load <16 x i32>, <16 x i32>* %b
+  %op1 = load <16 x i32>, ptr %a
+  %op2 = load <16 x i32>, ptr %b
   %res = add <16 x i32> %op1, %op2
-  store <16 x i32> %res, <16 x i32>* %a
+  store <16 x i32> %res, ptr %a
   ret void
 }
 
 attributes #3 = { "target-features"="+sve" vscale_range(2,4) }
 
-define void @func_vscale4_4(<16 x i32>* %a, <16 x i32>* %b) #4 {
+define void @func_vscale4_4(ptr %a, ptr %b) #4 {
 ; CHECK-LABEL: func_vscale4_4:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
@@ -114,16 +114,16 @@ define void @func_vscale4_4(<16 x i32>* %a, <16 x i32>* %b) #4 {
 ; CHECK-NEXT:    add z0.s, z0.s, z1.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i32>, <16 x i32>* %a
-  %op2 = load <16 x i32>, <16 x i32>* %b
+  %op1 = load <16 x i32>, ptr %a
+  %op2 = load <16 x i32>, ptr %b
   %res = add <16 x i32> %op1, %op2
-  store <16 x i32> %res, <16 x i32>* %a
+  store <16 x i32> %res, ptr %a
   ret void
 }
 
 attributes #4 = { "target-features"="+sve" vscale_range(4,4) }
 
-define void @func_vscale8_8(<16 x i32>* %a, <16 x i32>* %b) #5 {
+define void @func_vscale8_8(ptr %a, ptr %b) #5 {
 ; CHECK-LABEL: func_vscale8_8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s, vl16
@@ -132,10 +132,10 @@ define void @func_vscale8_8(<16 x i32>* %a, <16 x i32>* %b) #5 {
 ; CHECK-NEXT:    add z0.s, z0.s, z1.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
-  %op1 = load <16 x i32>, <16 x i32>* %a
-  %op2 = load <16 x i32>, <16 x i32>* %b
+  %op1 = load <16 x i32>, ptr %a
+  %op2 = load <16 x i32>, ptr %b
   %res = add <16 x i32> %op1, %op2
-  store <16 x i32> %res, <16 x i32>* %a
+  store <16 x i32> %res, ptr %a
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve2-intrinsics-contiguous-conflict-detection.ll b/llvm/test/CodeGen/AArch64/sve2-intrinsics-contiguous-conflict-detection.ll
index 364ce8752827c..0799989308d46 100644
--- a/llvm/test/CodeGen/AArch64/sve2-intrinsics-contiguous-conflict-detection.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-intrinsics-contiguous-conflict-detection.ll
@@ -6,75 +6,75 @@
 ; WHILERW
 ;
 
-define <vscale x 16 x i1> @whilerw_i8(i8* %a, i8* %b) {
+define <vscale x 16 x i1> @whilerw_i8(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilerw_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    whilerw p0.b, x0, x1
 ; CHECK-NEXT:    ret
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilerw.b.nx16i1(i8* %a, i8* %b)
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilerw.b.nx16i1(ptr %a, ptr %b)
   ret <vscale x 16 x i1> %out
 }
 
-define <vscale x 8 x i1> @whilerw_i16(i16* %a, i16* %b) {
+define <vscale x 8 x i1> @whilerw_i16(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilerw_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    whilerw p0.h, x0, x1
 ; CHECK-NEXT:    ret
-  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nx8i1(i16* %a, i16* %b)
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nx8i1(ptr %a, ptr %b)
   ret <vscale x 8 x i1> %out
 }
 
-define <vscale x 4 x i1> @whilerw_i32(i32* %a, i32* %b) {
+define <vscale x 4 x i1> @whilerw_i32(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilerw_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    whilerw p0.s, x0, x1
 ; CHECK-NEXT:    ret
-  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilerw.s.nx4i1(i32* %a, i32* %b)
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilerw.s.nx4i1(ptr %a, ptr %b)
   ret <vscale x 4 x i1> %out
 }
 
-define <vscale x 2 x i1> @whilerw_i64(i64* %a, i64* %b) {
+define <vscale x 2 x i1> @whilerw_i64(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilerw_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    whilerw p0.d, x0, x1
 ; CHECK-NEXT:    ret
-  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilerw.d.nx2i1(i64* %a, i64* %b)
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilerw.d.nx2i1(ptr %a, ptr %b)
   ret <vscale x 2 x i1> %out
 }
 
-define <vscale x 8 x i1> @whilerw_bfloat(bfloat* %a, bfloat* %b) {
+define <vscale x 8 x i1> @whilerw_bfloat(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilerw_bfloat:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    whilerw p0.h, x0, x1
 ; CHECK-NEXT:    ret
-  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nx8i1.bf16.bf16(bfloat* %a, bfloat* %b)
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nx8i1.bf16.bf16(ptr %a, ptr %b)
   ret <vscale x 8 x i1> %out
 }
 
-define <vscale x 8 x i1> @whilerw_half(half* %a, half* %b) {
+define <vscale x 8 x i1> @whilerw_half(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilerw_half:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    whilerw p0.h, x0, x1
 ; CHECK-NEXT:    ret
-  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nx8i1.f16.f16(half* %a, half* %b)
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nx8i1.f16.f16(ptr %a, ptr %b)
   ret <vscale x 8 x i1> %out
 }
 
-define <vscale x 4 x i1> @whilerw_float(float* %a, float* %b) {
+define <vscale x 4 x i1> @whilerw_float(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilerw_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    whilerw p0.s, x0, x1
 ; CHECK-NEXT:    ret
-  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilerw.s.nx4i1.f32.f32(float* %a, float* %b)
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilerw.s.nx4i1.f32.f32(ptr %a, ptr %b)
   ret <vscale x 4 x i1> %out
 }
 
-define <vscale x 2 x i1> @whilerw_double(double* %a, double* %b) {
+define <vscale x 2 x i1> @whilerw_double(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilerw_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    whilerw p0.d, x0, x1
 ; CHECK-NEXT:    ret
-  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilerw.d.nx2i1.f64.f64(double* %a, double* %b)
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilerw.d.nx2i1.f64.f64(ptr %a, ptr %b)
   ret <vscale x 2 x i1> %out
 }
 
@@ -82,94 +82,94 @@ define <vscale x 2 x i1> @whilerw_double(double* %a, double* %b) {
 ; WHILEWR
 ;
 
-define <vscale x 16 x i1> @whilewr_i8(i8* %a, i8* %b) {
+define <vscale x 16 x i1> @whilewr_i8(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    whilewr p0.b, x0, x1
 ; CHECK-NEXT:    ret
-  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilewr.b.nx16i1(i8* %a, i8* %b)
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilewr.b.nx16i1(ptr %a, ptr %b)
   ret <vscale x 16 x i1> %out
 }
 
-define <vscale x 8 x i1> @whilewr_i16(i16* %a, i16* %b) {
+define <vscale x 8 x i1> @whilewr_i16(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    whilewr p0.h, x0, x1
 ; CHECK-NEXT:    ret
-  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nx8i1(i16* %a, i16* %b)
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nx8i1(ptr %a, ptr %b)
   ret <vscale x 8 x i1> %out
 }
 
-define <vscale x 4 x i1> @whilewr_i32(i32* %a, i32* %b) {
+define <vscale x 4 x i1> @whilewr_i32(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    whilewr p0.s, x0, x1
 ; CHECK-NEXT:    ret
-  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilewr.s.nx4i1(i32* %a, i32* %b)
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilewr.s.nx4i1(ptr %a, ptr %b)
   ret <vscale x 4 x i1> %out
 }
 
-define <vscale x 2 x i1> @whilewr_i64(i64* %a, i64* %b) {
+define <vscale x 2 x i1> @whilewr_i64(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    whilewr p0.d, x0, x1
 ; CHECK-NEXT:    ret
-  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilewr.d.nx2i1(i64* %a, i64* %b)
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilewr.d.nx2i1(ptr %a, ptr %b)
   ret <vscale x 2 x i1> %out
 }
 
-define <vscale x 8 x i1> @whilewr_bfloat(bfloat* %a, bfloat* %b) {
+define <vscale x 8 x i1> @whilewr_bfloat(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_bfloat:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    whilewr p0.h, x0, x1
 ; CHECK-NEXT:    ret
-  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nx8i1.bf16.bf16(bfloat* %a, bfloat* %b)
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nx8i1.bf16.bf16(ptr %a, ptr %b)
   ret <vscale x 8 x i1> %out
 }
 
-define <vscale x 8 x i1> @whilewr_half(half* %a, half* %b) {
+define <vscale x 8 x i1> @whilewr_half(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_half:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    whilewr p0.h, x0, x1
 ; CHECK-NEXT:    ret
-  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nx8i1.f16.f16(half* %a, half* %b)
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nx8i1.f16.f16(ptr %a, ptr %b)
   ret <vscale x 8 x i1> %out
 }
 
-define <vscale x 4 x i1> @whilewr_float(float* %a, float* %b) {
+define <vscale x 4 x i1> @whilewr_float(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    whilewr p0.s, x0, x1
 ; CHECK-NEXT:    ret
-  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilewr.s.nx4i1.f32.f32(float* %a, float* %b)
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilewr.s.nx4i1.f32.f32(ptr %a, ptr %b)
   ret <vscale x 4 x i1> %out
 }
 
-define <vscale x 2 x i1> @whilewr_double(double* %a, double* %b) {
+define <vscale x 2 x i1> @whilewr_double(ptr %a, ptr %b) {
 ; CHECK-LABEL: whilewr_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    whilewr p0.d, x0, x1
 ; CHECK-NEXT:    ret
-  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilewr.d.nx2i1.f64.f64(double* %a, double* %b)
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilewr.d.nx2i1.f64.f64(ptr %a, ptr %b)
   ret <vscale x 2 x i1> %out
 }
 
-declare <vscale x 16 x i1> @llvm.aarch64.sve.whilerw.b.nx16i1(i8* %a, i8* %b)
-declare <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nx8i1(i16* %a, i16* %b)
-declare <vscale x 4 x i1> @llvm.aarch64.sve.whilerw.s.nx4i1(i32* %a, i32* %b)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.whilerw.d.nx2i1(i64* %a, i64* %b)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilerw.b.nx16i1(ptr %a, ptr %b)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nx8i1(ptr %a, ptr %b)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilerw.s.nx4i1(ptr %a, ptr %b)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilerw.d.nx2i1(ptr %a, ptr %b)
 
-declare <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nx8i1.bf16.bf16(bfloat* %a, bfloat* %b)
-declare <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nx8i1.f16.f16(half* %a, half* %b)
-declare <vscale x 4 x i1> @llvm.aarch64.sve.whilerw.s.nx4i1.f32.f32(float* %a, float* %b)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.whilerw.d.nx2i1.f64.f64(double* %a, double* %b)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nx8i1.bf16.bf16(ptr %a, ptr %b)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nx8i1.f16.f16(ptr %a, ptr %b)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilerw.s.nx4i1.f32.f32(ptr %a, ptr %b)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilerw.d.nx2i1.f64.f64(ptr %a, ptr %b)
 
-declare <vscale x 16 x i1> @llvm.aarch64.sve.whilewr.b.nx16i1(i8* %a, i8* %b)
-declare <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nx8i1(i16* %a, i16* %b)
-declare <vscale x 4 x i1> @llvm.aarch64.sve.whilewr.s.nx4i1(i32* %a, i32* %b)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.whilewr.d.nx2i1(i64* %a, i64* %b)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilewr.b.nx16i1(ptr %a, ptr %b)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nx8i1(ptr %a, ptr %b)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilewr.s.nx4i1(ptr %a, ptr %b)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilewr.d.nx2i1(ptr %a, ptr %b)
 
-declare <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nx8i1.bf16.bf16(bfloat* %a, bfloat* %b)
-declare <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nx8i1.f16.f16(half* %a, half* %b)
-declare <vscale x 4 x i1> @llvm.aarch64.sve.whilewr.s.nx4i1.f32.f32(float* %a, float* %b)
-declare <vscale x 2 x i1> @llvm.aarch64.sve.whilewr.d.nx2i1.f64.f64(double* %a, double* %b)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nx8i1.bf16.bf16(ptr %a, ptr %b)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nx8i1.f16.f16(ptr %a, ptr %b)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilewr.s.nx4i1.f32.f32(ptr %a, ptr %b)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilewr.d.nx2i1.f64.f64(ptr %a, ptr %b)

diff --git a/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-gather-loads-32bit-unscaled-offset.ll b/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-gather-loads-32bit-unscaled-offset.ll
index 8b60c5521f434..9747a23472ae9 100644
--- a/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-gather-loads-32bit-unscaled-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-gather-loads-32bit-unscaled-offset.ll
@@ -8,50 +8,50 @@
 ;
 
 ; LDNT1B
-define <vscale x 4 x i32> @gldnt1b_s_uxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldnt1b_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldnt1b_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1b { z0.s }, p0/z, [z0.s, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg,
-                                                                            i8* %base,
+                                                                            ptr %base,
                                                                             <vscale x 4 x i32> %b)
   %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
 ; LDNT1H
-define <vscale x 4 x i32> @gldnt1h_s_uxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldnt1h_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldnt1h_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1h { z0.s }, p0/z, [z0.s, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                              i16* %base,
+                                                                              ptr %base,
                                                                               <vscale x 4 x i32> %b)
   %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
 ; LDNT1W
-define <vscale x 4 x i32> @gldnt1w_s_uxtw(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldnt1w_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldnt1w_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1w { z0.s }, p0/z, [z0.s, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32(<vscale x 4 x i1> %pg,
-                                                                              i32* %base,
+                                                                              ptr %base,
                                                                               <vscale x 4 x i32> %b)
   ret <vscale x 4 x i32> %load
 }
 
-define <vscale x 4 x float> @gldnt1w_s_uxtw_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x float> @gldnt1w_s_uxtw_float(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldnt1w_s_uxtw_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1w { z0.s }, p0/z, [z0.s, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4f32(<vscale x 4 x i1> %pg,
-                                                                                float* %base,
+                                                                                ptr %base,
                                                                                 <vscale x 4 x i32> %b)
   ret <vscale x 4 x float> %load
 }
@@ -62,42 +62,42 @@ define <vscale x 4 x float> @gldnt1w_s_uxtw_float(<vscale x 4 x i1> %pg, float*
 ;
 
 ; LDNT1SB
-define <vscale x 4 x i32> @gldnt1sb_s_uxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldnt1sb_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldnt1sb_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1sb { z0.s }, p0/z, [z0.s, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg,
-                                                                            i8* %base,
+                                                                            ptr %base,
                                                                             <vscale x 4 x i32> %b)
   %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
 ; LDNT1SH
-define <vscale x 4 x i32> @gldnt1sh_s_uxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+define <vscale x 4 x i32> @gldnt1sh_s_uxtw(<vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gldnt1sh_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1sh { z0.s }, p0/z, [z0.s, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg,
-                                                                              i16* %base,
+                                                                              ptr %base,
                                                                               <vscale x 4 x i32> %b)
   %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %res
 }
 
 ; LDNT1B/LDNT1SB
-declare <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
-declare <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.sxtw.nxv4i8(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
+declare <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 4 x i8> @llvm.aarch64.sve.ldnt1.gather.sxtw.nxv4i8(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
 
 ; LDNT1H/LDNT1SH
-declare <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.sxtw.nxv4i16(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
-declare <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.sxtw.nxv4i16(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
 
 ; LDNT1W/LDNT1SW
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.sxtw.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
-declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.sxtw.nxv4i32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
 
-declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.sxtw.nxv4f32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)
-declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4f32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.sxtw.nxv4f32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4f32(<vscale x 4 x i1>, ptr, <vscale x 4 x i32>)

diff --git a/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-gather-loads-64bit-scaled-offset.ll b/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-gather-loads-64bit-scaled-offset.ll
index fd5343af9dd3e..6406bdd9c405b 100644
--- a/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-gather-loads-64bit-scaled-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-gather-loads-64bit-scaled-offset.ll
@@ -8,52 +8,52 @@
 ;     ldnt1h z0.d, p0/z, [z0.d, x0]
 ;
 
-define <vscale x 2 x i64> @gldnt1h_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldnt1h_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldnt1h_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.d, z0.d, #1
 ; CHECK-NEXT:    ldnt1h { z0.d }, p0/z, [z0.d, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                               i16* %base,
+                                                                               ptr %base,
                                                                                <vscale x 2 x i64> %b)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldnt1w_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldnt1w_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldnt1w_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.d, z0.d, #2
 ; CHECK-NEXT:    ldnt1w { z0.d }, p0/z, [z0.d, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                               i32* %base,
+                                                                               ptr %base,
                                                                                <vscale x 2 x i64> %b)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldnt1d_index(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldnt1d_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldnt1d_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.d, z0.d, #3
 ; CHECK-NEXT:    ldnt1d { z0.d }, p0/z, [z0.d, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                               i64* %base,
+                                                                               ptr %base,
                                                                                <vscale x 2 x i64> %b)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @gldnt1d_index_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x double> @gldnt1d_index_double(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldnt1d_index_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.d, z0.d, #3
 ; CHECK-NEXT:    ldnt1d { z0.d }, p0/z, [z0.d, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.index.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                                  double* %base,
+                                                                                  ptr %base,
                                                                                   <vscale x 2 x i64> %b)
   ret <vscale x 2 x double> %load
 }
@@ -65,33 +65,33 @@ define <vscale x 2 x double> @gldnt1d_index_double(<vscale x 2 x i1> %pg, double
 ;     ldnt1sh z0.d, p0/z, [z0.d, x0]
 ;
 
-define <vscale x 2 x i64> @gldnt1sh_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldnt1sh_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldnt1sh_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.d, z0.d, #1
 ; CHECK-NEXT:    ldnt1sh { z0.d }, p0/z, [z0.d, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                               i16* %base,
+                                                                               ptr %base,
                                                                                <vscale x 2 x i64> %b)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldnt1sw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldnt1sw_index(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldnt1sw_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z0.d, z0.d, #2
 ; CHECK-NEXT:    ldnt1sw { z0.d }, p0/z, [z0.d, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                               i32* %base,
+                                                                               ptr %base,
                                                                                <vscale x 2 x i64> %b)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i64>)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.index.nxv2f64(<vscale x 2 x i1>, double*, <vscale x 2 x i64>)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i16(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i32(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i64(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.index.nxv2f64(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)

diff --git a/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-gather-loads-64bit-unscaled-offset.ll b/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-gather-loads-64bit-unscaled-offset.ll
index d0df22a3902e3..3b63375549a3a 100644
--- a/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-gather-loads-64bit-unscaled-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-gather-loads-64bit-unscaled-offset.ll
@@ -6,60 +6,60 @@
 ;   e.g. ldnt1h { z0.d }, p0/z, [z0.d, x0]
 ;
 
-define <vscale x 2 x i64> @gldnt1b_d(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldnt1b_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldnt1b_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1b { z0.d }, p0/z, [z0.d, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnt1.gather.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                       i8* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %b)
   %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldnt1h_d(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldnt1h_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldnt1h_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1h { z0.d }, p0/z, [z0.d, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                         i16* %base,
+                                                                         ptr %base,
                                                                          <vscale x 2 x i64> %b)
   %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldnt1w_d(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
+define <vscale x 2 x i64> @gldnt1w_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: gldnt1w_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1w { z0.d }, p0/z, [z0.d, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                         i32* %base,
+                                                                         ptr %base,
                                                                          <vscale x 2 x i64> %offsets)
   %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldnt1d_d(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldnt1d_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldnt1d_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1d { z0.d }, p0/z, [z0.d, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.nxv2i64(<vscale x 2 x i1> %pg,
-                                                                         i64* %base,
+                                                                         ptr %base,
                                                                          <vscale x 2 x i64> %b)
   ret <vscale x 2 x i64> %load
 }
 
-define <vscale x 2 x double> @gldnt1d_d_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x double> @gldnt1d_d_double(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldnt1d_d_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1d { z0.d }, p0/z, [z0.d, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.nxv2f64(<vscale x 2 x i1> %pg,
-                                                                            double* %base,
+                                                                            ptr %base,
                                                                             <vscale x 2 x i64> %b)
   ret <vscale x 2 x double> %load
 }
@@ -69,44 +69,44 @@ define <vscale x 2 x double> @gldnt1d_d_double(<vscale x 2 x i1> %pg, double* %b
 ;   e.g. ldnt1sh { z0.d }, p0/z, [z0.d, x0]
 ;
 
-define <vscale x 2 x i64> @gldnt1sb_d(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldnt1sb_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldnt1sb_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1sb { z0.d }, p0/z, [z0.d, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ldnt1.gather.nxv2i8(<vscale x 2 x i1> %pg,
-                                                                       i8* %base,
+                                                                       ptr %base,
                                                                        <vscale x 2 x i64> %b)
   %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldnt1sh_d(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define <vscale x 2 x i64> @gldnt1sh_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gldnt1sh_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1sh { z0.d }, p0/z, [z0.d, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.nxv2i16(<vscale x 2 x i1> %pg,
-                                                                         i16* %base,
+                                                                         ptr %base,
                                                                          <vscale x 2 x i64> %b)
   %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 2 x i64> @gldnt1sw_d(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
+define <vscale x 2 x i64> @gldnt1sw_d(<vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: gldnt1sw_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldnt1sw { z0.d }, p0/z, [z0.d, x0]
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.nxv2i32(<vscale x 2 x i1> %pg,
-                                                                         i32* %base,
+                                                                         ptr %base,
                                                                          <vscale x 2 x i64> %offsets)
   %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %res
 }
 
-declare <vscale x 2 x i8> @llvm.aarch64.sve.ldnt1.gather.nxv2i8(<vscale x 2 x i1>, i8*, <vscale x 2 x i64>)
-declare <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
-declare <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
-declare <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i64>)
-declare <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.nxv2f64(<vscale x 2 x i1>, double*, <vscale x 2 x i64>)
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ldnt1.gather.nxv2i8(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.nxv2i16(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.nxv2i32(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.nxv2i64(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.nxv2f64(<vscale x 2 x i1>, ptr, <vscale x 2 x i64>)

diff --git a/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-scatter-stores-32bit-unscaled-offset.ll b/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-scatter-stores-32bit-unscaled-offset.ll
index 876c5b44e2960..08036399c13fb 100644
--- a/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-scatter-stores-32bit-unscaled-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-scatter-stores-32bit-unscaled-offset.ll
@@ -8,7 +8,7 @@
 ;
 
 ; STNT1B
-define void @sstnt1b_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %offsets) {
+define void @sstnt1b_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
 ; CHECK-LABEL: sstnt1b_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stnt1b { z0.s }, p0, [z1.s, x0]
@@ -16,13 +16,13 @@ define void @sstnt1b_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i8*
   %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
   call void  @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i8(<vscale x 4 x i8> %data_trunc,
                                                          <vscale x 4 x i1> %pg,
-                                                         i8* %base,
+                                                         ptr %base,
                                                          <vscale x 4 x i32> %offsets)
   ret void
 }
 
 ; STNT1H
-define void @sstnt1h_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %offsets) {
+define void @sstnt1h_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
 ; CHECK-LABEL: sstnt1h_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stnt1h { z0.s }, p0, [z1.s, x0]
@@ -30,53 +30,53 @@ define void @sstnt1h_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i16
   %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
   call void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i16(<vscale x 4 x i16> %data_trunc,
                                                          <vscale x 4 x i1> %pg,
-                                                         i16* %base,
+                                                         ptr %base,
                                                          <vscale x 4 x i32> %offsets)
   ret void
 }
 
 ; STNT1W
-define void @sstnt1w_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %offsets) {
+define void @sstnt1w_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
 ; CHECK-LABEL: sstnt1w_s_uxtw:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stnt1w { z0.s }, p0, [z1.s, x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i32(<vscale x 4 x i32> %data,
                                                          <vscale x 4 x i1> %pg,
-                                                         i32* %base,
+                                                         ptr %base,
                                                          <vscale x 4 x i32> %offsets)
   ret void
 }
 
-define void @sstnt1w_s_uxtw_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %offsets) {
+define void @sstnt1w_s_uxtw_float(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
 ; CHECK-LABEL: sstnt1w_s_uxtw_float:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stnt1w { z0.s }, p0, [z1.s, x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4f32(<vscale x 4 x float> %data,
                                                          <vscale x 4 x i1> %pg,
-                                                         float* %base,
+                                                         ptr %base,
                                                          <vscale x 4 x i32> %offsets)
   ret void
 }
 
 ; STNT1B
-declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*, <vscale x 2 x i32>)
-declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
 ; STNT1H
-declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*, <vscale x 2 x i32>)
-declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
 ; STNT1W
-declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*, <vscale x 2 x i32>)
-declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
+declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr, <vscale x 2 x i32>)
 
-declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*, <vscale x 4 x i32>)
-declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.stnt1.scatter.sxtw.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)

diff --git a/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-scatter-stores-64bit-scaled-offset.ll b/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-scatter-stores-64bit-scaled-offset.ll
index 4f47a4b7bdea1..1ae7f361e47a3 100644
--- a/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-scatter-stores-64bit-scaled-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-scatter-stores-64bit-scaled-offset.ll
@@ -8,7 +8,7 @@
 ;     stnt1h { z0.d }, p0, [z0.d, x0]
 ;
 
-define void @sstnt1h_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %offsets) {
+define void @sstnt1h_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: sstnt1h_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z1.d, z1.d, #1
@@ -17,12 +17,12 @@ define void @sstnt1h_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16*
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
   call void @llvm.aarch64.sve.stnt1.scatter.index.nxv2i16(<vscale x 2 x i16> %data_trunc,
                                                           <vscale x 2 x i1> %pg,
-                                                          i16* %base,
+                                                          ptr %base,
                                                           <vscale x 2 x i64> %offsets)
   ret void
 }
 
-define void @sstnt1w_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
+define void @sstnt1w_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: sstnt1w_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z1.d, z1.d, #2
@@ -31,12 +31,12 @@ define void @sstnt1w_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32*
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
   call void @llvm.aarch64.sve.stnt1.scatter.index.nxv2i32(<vscale x 2 x i32> %data_trunc,
                                                           <vscale x 2 x i1> %pg,
-                                                          i32* %base,
+                                                          ptr %base,
                                                           <vscale x 2 x i64> %offsets)
   ret void
 }
 
-define void  @sstnt1d_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %offsets) {
+define void  @sstnt1d_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: sstnt1d_index:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z1.d, z1.d, #3
@@ -44,12 +44,12 @@ define void  @sstnt1d_index(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.stnt1.scatter.index.nxv2i64(<vscale x 2 x i64> %data,
                                                           <vscale x 2 x i1> %pg,
-                                                          i64* %base,
+                                                          ptr %base,
                                                           <vscale x 2 x i64> %offsets)
   ret void
 }
 
-define void  @sstnt1d_index_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %offsets) {
+define void  @sstnt1d_index_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: sstnt1d_index_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsl z1.d, z1.d, #3
@@ -57,13 +57,13 @@ define void  @sstnt1d_index_double(<vscale x 2 x double> %data, <vscale x 2 x i1
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.stnt1.scatter.index.nxv2f64(<vscale x 2 x double> %data,
                                                           <vscale x 2 x i1> %pg,
-                                                          double* %base,
+                                                          ptr %base,
                                                           <vscale x 2 x i64> %offsets)
   ret void
 }
 
 
-declare void @llvm.aarch64.sve.stnt1.scatter.index.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
-declare void @llvm.aarch64.sve.stnt1.scatter.index.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
-declare void @llvm.aarch64.sve.stnt1.scatter.index.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*, <vscale x 2 x i64>)
-declare void @llvm.aarch64.sve.stnt1.scatter.index.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.stnt1.scatter.index.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.stnt1.scatter.index.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.stnt1.scatter.index.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.stnt1.scatter.index.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)

diff --git a/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-scatter-stores-64bit-unscaled-offset.ll b/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-scatter-stores-64bit-unscaled-offset.ll
index a83bbb8454b6c..08f9bad404f77 100644
--- a/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-scatter-stores-64bit-unscaled-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-intrinsics-nt-scatter-stores-64bit-unscaled-offset.ll
@@ -6,7 +6,7 @@
 ;   e.g. stnt1h { z0.d }, p0, [z1.d, x0]
 ;
 
-define void @sstnt1b_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
+define void @sstnt1b_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: sstnt1b_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stnt1b { z0.d }, p0, [z1.d, x0]
@@ -14,12 +14,12 @@ define void @sstnt1b_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i8* %bas
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
   call void @llvm.aarch64.sve.stnt1.scatter.nxv2i8(<vscale x 2 x i8> %data_trunc,
                                                    <vscale x 2 x i1> %pg,
-                                                   i8* %base,
+                                                   ptr %base,
                                                    <vscale x 2 x i64> %b)
   ret void
 }
 
-define void @sstnt1h_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+define void @sstnt1h_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: sstnt1h_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stnt1h { z0.d }, p0, [z1.d, x0]
@@ -27,12 +27,12 @@ define void @sstnt1h_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16* %ba
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
   call void @llvm.aarch64.sve.stnt1.scatter.nxv2i16(<vscale x 2 x i16> %data_trunc,
                                                     <vscale x 2 x i1> %pg,
-                                                    i16* %base,
+                                                    ptr %base,
                                                     <vscale x 2 x i64> %b)
   ret void
 }
 
-define void @sstnt1w_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
+define void @sstnt1w_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: sstnt1w_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stnt1w { z0.d }, p0, [z1.d, x0]
@@ -40,37 +40,37 @@ define void @sstnt1w_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32* %ba
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
   call void @llvm.aarch64.sve.stnt1.scatter.nxv2i32(<vscale x 2 x i32> %data_trunc,
                                                     <vscale x 2 x i1> %pg,
-                                                    i32* %base,
+                                                    ptr %base,
                                                     <vscale x 2 x i64> %b)
   ret void
 }
 
-define void @sstnt1d_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
+define void @sstnt1d_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: sstnt1d_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stnt1d { z0.d }, p0, [z1.d, x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.stnt1.scatter.nxv2i64(<vscale x 2 x i64> %data,
                                                     <vscale x 2 x i1> %pg,
-                                                    i64* %base,
+                                                    ptr %base,
                                                     <vscale x 2 x i64> %b)
   ret void
 }
 
-define void @sstnt1d_d_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
+define void @sstnt1d_d_double(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: sstnt1d_d_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    stnt1d { z0.d }, p0, [z1.d, x0]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.stnt1.scatter.nxv2f64(<vscale x 2 x double> %data,
                                                     <vscale x 2 x i1> %pg,
-                                                    double* %base,
+                                                    ptr %base,
                                                     <vscale x 2 x i64> %b)
   ret void
 }
 
-declare void @llvm.aarch64.sve.stnt1.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*, <vscale x 2 x i64>)
-declare void @llvm.aarch64.sve.stnt1.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
-declare void @llvm.aarch64.sve.stnt1.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
-declare void @llvm.aarch64.sve.stnt1.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*, <vscale x 2 x i64>)
-declare void @llvm.aarch64.sve.stnt1.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.stnt1.scatter.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.stnt1.scatter.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.stnt1.scatter.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.stnt1.scatter.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sve.stnt1.scatter.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, ptr, <vscale x 2 x i64>)

diff --git a/llvm/test/CodeGen/AArch64/swift-async-pei.ll b/llvm/test/CodeGen/AArch64/swift-async-pei.ll
index 2369457d81b3a..72e1f9974ab19 100644
--- a/llvm/test/CodeGen/AArch64/swift-async-pei.ll
+++ b/llvm/test/CodeGen/AArch64/swift-async-pei.ll
@@ -5,13 +5,12 @@
 ; RUN: llc -mtriple arm64_32-apple-watchos -filetype asm -o - %s -swift-async-fp never | FileCheck %s -check-prefix CHECK-WATCHOS-NEVER
 ; RUN: llc -mtriple arm64_32-apple-watchos -filetype asm -o - %s -swift-async-fp auto | FileCheck %s -check-prefix CHECK-WATCHOS-AUTO
 
-declare i8** @llvm.swift.async.context.addr()
+declare ptr @llvm.swift.async.context.addr()
 
-define swifttailcc void @f(i8* swiftasync %ctx) {
-  %1 = bitcast i8* %ctx to i8**
-  %2 = load i8*, i8** %1, align 8
-  %3 = tail call i8** @llvm.swift.async.context.addr()
-  store i8* %2, i8** %3, align 8
+define swifttailcc void @f(ptr swiftasync %ctx) {
+  %1 = load ptr, ptr %ctx, align 8
+  %2 = tail call ptr @llvm.swift.async.context.addr()
+  store ptr %1, ptr %2, align 8
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/swift-async-reg.ll b/llvm/test/CodeGen/AArch64/swift-async-reg.ll
index 8b23c4b35b260..5ce1de642583d 100644
--- a/llvm/test/CodeGen/AArch64/swift-async-reg.ll
+++ b/llvm/test/CodeGen/AArch64/swift-async-reg.ll
@@ -2,17 +2,17 @@
 ; RUN: llc -mtriple=arm64-apple-ios %s -o - -global-isel | FileCheck %s
 ; RUN: llc -mtriple=arm64-apple-ios %s -o - -fast-isel | FileCheck %s
 
-define i8* @argument(i8* swiftasync %in) {
+define ptr @argument(ptr swiftasync %in) {
 ; CHECK-LABEL: argument:
 ; CHECK: mov x0, x22
 
-  ret i8* %in
+  ret ptr %in
 }
 
-define void @call(i8* %in) {
+define void @call(ptr %in) {
 ; CHECK-LABEL: call:
 ; CHECK: mov x22, x0
 
-  call i8* @argument(i8* swiftasync %in)
+  call ptr @argument(ptr swiftasync %in)
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/swift-async-unwind.ll b/llvm/test/CodeGen/AArch64/swift-async-unwind.ll
index 5c98705c85fa3..19e0e0856e36c 100644
--- a/llvm/test/CodeGen/AArch64/swift-async-unwind.ll
+++ b/llvm/test/CodeGen/AArch64/swift-async-unwind.ll
@@ -7,7 +7,7 @@
 ; says to use DWARF correctly.
 
 ; CHECK: compact encoding: 0x03000000
-define void @foo(i8* swiftasync %in) "frame-pointer"="all" {
+define void @foo(ptr swiftasync %in) "frame-pointer"="all" {
   call void asm sideeffect "", "~{x23}"()
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/swift-async-win.ll b/llvm/test/CodeGen/AArch64/swift-async-win.ll
index bff77ae6d8f99..995953df29c15 100644
--- a/llvm/test/CodeGen/AArch64/swift-async-win.ll
+++ b/llvm/test/CodeGen/AArch64/swift-async-win.ll
@@ -5,34 +5,29 @@ source_filename = "_Concurrency.ll"
 target datalayout = "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-unknown-windows-msvc19.32.31302"
 
-%swift.context = type { %swift.context*, void (%swift.context*)* }
+%swift.context = type { ptr, ptr }
 
 ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0
 
 ; Function Attrs: nounwind
-define hidden swifttailcc void @"$ss23withCheckedContinuation8function_xSS_yScCyxs5NeverOGXEtYalFTQ0_"(i8* nocapture readonly %0) #1 {
+define hidden swifttailcc void @"$ss23withCheckedContinuation8function_xSS_yScCyxs5NeverOGXEtYalFTQ0_"(ptr nocapture readonly %0) #1 {
 entryresume.0:
-  %1 = bitcast i8* %0 to i8**
-  %2 = load i8*, i8** %1, align 8
-  %3 = tail call i8** @llvm.swift.async.context.addr() #4
-  store i8* %2, i8** %3, align 8
-  %async.ctx.frameptr1 = getelementptr inbounds i8, i8* %2, i64 16
-  %.reload.addr4 = getelementptr inbounds i8, i8* %2, i64 24
-  %4 = bitcast i8* %.reload.addr4 to i8**
-  %.reload5 = load i8*, i8** %4, align 8
-  %.reload.addr = bitcast i8* %async.ctx.frameptr1 to i8**
-  %.reload = load i8*, i8** %.reload.addr, align 8
-  %5 = load i8*, i8** %1, align 8
-  store i8* %5, i8** %3, align 8
-  tail call swiftcc void @swift_task_dealloc(i8* %.reload5) #4
-  tail call void @llvm.lifetime.end.p0i8(i64 -1, i8* %.reload5)
-  tail call swiftcc void @swift_task_dealloc(i8* %.reload) #4
-  %6 = getelementptr inbounds i8, i8* %5, i64 8
-  %7 = bitcast i8* %6 to void (%swift.context*)**
-  %8 = load void (%swift.context*)*, void (%swift.context*)** %7, align 8
-  %9 = bitcast i8* %5 to %swift.context*
-  musttail call swifttailcc void %8(%swift.context* %9) #4
+  %1 = load ptr, ptr %0, align 8
+  %2 = tail call ptr @llvm.swift.async.context.addr() #4
+  store ptr %1, ptr %2, align 8
+  %async.ctx.frameptr1 = getelementptr inbounds i8, ptr %1, i64 16
+  %.reload.addr4 = getelementptr inbounds i8, ptr %1, i64 24
+  %.reload5 = load ptr, ptr %.reload.addr4, align 8
+  %.reload = load ptr, ptr %async.ctx.frameptr1, align 8
+  %3 = load ptr, ptr %0, align 8
+  store ptr %3, ptr %2, align 8
+  tail call swiftcc void @swift_task_dealloc(ptr %.reload5) #4
+  tail call void @llvm.lifetime.end.p0(i64 -1, ptr %.reload5)
+  tail call swiftcc void @swift_task_dealloc(ptr %.reload) #4
+  %4 = getelementptr inbounds i8, ptr %3, i64 8
+  %5 = load ptr, ptr %4, align 8
+  musttail call swifttailcc void %5(ptr %3) #4
   ret void
 }
 
@@ -49,10 +44,10 @@ entryresume.0:
 ; CHECK: str x9, [x8]
 
 ; Function Attrs: nounwind readnone
-declare i8** @llvm.swift.async.context.addr() #2
+declare ptr @llvm.swift.async.context.addr() #2
 
 ; Function Attrs: argmemonly nounwind
-declare dllimport swiftcc void @swift_task_dealloc(i8*) local_unnamed_addr #3
+declare dllimport swiftcc void @swift_task_dealloc(ptr) local_unnamed_addr #3
 
 attributes #0 = { argmemonly nofree nosync nounwind willreturn }
 attributes #1 = { nounwind "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" }

diff --git a/llvm/test/CodeGen/AArch64/swift-async.ll b/llvm/test/CodeGen/AArch64/swift-async.ll
index 1b0c1b390215e..55b347d674fe8 100644
--- a/llvm/test/CodeGen/AArch64/swift-async.ll
+++ b/llvm/test/CodeGen/AArch64/swift-async.ll
@@ -5,7 +5,7 @@
 ; Important details in prologue:
 ;   * x22 is stored just below x29
 ;   * Enough stack space is allocated for everything
-define swifttailcc void @simple(i8* swiftasync %ctx) "frame-pointer"="all" {
+define swifttailcc void @simple(ptr swiftasync %ctx) "frame-pointer"="all" {
 ; CHECK-LABEL: simple:
 ; CHECK: orr x29, x29, #0x100000000000000
 ; CHECK: sub sp, sp, #32
@@ -32,7 +32,7 @@ define swifttailcc void @simple(i8* swiftasync %ctx) "frame-pointer"="all" {
   ret void
 }
 
-define swifttailcc void @more_csrs(i8* swiftasync %ctx) "frame-pointer"="all" {
+define swifttailcc void @more_csrs(ptr swiftasync %ctx) "frame-pointer"="all" {
 ; CHECK-LABEL: more_csrs:
 ; CHECK: orr x29, x29, #0x100000000000000
 ; CHECK: str x23, [sp, #-32]!
@@ -60,7 +60,7 @@ define swifttailcc void @more_csrs(i8* swiftasync %ctx) "frame-pointer"="all" {
   ret void
 }
 
-define swifttailcc void @locals(i8* swiftasync %ctx) "frame-pointer"="all" {
+define swifttailcc void @locals(ptr swiftasync %ctx) "frame-pointer"="all" {
 ; CHECK-LABEL: locals:
 ; CHECK: orr x29, x29, #0x100000000000000
 ; CHECK: sub sp, sp, #64
@@ -87,11 +87,11 @@ define swifttailcc void @locals(i8* swiftasync %ctx) "frame-pointer"="all" {
 ; CHECK: and x29, x29, #0xefffffffffffffff
 ; CHECK: add sp, sp, #64
   %var = alloca i32, i32 10
-  call void @bar(i32* %var)
+  call void @bar(ptr %var)
   ret void
 }
 
-define swifttailcc void @use_input_context(i8* swiftasync %ctx, i8** %ptr) "frame-pointer"="all" {
+define swifttailcc void @use_input_context(ptr swiftasync %ctx, ptr %ptr) "frame-pointer"="all" {
 ; CHECK-LABEL: use_input_context:
 
 ; CHECK-NOAUTH: str x22, [sp
@@ -100,11 +100,11 @@ define swifttailcc void @use_input_context(i8* swiftasync %ctx, i8** %ptr) "fram
 ; CHECK-NOT: x22
 ; CHECK: str x22, [x0]
 
-  store i8* %ctx, i8** %ptr
+  store ptr %ctx, ptr %ptr
   ret void
 }
 
-define swifttailcc i8** @context_in_func() "frame-pointer"="non-leaf" {
+define swifttailcc ptr @context_in_func() "frame-pointer"="non-leaf" {
 ; CHECK-LABEL: context_in_func:
 
 ; CHECK-NOAUTH: str xzr, [sp, #8]
@@ -114,27 +114,27 @@ define swifttailcc i8** @context_in_func() "frame-pointer"="non-leaf" {
 ; CHECK-AUTH: pacdb x17, x16
 ; CHECK-AUTH: str x17, [sp, #8]
 
-  %ptr = call i8** @llvm.swift.async.context.addr()
-  ret i8** %ptr
+  %ptr = call ptr @llvm.swift.async.context.addr()
+  ret ptr %ptr
 }
 
-define swifttailcc void @write_frame_context(i8* swiftasync %ctx, i8* %newctx) "frame-pointer"="non-leaf" {
+define swifttailcc void @write_frame_context(ptr swiftasync %ctx, ptr %newctx) "frame-pointer"="non-leaf" {
 ; CHECK-LABEL: write_frame_context:
 ; CHECK: sub x[[ADDR:[0-9]+]], x29, #8
 ; CHECK: str x0, [x[[ADDR]]]
-  %ptr = call i8** @llvm.swift.async.context.addr()
-  store i8* %newctx, i8** %ptr
+  %ptr = call ptr @llvm.swift.async.context.addr()
+  store ptr %newctx, ptr %ptr
   ret void
 }
 
-define swifttailcc void @simple_fp_elim(i8* swiftasync %ctx) "frame-pointer"="non-leaf" {
+define swifttailcc void @simple_fp_elim(ptr swiftasync %ctx) "frame-pointer"="non-leaf" {
 ; CHECK-LABEL: simple_fp_elim:
 ; CHECK-NOT: orr x29, x29, #0x100000000000000
 
   ret void
 }
 
-define swifttailcc void @large_frame(i8* swiftasync %ctx) "frame-pointer"="all" {
+define swifttailcc void @large_frame(ptr swiftasync %ctx) "frame-pointer"="all" {
 ; CHECK-LABEL: large_frame:
 ; CHECK: str x28, [sp, #-32]!
 ; CHECK: stp x29, x30, [sp, #16]
@@ -152,7 +152,7 @@ define swifttailcc void @large_frame(i8* swiftasync %ctx) "frame-pointer"="all"
 
 ; Important point is that there is just one 8-byte gap in the CSR region (right
 ; now just above d8) to realign the stack.
-define swifttailcc void @two_unpaired_csrs(i8* swiftasync) "frame-pointer"="all" {
+define swifttailcc void @two_unpaired_csrs(ptr swiftasync) "frame-pointer"="all" {
 ; CHECK-LABEL: two_unpaired_csrs:
 ; CHECK: str d8, [sp, #-48]!
 ; CHECK: str x19, [sp, #16]
@@ -167,8 +167,8 @@ define swifttailcc void @two_unpaired_csrs(i8* swiftasync) "frame-pointer"="all"
 ; CHECK: .cfi_offset b8, -48
 
   call void asm "","~{x19},~{d8}"()
-  call swifttailcc void @bar(i32* undef)
+  call swifttailcc void @bar(ptr undef)
   ret void
 }
-declare swifttailcc void @bar(i32*)
-declare i8** @llvm.swift.async.context.addr()
+declare swifttailcc void @bar(ptr)
+declare ptr @llvm.swift.async.context.addr()

diff --git a/llvm/test/CodeGen/AArch64/swift-dynamic-async-frame.ll b/llvm/test/CodeGen/AArch64/swift-dynamic-async-frame.ll
index 94babeabab66d..772bc94989c24 100644
--- a/llvm/test/CodeGen/AArch64/swift-dynamic-async-frame.ll
+++ b/llvm/test/CodeGen/AArch64/swift-dynamic-async-frame.ll
@@ -31,6 +31,6 @@
 ; CHECK-DYNAMIC-32: orr x29, x29, x16, lsl #32
 ; CHECK-DYNAMIC-32: .weak_reference _swift_async_extendedFramePointerFlags
 
-define void @foo(i8* swiftasync) "frame-pointer"="all" {
+define void @foo(ptr swiftasync) "frame-pointer"="all" {
   ret void
 }

diff --git a/llvm/test/CodeGen/AArch64/swift-error.ll b/llvm/test/CodeGen/AArch64/swift-error.ll
index 79a31c19be1c5..19671ce5a1d9b 100644
--- a/llvm/test/CodeGen/AArch64/swift-error.ll
+++ b/llvm/test/CodeGen/AArch64/swift-error.ll
@@ -2,11 +2,11 @@
 
 %swift.error = type opaque
 
-declare swiftcc void @f(%swift.error** swifterror)
+declare swiftcc void @f(ptr swifterror)
 
-define swiftcc void @g(i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %swift.error** swifterror %error) {
+define swiftcc void @g(ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr swifterror %error) {
 entry:
-  call swiftcc void @f(%swift.error** nonnull nocapture swifterror %error)
+  call swiftcc void @f(ptr nonnull nocapture swifterror %error)
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AArch64/swift-return.ll b/llvm/test/CodeGen/AArch64/swift-return.ll
index 6c675b3e60c7e..5cd9182b6842f 100644
--- a/llvm/test/CodeGen/AArch64/swift-return.ll
+++ b/llvm/test/CodeGen/AArch64/swift-return.ll
@@ -41,8 +41,8 @@ declare swiftcc { i16, i8 } @gen(i32)
 define i64 @test2(i64 %key) {
 entry:
   %key.addr = alloca i64, align 4
-  store i64 %key, i64* %key.addr, align 4
-  %0 = load i64, i64* %key.addr, align 4
+  store i64 %key, ptr %key.addr, align 4
+  %0 = load i64, ptr %key.addr, align 4
   %call = call swiftcc { i64, i64, i64, i64, i64 } @gen2(i64 %0)
 
   %v3 = extractvalue { i64, i64, i64, i64, i64 } %call, 0

diff --git a/llvm/test/CodeGen/AArch64/swifterror.ll b/llvm/test/CodeGen/AArch64/swifterror.ll
index 15dda462cbbd2..f89dd07b5c989 100644
--- a/llvm/test/CodeGen/AArch64/swifterror.ll
+++ b/llvm/test/CodeGen/AArch64/swifterror.ll
@@ -4,13 +4,13 @@
 ; RUN: llc -verify-machineinstrs -frame-pointer=all -enable-shrink-wrap=false < %s -mtriple=arm64_32-apple-ios -disable-post-ra | FileCheck -allow-deprecated-dag-overlap --check-prefix=CHECK-APPLE --check-prefix=CHECK-APPLE-ARM64_32 %s
 ; RUN: llc -verify-machineinstrs -O0 -fast-isel < %s -mtriple=arm64_32-apple-ios -disable-post-ra | FileCheck -allow-deprecated-dag-overlap --check-prefix=CHECK-O0-ARM64_32 %s
 
-declare i8* @malloc(i64)
-declare void @free(i8*)
+declare ptr @malloc(i64)
+declare void @free(ptr)
 %swift_error = type {i64, i8}
 
 ; This tests the basic usage of a swifterror parameter. "foo" is the function
 ; that takes a swifterror parameter and "caller" is the caller of "foo".
-define float @foo(%swift_error** swifterror %error_ptr_ref) {
+define float @foo(ptr swifterror %error_ptr_ref) {
 ; CHECK-APPLE-LABEL: foo:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
@@ -62,16 +62,15 @@ define float @foo(%swift_error** swifterror %error_ptr_ref) {
 ; CHECK-O0-ARM64_32-NEXT:    ret
 
 entry:
-  %call = call i8* @malloc(i64 16)
-  %call.0 = bitcast i8* %call to %swift_error*
-  store %swift_error* %call.0, %swift_error** %error_ptr_ref
-  %tmp = getelementptr inbounds i8, i8* %call, i64 8
-  store i8 1, i8* %tmp
+  %call = call ptr @malloc(i64 16)
+  store ptr %call, ptr %error_ptr_ref
+  %tmp = getelementptr inbounds i8, ptr %call, i64 8
+  store i8 1, ptr %tmp
   ret float 1.0
 }
 
 ; "caller" calls "foo" that takes a swifterror parameter.
-define float @caller(i8* %error_ref) {
+define float @caller(ptr %error_ref) {
 ; CHECK-APPLE-AARCH64-LABEL: caller:
 ; CHECK-APPLE-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-APPLE-AARCH64-NEXT:    sub sp, sp, #64
@@ -202,25 +201,24 @@ define float @caller(i8* %error_ref) {
 ; Access part of the error object and save it to error_ref
 
 entry:
-  %error_ptr_ref = alloca swifterror %swift_error*
-  store %swift_error* null, %swift_error** %error_ptr_ref
-  %call = call float @foo(%swift_error** swifterror %error_ptr_ref)
-  %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref
-  %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null
-  %tmp = bitcast %swift_error* %error_from_foo to i8*
+  %error_ptr_ref = alloca swifterror ptr
+  store ptr null, ptr %error_ptr_ref
+  %call = call float @foo(ptr swifterror %error_ptr_ref)
+  %error_from_foo = load ptr, ptr %error_ptr_ref
+  %had_error_from_foo = icmp ne ptr %error_from_foo, null
   br i1 %had_error_from_foo, label %handler, label %cont
 cont:
-  %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1
-  %t = load i8, i8* %v1
-  store i8 %t, i8* %error_ref
+  %v1 = getelementptr inbounds %swift_error, ptr %error_from_foo, i64 0, i32 1
+  %t = load i8, ptr %v1
+  store i8 %t, ptr %error_ref
   br label %handler
 handler:
-  call void @free(i8* %tmp)
+  call void @free(ptr %error_from_foo)
   ret float 1.0
 }
 
 ; "caller2" is the caller of "foo", it calls "foo" inside a loop.
-define float @caller2(i8* %error_ref) {
+define float @caller2(ptr %error_ref) {
 ; CHECK-APPLE-AARCH64-LABEL: caller2:
 ; CHECK-APPLE-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-APPLE-AARCH64-NEXT:    sub sp, sp, #80
@@ -391,31 +389,30 @@ define float @caller2(i8* %error_ref) {
 ; Access part of the error object and save it to error_ref
 
 entry:
-  %error_ptr_ref = alloca swifterror %swift_error*
+  %error_ptr_ref = alloca swifterror ptr
   br label %bb_loop
 bb_loop:
-  store %swift_error* null, %swift_error** %error_ptr_ref
-  %call = call float @foo(%swift_error** swifterror %error_ptr_ref)
-  %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref
-  %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null
-  %tmp = bitcast %swift_error* %error_from_foo to i8*
+  store ptr null, ptr %error_ptr_ref
+  %call = call float @foo(ptr swifterror %error_ptr_ref)
+  %error_from_foo = load ptr, ptr %error_ptr_ref
+  %had_error_from_foo = icmp ne ptr %error_from_foo, null
   br i1 %had_error_from_foo, label %handler, label %cont
 cont:
   %cmp = fcmp ogt float %call, 1.000000e+00
   br i1 %cmp, label %bb_end, label %bb_loop
 bb_end:
-  %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1
-  %t = load i8, i8* %v1
-  store i8 %t, i8* %error_ref
+  %v1 = getelementptr inbounds %swift_error, ptr %error_from_foo, i64 0, i32 1
+  %t = load i8, ptr %v1
+  store i8 %t, ptr %error_ref
   br label %handler
 handler:
-  call void @free(i8* %tmp)
+  call void @free(ptr %error_from_foo)
   ret float 1.0
 }
 
 ; "foo_if" is a function that takes a swifterror parameter, it sets swifterror
 ; under a certain condition.
-define float @foo_if(%swift_error** swifterror %error_ptr_ref, i32 %cc) {
+define float @foo_if(ptr swifterror %error_ptr_ref, i32 %cc) {
 ; CHECK-APPLE-LABEL: foo_if:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
@@ -501,11 +498,10 @@ entry:
   br i1 %cond, label %gen_error, label %normal
 
 gen_error:
-  %call = call i8* @malloc(i64 16)
-  %call.0 = bitcast i8* %call to %swift_error*
-  store %swift_error* %call.0, %swift_error** %error_ptr_ref
-  %tmp = getelementptr inbounds i8, i8* %call, i64 8
-  store i8 1, i8* %tmp
+  %call = call ptr @malloc(i64 16)
+  store ptr %call, ptr %error_ptr_ref
+  %tmp = getelementptr inbounds i8, ptr %call, i64 8
+  store i8 1, ptr %tmp
   ret float 1.0
 
 normal:
@@ -514,7 +510,7 @@ normal:
 
 ; "foo_loop" is a function that takes a swifterror parameter, it sets swifterror
 ; under a certain condition inside a loop.
-define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float %cc2) {
+define float @foo_loop(ptr swifterror %error_ptr_ref, i32 %cc, float %cc2) {
 ; CHECK-APPLE-LABEL: foo_loop:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    stp d9, d8, [sp, #-48]! ; 16-byte Folded Spill
@@ -657,11 +653,10 @@ bb_loop:
   br i1 %cond, label %gen_error, label %bb_cont
 
 gen_error:
-  %call = call i8* @malloc(i64 16)
-  %call.0 = bitcast i8* %call to %swift_error*
-  store %swift_error* %call.0, %swift_error** %error_ptr_ref
-  %tmp = getelementptr inbounds i8, i8* %call, i64 8
-  store i8 1, i8* %tmp
+  %call = call ptr @malloc(i64 16)
+  store ptr %call, ptr %error_ptr_ref
+  %tmp = getelementptr inbounds i8, ptr %call, i64 8
+  store i8 1, ptr %tmp
   br label %bb_cont
 
 bb_cont:
@@ -675,7 +670,7 @@ bb_end:
 
 ; "foo_sret" is a function that takes a swifterror parameter, it also has a sret
 ; parameter.
-define void @foo_sret(%struct.S* sret(%struct.S) %agg.result, i32 %val1, %swift_error** swifterror %error_ptr_ref) {
+define void @foo_sret(ptr sret(%struct.S) %agg.result, i32 %val1, ptr swifterror %error_ptr_ref) {
 ; CHECK-APPLE-LABEL: foo_sret:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
@@ -749,18 +744,17 @@ define void @foo_sret(%struct.S* sret(%struct.S) %agg.result, i32 %val1, %swift_
 ; spill x8
 ; reload from stack
 entry:
-  %call = call i8* @malloc(i64 16)
-  %call.0 = bitcast i8* %call to %swift_error*
-  store %swift_error* %call.0, %swift_error** %error_ptr_ref
-  %tmp = getelementptr inbounds i8, i8* %call, i64 8
-  store i8 1, i8* %tmp
-  %v2 = getelementptr inbounds %struct.S, %struct.S* %agg.result, i32 0, i32 1
-  store i32 %val1, i32* %v2
+  %call = call ptr @malloc(i64 16)
+  store ptr %call, ptr %error_ptr_ref
+  %tmp = getelementptr inbounds i8, ptr %call, i64 8
+  store i8 1, ptr %tmp
+  %v2 = getelementptr inbounds %struct.S, ptr %agg.result, i32 0, i32 1
+  store i32 %val1, ptr %v2
   ret void
 }
 
 ; "caller3" calls "foo_sret" that takes a swifterror parameter.
-define float @caller3(i8* %error_ref) {
+define float @caller3(ptr %error_ref) {
 ; CHECK-APPLE-AARCH64-LABEL: caller3:
 ; CHECK-APPLE-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-APPLE-AARCH64-NEXT:    sub sp, sp, #80
@@ -903,27 +897,26 @@ define float @caller3(i8* %error_ref) {
 ; reload from stack
 entry:
   %s = alloca %struct.S, align 8
-  %error_ptr_ref = alloca swifterror %swift_error*
-  store %swift_error* null, %swift_error** %error_ptr_ref
-  call void @foo_sret(%struct.S* sret(%struct.S) %s, i32 1, %swift_error** swifterror %error_ptr_ref)
-  %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref
-  %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null
-  %tmp = bitcast %swift_error* %error_from_foo to i8*
+  %error_ptr_ref = alloca swifterror ptr
+  store ptr null, ptr %error_ptr_ref
+  call void @foo_sret(ptr sret(%struct.S) %s, i32 1, ptr swifterror %error_ptr_ref)
+  %error_from_foo = load ptr, ptr %error_ptr_ref
+  %had_error_from_foo = icmp ne ptr %error_from_foo, null
   br i1 %had_error_from_foo, label %handler, label %cont
 cont:
-  %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1
-  %t = load i8, i8* %v1
-  store i8 %t, i8* %error_ref
+  %v1 = getelementptr inbounds %swift_error, ptr %error_from_foo, i64 0, i32 1
+  %t = load i8, ptr %v1
+  store i8 %t, ptr %error_ref
   br label %handler
 handler:
-  call void @free(i8* %tmp)
+  call void @free(ptr %error_from_foo)
   ret float 1.0
 }
 
 ; "foo_vararg" is a function that takes a swifterror parameter, it also has
 ; variable number of arguments.
-declare void @llvm.va_start(i8*) nounwind
-define float @foo_vararg(%swift_error** swifterror %error_ptr_ref, ...) {
+declare void @llvm.va_start(ptr) nounwind
+define float @foo_vararg(ptr swifterror %error_ptr_ref, ...) {
 ; CHECK-APPLE-AARCH64-LABEL: foo_vararg:
 ; CHECK-APPLE-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-APPLE-AARCH64-NEXT:    sub sp, sp, #48
@@ -1069,30 +1062,28 @@ define float @foo_vararg(%swift_error** swifterror %error_ptr_ref, ...) {
 
 
 entry:
-  %call = call i8* @malloc(i64 16)
-  %call.0 = bitcast i8* %call to %swift_error*
-  store %swift_error* %call.0, %swift_error** %error_ptr_ref
-  %tmp = getelementptr inbounds i8, i8* %call, i64 8
-  store i8 1, i8* %tmp
+  %call = call ptr @malloc(i64 16)
+  store ptr %call, ptr %error_ptr_ref
+  %tmp = getelementptr inbounds i8, ptr %call, i64 8
+  store i8 1, ptr %tmp
 
-  %args = alloca i8*, align 8
+  %args = alloca ptr, align 8
   %a10 = alloca i32, align 4
   %a11 = alloca i32, align 4
   %a12 = alloca i32, align 4
-  %v10 = bitcast i8** %args to i8*
-  call void @llvm.va_start(i8* %v10)
-  %v11 = va_arg i8** %args, i32
-  store i32 %v11, i32* %a10, align 4
-  %v12 = va_arg i8** %args, i32
-  store i32 %v12, i32* %a11, align 4
-  %v13 = va_arg i8** %args, i32
-  store i32 %v13, i32* %a12, align 4
+  call void @llvm.va_start(ptr %args)
+  %v11 = va_arg ptr %args, i32
+  store i32 %v11, ptr %a10, align 4
+  %v12 = va_arg ptr %args, i32
+  store i32 %v12, ptr %a11, align 4
+  %v13 = va_arg ptr %args, i32
+  store i32 %v13, ptr %a12, align 4
 
   ret float 1.0
 }
 
 ; "caller4" calls "foo_vararg" that takes a swifterror parameter.
-define float @caller4(i8* %error_ref) {
+define float @caller4(ptr %error_ref) {
 ; CHECK-APPLE-AARCH64-LABEL: caller4:
 ; CHECK-APPLE-AARCH64:       ; %bb.0: ; %entry
 ; CHECK-APPLE-AARCH64-NEXT:    sub sp, sp, #96
@@ -1269,37 +1260,36 @@ define float @caller4(i8* %error_ref) {
 
 ; Access part of the error object and save it to error_ref
 entry:
-  %error_ptr_ref = alloca swifterror %swift_error*
-  store %swift_error* null, %swift_error** %error_ptr_ref
+  %error_ptr_ref = alloca swifterror ptr
+  store ptr null, ptr %error_ptr_ref
 
   %a10 = alloca i32, align 4
   %a11 = alloca i32, align 4
   %a12 = alloca i32, align 4
-  store i32 10, i32* %a10, align 4
-  store i32 11, i32* %a11, align 4
-  store i32 12, i32* %a12, align 4
-  %v10 = load i32, i32* %a10, align 4
-  %v11 = load i32, i32* %a11, align 4
-  %v12 = load i32, i32* %a12, align 4
+  store i32 10, ptr %a10, align 4
+  store i32 11, ptr %a11, align 4
+  store i32 12, ptr %a12, align 4
+  %v10 = load i32, ptr %a10, align 4
+  %v11 = load i32, ptr %a11, align 4
+  %v12 = load i32, ptr %a12, align 4
 
-  %call = call float (%swift_error**, ...) @foo_vararg(%swift_error** swifterror %error_ptr_ref, i32 %v10, i32 %v11, i32 %v12)
-  %error_from_foo = load %swift_error*, %swift_error** %error_ptr_ref
-  %had_error_from_foo = icmp ne %swift_error* %error_from_foo, null
-  %tmp = bitcast %swift_error* %error_from_foo to i8*
+  %call = call float (ptr, ...) @foo_vararg(ptr swifterror %error_ptr_ref, i32 %v10, i32 %v11, i32 %v12)
+  %error_from_foo = load ptr, ptr %error_ptr_ref
+  %had_error_from_foo = icmp ne ptr %error_from_foo, null
   br i1 %had_error_from_foo, label %handler, label %cont
 
 cont:
-  %v1 = getelementptr inbounds %swift_error, %swift_error* %error_from_foo, i64 0, i32 1
-  %t = load i8, i8* %v1
-  store i8 %t, i8* %error_ref
+  %v1 = getelementptr inbounds %swift_error, ptr %error_from_foo, i64 0, i32 1
+  %t = load i8, ptr %v1
+  store i8 %t, ptr %error_ref
   br label %handler
 handler:
-  call void @free(i8* %tmp)
+  call void @free(ptr %error_from_foo)
   ret float 1.0
 }
 
 ; Check that we don't blow up on tail calling swifterror argument functions.
-define float @tailcallswifterror(%swift_error** swifterror %error_ptr_ref) {
+define float @tailcallswifterror(ptr swifterror %error_ptr_ref) {
 ; CHECK-APPLE-LABEL: tailcallswifterror:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
@@ -1333,10 +1323,10 @@ define float @tailcallswifterror(%swift_error** swifterror %error_ptr_ref) {
 ; CHECK-O0-ARM64_32-NEXT:    ldr x30, [sp], #16 ; 8-byte Folded Reload
 ; CHECK-O0-ARM64_32-NEXT:    ret
 entry:
-  %0 = tail call float @tailcallswifterror(%swift_error** swifterror %error_ptr_ref)
+  %0 = tail call float @tailcallswifterror(ptr swifterror %error_ptr_ref)
   ret float %0
 }
-define swiftcc float @tailcallswifterror_swiftcc(%swift_error** swifterror %error_ptr_ref) {
+define swiftcc float @tailcallswifterror_swiftcc(ptr swifterror %error_ptr_ref) {
 ; CHECK-APPLE-LABEL: tailcallswifterror_swiftcc:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
@@ -1370,11 +1360,11 @@ define swiftcc float @tailcallswifterror_swiftcc(%swift_error** swifterror %erro
 ; CHECK-O0-ARM64_32-NEXT:    ldr x30, [sp], #16 ; 8-byte Folded Reload
 ; CHECK-O0-ARM64_32-NEXT:    ret
 entry:
-  %0 = tail call swiftcc float @tailcallswifterror_swiftcc(%swift_error** swifterror %error_ptr_ref)
+  %0 = tail call swiftcc float @tailcallswifterror_swiftcc(ptr swifterror %error_ptr_ref)
   ret float %0
 }
 
-define swiftcc void @swifterror_clobber(%swift_error** nocapture swifterror %err) {
+define swiftcc void @swifterror_clobber(ptr nocapture swifterror %err) {
 ; CHECK-APPLE-LABEL: swifterror_clobber:
 ; CHECK-APPLE:       ; %bb.0:
 ; CHECK-APPLE-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
@@ -1424,7 +1414,7 @@ define swiftcc void @swifterror_clobber(%swift_error** nocapture swifterror %err
   ret void
 }
 
-define swiftcc void @swifterror_reg_clobber(%swift_error** nocapture %err) {
+define swiftcc void @swifterror_reg_clobber(ptr nocapture %err) {
 ; CHECK-APPLE-LABEL: swifterror_reg_clobber:
 ; CHECK-APPLE:       ; %bb.0:
 ; CHECK-APPLE-NEXT:    stp x22, x21, [sp, #-32]! ; 16-byte Folded Spill
@@ -1476,7 +1466,7 @@ define swiftcc void @swifterror_reg_clobber(%swift_error** nocapture %err) {
   ret void
 }
 
-define swiftcc void @params_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, i8* swiftself, %swift_error** nocapture swifterror %err) {
+define swiftcc void @params_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, ptr swiftself, ptr nocapture swifterror %err) {
 ; CHECK-APPLE-LABEL: params_in_reg:
 ; CHECK-APPLE:       ; %bb.0:
 ; CHECK-APPLE-NEXT:    sub sp, sp, #112
@@ -1649,15 +1639,15 @@ define swiftcc void @params_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, i8* s
 ; CHECK-O0-ARM64_32-NEXT:    ldp x20, x30, [sp, #96] ; 16-byte Folded Reload
 ; CHECK-O0-ARM64_32-NEXT:    add sp, sp, #112
 ; CHECK-O0-ARM64_32-NEXT:    ret
-  %error_ptr_ref = alloca swifterror %swift_error*, align 8
-  store %swift_error* null, %swift_error** %error_ptr_ref
-  call swiftcc void @params_in_reg2(i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i8* swiftself null, %swift_error** nocapture swifterror %error_ptr_ref)
-  call swiftcc void @params_in_reg2(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5, i64 %6, i64 %7, i8* swiftself %8, %swift_error** nocapture swifterror %err)
+  %error_ptr_ref = alloca swifterror ptr, align 8
+  store ptr null, ptr %error_ptr_ref
+  call swiftcc void @params_in_reg2(i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, ptr swiftself null, ptr nocapture swifterror %error_ptr_ref)
+  call swiftcc void @params_in_reg2(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5, i64 %6, i64 %7, ptr swiftself %8, ptr nocapture swifterror %err)
   ret void
 }
-declare swiftcc void @params_in_reg2(i64, i64, i64, i64, i64, i64, i64, i64, i8* swiftself, %swift_error** nocapture swifterror %err)
+declare swiftcc void @params_in_reg2(i64, i64, i64, i64, i64, i64, i64, i64, ptr swiftself, ptr nocapture swifterror %err)
 
-define swiftcc { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, i8* swiftself, %swift_error** nocapture swifterror %err) {
+define swiftcc { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg(i64, i64, i64, i64, i64, i64, i64, i64, ptr swiftself, ptr nocapture swifterror %err) {
 ; CHECK-APPLE-LABEL: params_and_return_in_reg:
 ; CHECK-APPLE:       ; %bb.0:
 ; CHECK-APPLE-NEXT:    sub sp, sp, #128
@@ -1961,21 +1951,21 @@ define swiftcc { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_
 ; CHECK-O0-ARM64_32-NEXT:    ldr x28, [sp, #240] ; 8-byte Folded Reload
 ; CHECK-O0-ARM64_32-NEXT:    add sp, sp, #272
 ; CHECK-O0-ARM64_32-NEXT:    ret
-  %error_ptr_ref = alloca swifterror %swift_error*, align 8
-  store %swift_error* null, %swift_error** %error_ptr_ref
-  call swiftcc void @params_in_reg2(i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i8* swiftself null, %swift_error** nocapture swifterror %error_ptr_ref)
-  %val = call swiftcc  { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg2(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5, i64 %6, i64 %7, i8* swiftself %8, %swift_error** nocapture swifterror %err)
-  call swiftcc void @params_in_reg2(i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i8* swiftself null, %swift_error** nocapture swifterror %error_ptr_ref)
+  %error_ptr_ref = alloca swifterror ptr, align 8
+  store ptr null, ptr %error_ptr_ref
+  call swiftcc void @params_in_reg2(i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, ptr swiftself null, ptr nocapture swifterror %error_ptr_ref)
+  %val = call swiftcc  { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg2(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5, i64 %6, i64 %7, ptr swiftself %8, ptr nocapture swifterror %err)
+  call swiftcc void @params_in_reg2(i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, ptr swiftself null, ptr nocapture swifterror %error_ptr_ref)
   ret { i64, i64, i64, i64, i64, i64, i64, i64 } %val
 }
 
-declare swiftcc { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg2(i64, i64, i64, i64, i64, i64, i64, i64, i8* swiftself, %swift_error** nocapture swifterror %err)
+declare swiftcc { i64, i64, i64, i64, i64, i64, i64, i64 } @params_and_return_in_reg2(i64, i64, i64, i64, i64, i64, i64, i64, ptr swiftself, ptr nocapture swifterror %err)
 
-declare void @acallee(i8*)
+declare void @acallee(ptr)
 
 ; Make sure we don't tail call if the caller returns a swifterror value. We
 ; would have to move into the swifterror register before the tail call.
-define swiftcc void @tailcall_from_swifterror(%swift_error** swifterror %error_ptr_ref) {
+define swiftcc void @tailcall_from_swifterror(ptr swifterror %error_ptr_ref) {
 ; CHECK-APPLE-LABEL: tailcall_from_swifterror:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    str x19, [sp, #-32]! ; 8-byte Folded Spill
@@ -2025,14 +2015,14 @@ define swiftcc void @tailcall_from_swifterror(%swift_error** swifterror %error_p
 ; CHECK-O0-ARM64_32-NEXT:    add sp, sp, #32
 ; CHECK-O0-ARM64_32-NEXT:    ret
 entry:
-  tail call void @acallee(i8* null)
+  tail call void @acallee(ptr null)
   ret void
 }
 
-declare swiftcc void @foo2(%swift_error** swifterror)
+declare swiftcc void @foo2(ptr swifterror)
 
 ; Make sure we properly assign registers during fast-isel.
-define swiftcc %swift_error* @testAssign(i8* %error_ref) {
+define swiftcc ptr @testAssign(ptr %error_ref) {
 ; CHECK-APPLE-LABEL: testAssign:
 ; CHECK-APPLE:       ; %bb.0: ; %entry
 ; CHECK-APPLE-NEXT:    sub sp, sp, #48
@@ -2098,12 +2088,12 @@ define swiftcc %swift_error* @testAssign(i8* %error_ref) {
 ; CHECK-O0-ARM64_32-NEXT:    add sp, sp, #48
 ; CHECK-O0-ARM64_32-NEXT:    ret
 entry:
-  %error_ptr = alloca swifterror %swift_error*
-  store %swift_error* null, %swift_error** %error_ptr
-  call swiftcc void @foo2(%swift_error** swifterror %error_ptr)
+  %error_ptr = alloca swifterror ptr
+  store ptr null, ptr %error_ptr
+  call swiftcc void @foo2(ptr swifterror %error_ptr)
   br label %a
 
 a:
-  %error = load %swift_error*, %swift_error** %error_ptr
-  ret %swift_error* %error
+  %error = load ptr, ptr %error_ptr
+  ret ptr %error
 }

diff --git a/llvm/test/CodeGen/AArch64/swiftself-scavenger.ll b/llvm/test/CodeGen/AArch64/swiftself-scavenger.ll
index 4caae77821665..030593986be4a 100644
--- a/llvm/test/CodeGen/AArch64/swiftself-scavenger.ll
+++ b/llvm/test/CodeGen/AArch64/swiftself-scavenger.ll
@@ -8,75 +8,75 @@
 ; CHECK: ldr [[REG]], [sp]
 target triple = "arm64-apple-ios"
 
- at ptr8 = external global i8*
+ at ptr8 = external global ptr
 @ptr64 = external global i64
 
-define hidden swiftcc void @func(i8* swiftself %arg) #0 {
+define hidden swiftcc void @func(ptr swiftself %arg) #0 {
 bb:
-  %stack0 = alloca i8*, i32 5000, align 8
-  %stack1 = alloca i8*, i32 32, align 8
+  %stack0 = alloca ptr, i32 5000, align 8
+  %stack1 = alloca ptr, i32 32, align 8
 
-  %v0  = load volatile i64, i64* @ptr64, align 8
-  %v1  = load volatile i64, i64* @ptr64, align 8
-  %v2  = load volatile i64, i64* @ptr64, align 8
-  %v3  = load volatile i64, i64* @ptr64, align 8
-  %v4  = load volatile i64, i64* @ptr64, align 8
-  %v5  = load volatile i64, i64* @ptr64, align 8
-  %v6  = load volatile i64, i64* @ptr64, align 8
-  %v7  = load volatile i64, i64* @ptr64, align 8
-  %v8  = load volatile i64, i64* @ptr64, align 8
-  %v9  = load volatile i64, i64* @ptr64, align 8
-  %v10 = load volatile i64, i64* @ptr64, align 8
-  %v11 = load volatile i64, i64* @ptr64, align 8
-  %v12 = load volatile i64, i64* @ptr64, align 8
-  %v13 = load volatile i64, i64* @ptr64, align 8
-  %v14 = load volatile i64, i64* @ptr64, align 8
-  %v15 = load volatile i64, i64* @ptr64, align 8
-  %v16 = load volatile i64, i64* @ptr64, align 8
-  %v17 = load volatile i64, i64* @ptr64, align 8
-  %v18 = load volatile i64, i64* @ptr64, align 8
-  %v19 = load volatile i64, i64* @ptr64, align 8
-  %v20 = load volatile i64, i64* @ptr64, align 8
-  %v21 = load volatile i64, i64* @ptr64, align 8
-  %v22 = load volatile i64, i64* @ptr64, align 8
-  %v23 = load volatile i64, i64* @ptr64, align 8
-  %v24 = load volatile i64, i64* @ptr64, align 8
-  %v25 = load volatile i64, i64* @ptr64, align 8
+  %v0  = load volatile i64, ptr @ptr64, align 8
+  %v1  = load volatile i64, ptr @ptr64, align 8
+  %v2  = load volatile i64, ptr @ptr64, align 8
+  %v3  = load volatile i64, ptr @ptr64, align 8
+  %v4  = load volatile i64, ptr @ptr64, align 8
+  %v5  = load volatile i64, ptr @ptr64, align 8
+  %v6  = load volatile i64, ptr @ptr64, align 8
+  %v7  = load volatile i64, ptr @ptr64, align 8
+  %v8  = load volatile i64, ptr @ptr64, align 8
+  %v9  = load volatile i64, ptr @ptr64, align 8
+  %v10 = load volatile i64, ptr @ptr64, align 8
+  %v11 = load volatile i64, ptr @ptr64, align 8
+  %v12 = load volatile i64, ptr @ptr64, align 8
+  %v13 = load volatile i64, ptr @ptr64, align 8
+  %v14 = load volatile i64, ptr @ptr64, align 8
+  %v15 = load volatile i64, ptr @ptr64, align 8
+  %v16 = load volatile i64, ptr @ptr64, align 8
+  %v17 = load volatile i64, ptr @ptr64, align 8
+  %v18 = load volatile i64, ptr @ptr64, align 8
+  %v19 = load volatile i64, ptr @ptr64, align 8
+  %v20 = load volatile i64, ptr @ptr64, align 8
+  %v21 = load volatile i64, ptr @ptr64, align 8
+  %v22 = load volatile i64, ptr @ptr64, align 8
+  %v23 = load volatile i64, ptr @ptr64, align 8
+  %v24 = load volatile i64, ptr @ptr64, align 8
+  %v25 = load volatile i64, ptr @ptr64, align 8
 
   ; this should exceed stack-relative addressing limits and need an emergency
   ; spill slot.
-  %s = getelementptr inbounds i8*, i8** %stack0, i64 4092
-  store volatile i8* null, i8** %s
-  store volatile i8* null, i8** %stack1
+  %s = getelementptr inbounds ptr, ptr %stack0, i64 4092
+  store volatile ptr null, ptr %s
+  store volatile ptr null, ptr %stack1
 
-  store volatile i64 %v0,  i64* @ptr64, align 8
-  store volatile i64 %v1,  i64* @ptr64, align 8
-  store volatile i64 %v2,  i64* @ptr64, align 8
-  store volatile i64 %v3,  i64* @ptr64, align 8
-  store volatile i64 %v4,  i64* @ptr64, align 8
-  store volatile i64 %v5,  i64* @ptr64, align 8
-  store volatile i64 %v6,  i64* @ptr64, align 8
-  store volatile i64 %v7,  i64* @ptr64, align 8
-  store volatile i64 %v8,  i64* @ptr64, align 8
-  store volatile i64 %v9,  i64* @ptr64, align 8
-  store volatile i64 %v10, i64* @ptr64, align 8
-  store volatile i64 %v11, i64* @ptr64, align 8
-  store volatile i64 %v12, i64* @ptr64, align 8
-  store volatile i64 %v13, i64* @ptr64, align 8
-  store volatile i64 %v14, i64* @ptr64, align 8
-  store volatile i64 %v15, i64* @ptr64, align 8
-  store volatile i64 %v16, i64* @ptr64, align 8
-  store volatile i64 %v17, i64* @ptr64, align 8
-  store volatile i64 %v18, i64* @ptr64, align 8
-  store volatile i64 %v19, i64* @ptr64, align 8
-  store volatile i64 %v20, i64* @ptr64, align 8
-  store volatile i64 %v21, i64* @ptr64, align 8
-  store volatile i64 %v22, i64* @ptr64, align 8
-  store volatile i64 %v23, i64* @ptr64, align 8
-  store volatile i64 %v24, i64* @ptr64, align 8
-  store volatile i64 %v25, i64* @ptr64, align 8
+  store volatile i64 %v0,  ptr @ptr64, align 8
+  store volatile i64 %v1,  ptr @ptr64, align 8
+  store volatile i64 %v2,  ptr @ptr64, align 8
+  store volatile i64 %v3,  ptr @ptr64, align 8
+  store volatile i64 %v4,  ptr @ptr64, align 8
+  store volatile i64 %v5,  ptr @ptr64, align 8
+  store volatile i64 %v6,  ptr @ptr64, align 8
+  store volatile i64 %v7,  ptr @ptr64, align 8
+  store volatile i64 %v8,  ptr @ptr64, align 8
+  store volatile i64 %v9,  ptr @ptr64, align 8
+  store volatile i64 %v10, ptr @ptr64, align 8
+  store volatile i64 %v11, ptr @ptr64, align 8
+  store volatile i64 %v12, ptr @ptr64, align 8
+  store volatile i64 %v13, ptr @ptr64, align 8
+  store volatile i64 %v14, ptr @ptr64, align 8
+  store volatile i64 %v15, ptr @ptr64, align 8
+  store volatile i64 %v16, ptr @ptr64, align 8
+  store volatile i64 %v17, ptr @ptr64, align 8
+  store volatile i64 %v18, ptr @ptr64, align 8
+  store volatile i64 %v19, ptr @ptr64, align 8
+  store volatile i64 %v20, ptr @ptr64, align 8
+  store volatile i64 %v21, ptr @ptr64, align 8
+  store volatile i64 %v22, ptr @ptr64, align 8
+  store volatile i64 %v23, ptr @ptr64, align 8
+  store volatile i64 %v24, ptr @ptr64, align 8
+  store volatile i64 %v25, ptr @ptr64, align 8
   
   ; use swiftself parameter late so it stays alive throughout the function.
-  store volatile i8* %arg, i8** @ptr8
+  store volatile ptr %arg, ptr @ptr8
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/swiftself.ll b/llvm/test/CodeGen/AArch64/swiftself.ll
index 544f4471e9817..3a8368afc4fd8 100644
--- a/llvm/test/CodeGen/AArch64/swiftself.ll
+++ b/llvm/test/CodeGen/AArch64/swiftself.ll
@@ -7,8 +7,8 @@
 ; CHECK-LABEL: swiftself_param:
 ; CHECK: mov x0, x20
 ; CHECK-NEXT: ret
-define i8* @swiftself_param(i8* swiftself %addr0) {
-  ret i8 *%addr0
+define ptr @swiftself_param(ptr swiftself %addr0) {
+  ret ptr %addr0
 }
 
 ; Check that x20 is used to pass a swiftself argument.
@@ -16,9 +16,9 @@ define i8* @swiftself_param(i8* swiftself %addr0) {
 ; CHECK: mov x20, x0
 ; CHECK: bl {{_?}}swiftself_param
 ; CHECK: ret
-define i8 *@call_swiftself(i8* %arg) {
-  %res = call i8 *@swiftself_param(i8* swiftself %arg)
-  ret i8 *%res
+define ptr @call_swiftself(ptr %arg) {
+  %res = call ptr @swiftself_param(ptr swiftself %arg)
+  ret ptr %res
 }
 
 ; x20 should be saved by the callee even if used for swiftself
@@ -27,9 +27,9 @@ define i8 *@call_swiftself(i8* %arg) {
 ; ...
 ; CHECK: {{ldp|ldr}} {{.*}}x20{{.*}}sp
 ; CHECK: ret
-define i8 *@swiftself_clobber(i8* swiftself %addr0) {
+define ptr @swiftself_clobber(ptr swiftself %addr0) {
   call void asm sideeffect "", "~{x20}"()
-  ret i8 *%addr0
+  ret ptr %addr0
 }
 
 ; Demonstrate that we do not need any movs when calling multiple functions
@@ -40,9 +40,9 @@ define i8 *@swiftself_clobber(i8* swiftself %addr0) {
 ; OPT-NOT: mov{{.*}}x20
 ; OPT-NEXT: bl {{_?}}swiftself_param
 ; OPT: ret
-define void @swiftself_passthrough(i8* swiftself %addr0) {
-  call i8 *@swiftself_param(i8* swiftself %addr0)
-  call i8 *@swiftself_param(i8* swiftself %addr0)
+define void @swiftself_passthrough(ptr swiftself %addr0) {
+  call ptr @swiftself_param(ptr swiftself %addr0)
+  call ptr @swiftself_param(ptr swiftself %addr0)
   ret void
 }
 
@@ -52,10 +52,10 @@ define void @swiftself_passthrough(i8* swiftself %addr0) {
 ; OPTAARCH64: b {{_?}}swiftself_param
 ; OPTAARCH64-NOT: ret
 ; OPTARM64_32: b {{_?}}swiftself_param
-define i8* @swiftself_tail(i8* swiftself %addr0) {
+define ptr @swiftself_tail(ptr swiftself %addr0) {
   call void asm sideeffect "", "~{x20}"()
-  %res = musttail call i8* @swiftself_param(i8* swiftself %addr0)
-  ret i8* %res
+  %res = musttail call ptr @swiftself_param(ptr swiftself %addr0)
+  ret ptr %res
 }
 
 ; We can not use a tail call if the callee swiftself is not the same as the
@@ -64,15 +64,15 @@ define i8* @swiftself_tail(i8* swiftself %addr0) {
 ; CHECK: mov x20, x0
 ; CHECK: bl {{_?}}swiftself_param
 ; CHECK: ret
-define i8* @swiftself_notail(i8* swiftself %addr0, i8* %addr1) nounwind {
-  %res = tail call i8* @swiftself_param(i8* swiftself %addr1)
-  ret i8* %res
+define ptr @swiftself_notail(ptr swiftself %addr0, ptr %addr1) nounwind {
+  %res = tail call ptr @swiftself_param(ptr swiftself %addr1)
+  ret ptr %res
 }
 
 ; We cannot pretend that 'x0' is alive across the thisreturn_attribute call as
 ; we normally would. We marked the first parameter with swiftself which means it
 ; will no longer be passed in x0.
-declare swiftcc i8* @thisreturn_attribute(i8* returned swiftself)
+declare swiftcc ptr @thisreturn_attribute(ptr returned swiftself)
 ; OPTAARCH64-LABEL: swiftself_nothisreturn:
 ; OPTAARCH64-DAG: ldr  x20, [x20]
 ; OPTAARCH64-DAG: mov [[CSREG:x[1-9].*]], x8
@@ -86,10 +86,10 @@ declare swiftcc i8* @thisreturn_attribute(i8* returned swiftself)
 ; OPTARM64_32: bl {{_?}}thisreturn_attribute
 ; OPTARM64_32: str w0, [[[CSREG]]
 ; OPTARM64_32: ret
-define hidden swiftcc void @swiftself_nothisreturn(i8** noalias nocapture sret(i8*), i8** noalias nocapture readonly swiftself) {
+define hidden swiftcc void @swiftself_nothisreturn(ptr noalias nocapture sret(ptr), ptr noalias nocapture readonly swiftself) {
 entry:
-  %2 = load i8*, i8** %1, align 8
-  %3 = tail call swiftcc i8* @thisreturn_attribute(i8* swiftself %2)
-  store i8* %3, i8** %0, align 8
+  %2 = load ptr, ptr %1, align 8
+  %3 = tail call swiftcc ptr @thisreturn_attribute(ptr swiftself %2)
+  store ptr %3, ptr %0, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/swifttail-arm64_32.ll b/llvm/test/CodeGen/AArch64/swifttail-arm64_32.ll
index 371eaedcf0d57..f70ac65583fc8 100644
--- a/llvm/test/CodeGen/AArch64/swifttail-arm64_32.ll
+++ b/llvm/test/CodeGen/AArch64/swifttail-arm64_32.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -mtriple=arm64_32-apple-watchos %s -o - | FileCheck %s
 
-declare swifttailcc void @pointer_align_callee([8 x i64], i32, i32, i32, i8*)
-define swifttailcc void @pointer_align_caller(i8* swiftasync %as, i8* %in) "frame-pointer"="all" {
+declare swifttailcc void @pointer_align_callee([8 x i64], i32, i32, i32, ptr)
+define swifttailcc void @pointer_align_caller(ptr swiftasync %as, ptr %in) "frame-pointer"="all" {
 ; CHECK-LABEL: pointer_align_caller:
 ; CHECK: sub sp, sp, #48
 ; CHECK: mov [[TWO:w[0-9]+]], #2
@@ -11,6 +11,6 @@ define swifttailcc void @pointer_align_caller(i8* swiftasync %as, i8* %in) "fram
 ; CHECK: add sp, sp, #32
 ; CHECK: b _pointer_align_callee
   alloca i32
-  musttail call swifttailcc void @pointer_align_callee([8 x i64] undef, i32 0, i32 1, i32 2, i8* %in)
+  musttail call swifttailcc void @pointer_align_callee([8 x i64] undef, i32 0, i32 1, i32 2, ptr %in)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/swifttail-call.ll b/llvm/test/CodeGen/AArch64/swifttail-call.ll
index 3efd9f98f40dc..01532b9e1cc5e 100644
--- a/llvm/test/CodeGen/AArch64/swifttail-call.ll
+++ b/llvm/test/CodeGen/AArch64/swifttail-call.ll
@@ -222,14 +222,14 @@ define swifttailcc void @fromtail_toC() #0 {
   ret void
 }
 
-declare swifttailcc i8* @SwiftSelf(i8 * swiftasync %context, i8* swiftself %closure)
-define swiftcc i8* @CallSwiftSelf(i8* swiftself %closure, i8* %context) #0 {
+declare swifttailcc ptr @SwiftSelf(ptr swiftasync %context, ptr swiftself %closure)
+define swiftcc ptr @CallSwiftSelf(ptr swiftself %closure, ptr %context) #0 {
 ; CHECK-LABEL: CallSwiftSelf:
 ; CHECK: stp x20
   ;call void asm "","~{r13}"() ; We get a push r13 but why not with the call
   ; below?
-  %res = call swifttailcc i8* @SwiftSelf(i8 * swiftasync %context, i8* swiftself %closure)
-  ret i8* %res
+  %res = call swifttailcc ptr @SwiftSelf(ptr swiftasync %context, ptr swiftself %closure)
+  ret ptr %res
 }
 
 attributes #0 = { uwtable }
\ No newline at end of file

diff  --git a/llvm/test/CodeGen/AArch64/tagged-globals-pic.ll b/llvm/test/CodeGen/AArch64/tagged-globals-pic.ll
index 2fc8cf546e3b4..6ee0dd193c3dc 100644
--- a/llvm/test/CodeGen/AArch64/tagged-globals-pic.ll
+++ b/llvm/test/CodeGen/AArch64/tagged-globals-pic.ll
@@ -21,13 +21,13 @@ target triple = "aarch64-unknown-linux-android"
 @global = external global i32
 declare void @func()
 
-define i32* @global_addr() #0 {
+define ptr @global_addr() #0 {
   ; CHECK-PIC: global_addr:
   ; CHECK-PIC: adrp [[REG:x[0-9]+]], :got:global
   ; CHECK-PIC: ldr x0, [[[REG]], :got_lo12:global]
   ; CHECK-PIC: ret
 
-  ret i32* @global
+  ret ptr @global
 }
 
 define i32 @global_load() #0 {
@@ -49,7 +49,7 @@ define i32 @global_load() #0 {
   ; CHECK-PIC: ldr w0, [[[REG]]]
   ; CHECK-PIC: ret
 
-  %load = load i32, i32* @global
+  %load = load i32, ptr @global
   ret i32 %load
 }
 
@@ -72,17 +72,17 @@ define void @global_store() #0 {
   ; CHECK-PIC: str wzr, [[[REG]]]
   ; CHECK-PIC: ret
 
-  store i32 0, i32* @global
+  store i32 0, ptr @global
   ret void
 }
 
-define void ()* @func_addr() #0 {
+define ptr @func_addr() #0 {
   ; CHECK-PIC: func_addr:
   ; CHECK-PIC: adrp [[REG:x[0-9]+]], :got:func
   ; CHECK-PIC: ldr  x0, [[[REG]], :got_lo12:func]
   ; CHECK-PIC: ret
 
-  ret void ()* @func
+  ret ptr @func
 }
 
 attributes #0 = { "target-features"="+tagged-globals" }

diff  --git a/llvm/test/CodeGen/AArch64/tagged-globals-static.ll b/llvm/test/CodeGen/AArch64/tagged-globals-static.ll
index 4f2719ee7543e..70114e1c2cf05 100644
--- a/llvm/test/CodeGen/AArch64/tagged-globals-static.ll
+++ b/llvm/test/CodeGen/AArch64/tagged-globals-static.ll
@@ -10,7 +10,7 @@ target triple = "aarch64-unknown-linux-android"
 @global = external dso_local global i32
 declare dso_local void @func()
 
-define i32* @global_addr() #0 {
+define ptr @global_addr() #0 {
   ; Static relocation model has common codegen between SelectionDAGISel and
   ; GlobalISel when the address-taken of a global isn't folded into a load or
   ; store instruction.
@@ -20,7 +20,7 @@ define i32* @global_addr() #0 {
   ; CHECK-STATIC: add x0, [[REG]], :lo12:global
   ; CHECK-STATIC: ret
 
-  ret i32* @global
+  ret ptr @global
 }
 
 define i32 @global_load() #0 {
@@ -36,7 +36,7 @@ define i32 @global_load() #0 {
   ; CHECK-GLOBALISEL: ldr w0, [[[REG]]]
   ; CHECK-GLOBALISEL: ret
 
-  %load = load i32, i32* @global
+  %load = load i32, ptr @global
   ret i32 %load
 }
 
@@ -53,17 +53,17 @@ define void @global_store() #0 {
   ; CHECK-GLOBALISEL: str wzr, [[[REG]]]
   ; CHECK-GLOBALISEL: ret
 
-  store i32 0, i32* @global
+  store i32 0, ptr @global
   ret void
 }
 
-define void ()* @func_addr() #0 {
+define ptr @func_addr() #0 {
   ; CHECK-STATIC: func_addr:
   ; CHECK-STATIC: adrp [[REG:x[0-9]+]], func
   ; CHECK-STATIC: add x0, [[REG]], :lo12:func
   ; CHECK-STATIC: ret
 
-  ret void ()* @func
+  ret ptr @func
 }
 
 attributes #0 = { "target-features"="+tagged-globals" }

diff  --git a/llvm/test/CodeGen/AArch64/tagp.ll b/llvm/test/CodeGen/AArch64/tagp.ll
index 0af6538981817..df7b47a87989d 100644
--- a/llvm/test/CodeGen/AArch64/tagp.ll
+++ b/llvm/test/CodeGen/AArch64/tagp.ll
@@ -1,17 +1,17 @@
 ; RUN: llc < %s -mtriple=aarch64 -mattr=+mte | FileCheck %s
 
-define i8* @tagp2(i8* %p, i8* %tag) {
+define ptr @tagp2(ptr %p, ptr %tag) {
 entry:
 ; CHECK-LABEL: tagp2:
 ; CHECK: subp [[R:x[0-9]+]], x0, x1
 ; CHECK: add  [[R]], [[R]], x1
 ; CHECK: addg x0, [[R]], #0, #2
 ; CHECK: ret
-  %q = call i8* @llvm.aarch64.tagp.p0i8(i8* %p, i8* %tag, i64 2)
-  ret i8* %q
+  %q = call ptr @llvm.aarch64.tagp.p0(ptr %p, ptr %tag, i64 2)
+  ret ptr %q
 }
 
-define i8* @irg_tagp_unrelated(i8* %p, i8* %q) {
+define ptr @irg_tagp_unrelated(ptr %p, ptr %q) {
 entry:
 ; CHECK-LABEL: irg_tagp_unrelated:
 ; CHECK: irg  [[R0:x[0-9]+]], x0{{$}}
@@ -19,12 +19,12 @@ entry:
 ; CHECK: add  [[R]], [[R0]], x1
 ; CHECK: addg x0, [[R]], #0, #1
 ; CHECK: ret
-  %p1 = call i8* @llvm.aarch64.irg(i8* %p, i64 0)
-  %q1 = call i8* @llvm.aarch64.tagp.p0i8(i8* %p1, i8* %q, i64 1)
-  ret i8* %q1
+  %p1 = call ptr @llvm.aarch64.irg(ptr %p, i64 0)
+  %q1 = call ptr @llvm.aarch64.tagp.p0(ptr %p1, ptr %q, i64 1)
+  ret ptr %q1
 }
 
-define i8* @tagp_alloca(i8* %tag) {
+define ptr @tagp_alloca(ptr %tag) {
 entry:
 ; CHECK-LABEL: tagp_alloca:
 ; CHECK: mov  [[R0:x[0-9]+]], sp{{$}}
@@ -33,9 +33,9 @@ entry:
 ; CHECK: addg x0, [[R]], #0, #3
 ; CHECK: ret
   %a = alloca i8, align 16
-  %q = call i8* @llvm.aarch64.tagp.p0i8(i8* %a, i8* %tag, i64 3)
-  ret i8* %q
+  %q = call ptr @llvm.aarch64.tagp.p0(ptr %a, ptr %tag, i64 3)
+  ret ptr %q
 }
 
-declare i8* @llvm.aarch64.irg(i8* %p, i64 %exclude)
-declare i8* @llvm.aarch64.tagp.p0i8(i8* %p, i8* %tag, i64 %ofs)
+declare ptr @llvm.aarch64.irg(ptr %p, i64 %exclude)
+declare ptr @llvm.aarch64.tagp.p0(ptr %p, ptr %tag, i64 %ofs)

diff  --git a/llvm/test/CodeGen/AArch64/tailcall-bitcast-memcpy.ll b/llvm/test/CodeGen/AArch64/tailcall-bitcast-memcpy.ll
index 88a07498ab3bb..0f337eea836c4 100644
--- a/llvm/test/CodeGen/AArch64/tailcall-bitcast-memcpy.ll
+++ b/llvm/test/CodeGen/AArch64/tailcall-bitcast-memcpy.ll
@@ -7,12 +7,10 @@ target triple = "aarch64-arm-none-eabi"
 ;CHECK-NOT: mov
 ;CHECK-NOT: ldp
 ;CHECK-NEXT: b memcpy
-define dso_local i32* @wmemcpy(i32* returned, i32* nocapture readonly, i64) local_unnamed_addr {
-  %4 = bitcast i32* %0 to i8*
-  %5 = bitcast i32* %1 to i8*
-  %6 = shl i64 %2, 2
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %4, i8* align 4 %5, i64 %6, i1 false)
-  ret i32* %0
+define dso_local ptr @wmemcpy(ptr returned, ptr nocapture readonly, i64) local_unnamed_addr {
+  %4 = shl i64 %2, 2
+  tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 %4, i1 false)
+  ret ptr %0
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)

diff  --git a/llvm/test/CodeGen/AArch64/tailcall-explicit-sret.ll b/llvm/test/CodeGen/AArch64/tailcall-explicit-sret.ll
index acbf3756141ba..c74bfb994998b 100644
--- a/llvm/test/CodeGen/AArch64/tailcall-explicit-sret.ll
+++ b/llvm/test/CodeGen/AArch64/tailcall-explicit-sret.ll
@@ -4,14 +4,14 @@
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
 ; Check that we don't try to tail-call with a non-forwarded sret parameter.
-declare void @test_explicit_sret(i1024* sret(i1024)) #0
+declare void @test_explicit_sret(ptr sret(i1024)) #0
 
 ; This is the only OK case, where we forward the explicit sret pointer.
 
 ; CHECK-LABEL: _test_tailcall_explicit_sret:
 ; CHECK-NEXT: b _test_explicit_sret
-define void @test_tailcall_explicit_sret(i1024* sret(i1024) %arg) #0 {
-  tail call void @test_explicit_sret(i1024* sret(i1024) %arg)
+define void @test_tailcall_explicit_sret(ptr sret(i1024) %arg) #0 {
+  tail call void @test_explicit_sret(ptr sret(i1024) %arg)
   ret void
 }
 
@@ -19,8 +19,8 @@ define void @test_tailcall_explicit_sret(i1024* sret(i1024) %arg) #0 {
 ; CHECK-NOT: mov  x8
 ; CHECK: bl _test_explicit_sret
 ; CHECK: ret
-define void @test_call_explicit_sret(i1024* sret(i1024) %arg) #0 {
-  call void @test_explicit_sret(i1024* sret(i1024) %arg)
+define void @test_call_explicit_sret(ptr sret(i1024) %arg) #0 {
+  call void @test_explicit_sret(ptr sret(i1024) %arg)
   ret void
 }
 
@@ -30,7 +30,7 @@ define void @test_call_explicit_sret(i1024* sret(i1024) %arg) #0 {
 ; CHECK: ret
 define void @test_tailcall_explicit_sret_alloca_unused() #0 {
   %l = alloca i1024, align 8
-  tail call void @test_explicit_sret(i1024* sret(i1024) %l)
+  tail call void @test_explicit_sret(ptr sret(i1024) %l)
   ret void
 }
 
@@ -40,11 +40,11 @@ define void @test_tailcall_explicit_sret_alloca_unused() #0 {
 ; CHECK: mov  x8, sp
 ; CHECK-NEXT: bl _test_explicit_sret
 ; CHECK: ret
-define void @test_tailcall_explicit_sret_alloca_dummyusers(i1024* %ptr) #0 {
+define void @test_tailcall_explicit_sret_alloca_dummyusers(ptr %ptr) #0 {
   %l = alloca i1024, align 8
-  %r = load i1024, i1024* %ptr, align 8
-  store i1024 %r, i1024* %l, align 8
-  tail call void @test_explicit_sret(i1024* sret(i1024) %l)
+  %r = load i1024, ptr %ptr, align 8
+  store i1024 %r, ptr %l, align 8
+  tail call void @test_explicit_sret(ptr sret(i1024) %l)
   ret void
 }
 
@@ -54,9 +54,9 @@ define void @test_tailcall_explicit_sret_alloca_dummyusers(i1024* %ptr) #0 {
 ; CHECK: add  x8, x0, #128
 ; CHECK-NEXT: bl _test_explicit_sret
 ; CHECK: ret
-define void @test_tailcall_explicit_sret_gep(i1024* %ptr) #0 {
-  %ptr2 = getelementptr i1024, i1024* %ptr, i32 1
-  tail call void @test_explicit_sret(i1024* sret(i1024) %ptr2)
+define void @test_tailcall_explicit_sret_gep(ptr %ptr) #0 {
+  %ptr2 = getelementptr i1024, ptr %ptr, i32 1
+  tail call void @test_explicit_sret(ptr sret(i1024) %ptr2)
   ret void
 }
 
@@ -69,8 +69,8 @@ define void @test_tailcall_explicit_sret_gep(i1024* %ptr) #0 {
 ; CHECK: ret
 define i1024 @test_tailcall_explicit_sret_alloca_returned() #0 {
   %l = alloca i1024, align 8
-  tail call void @test_explicit_sret(i1024* sret(i1024) %l)
-  %r = load i1024, i1024* %l, align 8
+  tail call void @test_explicit_sret(ptr sret(i1024) %l)
+  %r = load i1024, ptr %l, align 8
   ret i1024 %r
 }
 
@@ -82,11 +82,11 @@ define i1024 @test_tailcall_explicit_sret_alloca_returned() #0 {
 ; CHECK: ldr [[CALLERSRET1:q[0-9]+]], [sp]
 ; CHECK: str [[CALLERSRET1:q[0-9]+]], [x[[CALLERX8NUM]]]
 ; CHECK: ret
-define void @test_indirect_tailcall_explicit_sret_nosret_arg(i1024* sret(i1024) %arg, void (i1024*)* %f) #0 {
+define void @test_indirect_tailcall_explicit_sret_nosret_arg(ptr sret(i1024) %arg, ptr %f) #0 {
   %l = alloca i1024, align 8
-  tail call void %f(i1024* %l)
-  %r = load i1024, i1024* %l, align 8
-  store i1024 %r, i1024* %arg, align 8
+  tail call void %f(ptr %l)
+  %r = load i1024, ptr %l, align 8
+  store i1024 %r, ptr %arg, align 8
   ret void
 }
 
@@ -97,9 +97,9 @@ define void @test_indirect_tailcall_explicit_sret_nosret_arg(i1024* sret(i1024)
 ; CHECK: ldr [[CALLERSRET1:q[0-9]+]], [sp]
 ; CHECK: str [[CALLERSRET1:q[0-9]+]], [x[[CALLERX8NUM]]]
 ; CHECK: ret
-define void @test_indirect_tailcall_explicit_sret_(i1024* sret(i1024) %arg, i1024 ()* %f) #0 {
+define void @test_indirect_tailcall_explicit_sret_(ptr sret(i1024) %arg, ptr %f) #0 {
   %ret = tail call i1024 %f()
-  store i1024 %ret, i1024* %arg, align 8
+  store i1024 %ret, ptr %arg, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/tailcall-implicit-sret.ll b/llvm/test/CodeGen/AArch64/tailcall-implicit-sret.ll
index f449a7e06588a..857bc7c93c2e1 100644
--- a/llvm/test/CodeGen/AArch64/tailcall-implicit-sret.ll
+++ b/llvm/test/CodeGen/AArch64/tailcall-implicit-sret.ll
@@ -38,7 +38,7 @@ define i1024 @test_tailcall_sret() #0 {
 ; CHECK: ldr [[CALLERSRET1:q[0-9]+]], [sp]
 ; CHECK: str [[CALLERSRET1:q[0-9]+]], [x[[CALLERX8NUM]]]
 ; CHECK: ret
-define i1024 @test_indirect_tailcall_sret(i1024 ()* %f) #0 {
+define i1024 @test_indirect_tailcall_sret(ptr %f) #0 {
   %a = tail call i1024 %f()
   ret i1024 %a
 }

diff  --git a/llvm/test/CodeGen/AArch64/tailcall-mem-intrinsics.ll b/llvm/test/CodeGen/AArch64/tailcall-mem-intrinsics.ll
index 8fd302fe553c1..1c405e5346a74 100644
--- a/llvm/test/CodeGen/AArch64/tailcall-mem-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/tailcall-mem-intrinsics.ll
@@ -3,30 +3,30 @@
 
 ; CHECK-LABEL: tail_memcpy:
 ; CHECK: b memcpy
-define void @tail_memcpy(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 {
+define void @tail_memcpy(ptr nocapture %p, ptr nocapture readonly %q, i32 %n) #0 {
 entry:
-  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i32(ptr %p, ptr %q, i32 %n, i1 false)
   ret void
 }
 
 ; CHECK-LABEL: tail_memmove:
 ; CHECK: b memmove
-define void @tail_memmove(i8* nocapture %p, i8* nocapture readonly %q, i32 %n) #0 {
+define void @tail_memmove(ptr nocapture %p, ptr nocapture readonly %q, i32 %n) #0 {
 entry:
-  tail call void @llvm.memmove.p0i8.p0i8.i32(i8* %p, i8* %q, i32 %n, i1 false)
+  tail call void @llvm.memmove.p0.p0.i32(ptr %p, ptr %q, i32 %n, i1 false)
   ret void
 }
 
 ; CHECK-LABEL: tail_memset:
 ; CHECK: b memset
-define void @tail_memset(i8* nocapture %p, i8 %c, i32 %n) #0 {
+define void @tail_memset(ptr nocapture %p, i8 %c, i32 %n) #0 {
 entry:
-  tail call void @llvm.memset.p0i8.i32(i8* %p, i8 %c, i32 %n, i1 false)
+  tail call void @llvm.memset.p0.i32(ptr %p, i8 %c, i32 %n, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) #0
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1) #0
+declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1) #0
+declare void @llvm.memset.p0.i32(ptr nocapture, i8, i32, i1) #0
 
 attributes #0 = { nounwind }

diff  --git a/llvm/test/CodeGen/AArch64/tailcall-ssp-split-debug.ll b/llvm/test/CodeGen/AArch64/tailcall-ssp-split-debug.ll
index b439fdbeb0dae..3338485bb5a55 100644
--- a/llvm/test/CodeGen/AArch64/tailcall-ssp-split-debug.ll
+++ b/llvm/test/CodeGen/AArch64/tailcall-ssp-split-debug.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=arm64-apple-ios %s -o - | FileCheck %s
 
-define swifttailcc void @foo(i8* %call) ssp {
+define swifttailcc void @foo(ptr %call) ssp {
 ; CHECK-LABEL: foo:
   %var = alloca [28 x i8], align 16
   br i1 undef, label %if.then, label %if.end
@@ -12,13 +12,13 @@ if.end:
   ; CHECK: mov x[[NULL:[0-9]+]], xzr
   ; CHECK: ldr [[FPTR:x[0-9]+]], [x[[NULL]]]
   ; CHECK: br [[FPTR]]
-  call void @llvm.dbg.value(metadata i8* %call, metadata !19, metadata !DIExpression()), !dbg !21
-  %fptr = load void (i8*)*, void (i8*)** null, align 8
-  musttail call swifttailcc void %fptr(i8* null)
+  call void @llvm.dbg.value(metadata ptr %call, metadata !19, metadata !DIExpression()), !dbg !21
+  %fptr = load ptr, ptr null, align 8
+  musttail call swifttailcc void %fptr(ptr null)
   ret void
 }
 
-declare i8* @pthread_getspecific()
+declare ptr @pthread_getspecific()
 
 ; Function Attrs: nofree nosync nounwind readnone speculatable willreturn
 declare void @llvm.dbg.value(metadata, metadata, metadata) #1

diff  --git a/llvm/test/CodeGen/AArch64/tailcall-string-rvo.ll b/llvm/test/CodeGen/AArch64/tailcall-string-rvo.ll
index 49ac1681e67ae..5c24f3f0d5bc6 100644
--- a/llvm/test/CodeGen/AArch64/tailcall-string-rvo.ll
+++ b/llvm/test/CodeGen/AArch64/tailcall-string-rvo.ll
@@ -14,35 +14,33 @@ target triple = "aarch64-linux-gnu"
 %class.basic_string.11.42.73 = type { %"class.__gnu_cxx::__versa_string.10.41.72" }
 %"class.__gnu_cxx::__versa_string.10.41.72" = type { %"class.__gnu_cxx::__sso_string_base.9.40.71" }
 %"class.__gnu_cxx::__sso_string_base.9.40.71" = type { %"struct.__gnu_cxx::__vstring_utility<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.7.38.69", i64, %union.anon.8.39.70 }
-%"struct.__gnu_cxx::__vstring_utility<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.7.38.69" = type { i8* }
+%"struct.__gnu_cxx::__vstring_utility<char, std::char_traits<char>, std::allocator<char> >::_Alloc_hider.7.38.69" = type { ptr }
 %union.anon.8.39.70 = type { i64, [8 x i8] }
 
-declare void @TestBaz(%class.basic_string.11.42.73* noalias sret(%class.basic_string.11.42.73) %arg)
+declare void @TestBaz(ptr noalias sret(%class.basic_string.11.42.73) %arg)
 
-define void @TestBar(%class.basic_string.11.42.73* noalias sret(%class.basic_string.11.42.73) %arg) {
+define void @TestBar(ptr noalias sret(%class.basic_string.11.42.73) %arg) {
 bb:
-  call void @TestBaz(%class.basic_string.11.42.73* noalias sret(%class.basic_string.11.42.73) %arg)
+  call void @TestBaz(ptr noalias sret(%class.basic_string.11.42.73) %arg)
   ret void
 }
 
-define void @TestFoo(%class.basic_string.11.42.73* noalias sret(%class.basic_string.11.42.73) %arg) {
+define void @TestFoo(ptr noalias sret(%class.basic_string.11.42.73) %arg) {
 ; CHECK-LABEL: TestFoo:
 ; CHECK: b TestBar
 bb:
-  %tmp = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 2
-  %tmp1 = bitcast %class.basic_string.11.42.73* %arg to %union.anon.8.39.70**
-  store %union.anon.8.39.70* %tmp, %union.anon.8.39.70** %tmp1, align 8
-  %tmp2 = bitcast %union.anon.8.39.70* %tmp to i8*
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp2, i8* nonnull undef, i64 13, i1 false)
-  %tmp3 = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 1
-  store i64 13, i64* %tmp3, align 8
-  %tmp4 = getelementptr inbounds %class.basic_string.11.42.73, %class.basic_string.11.42.73* %arg, i64 0, i32 0, i32 0, i32 2, i32 1, i64 5
-  store i8 0, i8* %tmp4, align 1
-  tail call void @TestBar(%class.basic_string.11.42.73* noalias sret(%class.basic_string.11.42.73) %arg)
+  %tmp = getelementptr inbounds %class.basic_string.11.42.73, ptr %arg, i64 0, i32 0, i32 0, i32 2
+  store ptr %tmp, ptr %arg, align 8
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %tmp, ptr nonnull undef, i64 13, i1 false)
+  %tmp3 = getelementptr inbounds %class.basic_string.11.42.73, ptr %arg, i64 0, i32 0, i32 0, i32 1
+  store i64 13, ptr %tmp3, align 8
+  %tmp4 = getelementptr inbounds %class.basic_string.11.42.73, ptr %arg, i64 0, i32 0, i32 0, i32 2, i32 1, i64 5
+  store i8 0, ptr %tmp4, align 1
+  tail call void @TestBar(ptr noalias sret(%class.basic_string.11.42.73) %arg)
   ret void
 }
 
 ; Function Attrs: argmemonly nounwind
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #0
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #0
 
 attributes #0 = { argmemonly nounwind }

diff  --git a/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll b/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll
index 776ec397733b8..3ffbb47d7990a 100644
--- a/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll
+++ b/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll
@@ -6,14 +6,14 @@
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 target triple = "arm64-apple-ios7.0.0"
 
-define void @caller2(i8* %a0, i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9) {
+define void @caller2(ptr %a0, ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5, ptr %a6, ptr %a7, ptr %a8, ptr %a9) {
 entry:
-  tail call void @callee2(i8* %a1, i8* %a2, i8* %a3, i8* %a4, i8* %a5, i8* %a6, i8* %a7, i8* %a8, i8* %a9, i8* %a0)
+  tail call void @callee2(ptr %a1, ptr %a2, ptr %a3, ptr %a4, ptr %a5, ptr %a6, ptr %a7, ptr %a8, ptr %a9, ptr %a0)
   ret void
 }
 
-declare void @callee2(i8*, i8*, i8*, i8*, i8*,
-                      i8*, i8*, i8*, i8*, i8*)
+declare void @callee2(ptr, ptr, ptr, ptr, ptr,
+                      ptr, ptr, ptr, ptr, ptr)
 
 ; Make sure there is a dependence between the load and store to the same stack
 ; location during a tail call. Tail calls clobber the incoming argument area and

diff  --git a/llvm/test/CodeGen/AArch64/taildup-cfi.ll b/llvm/test/CodeGen/AArch64/taildup-cfi.ll
index 11f6ff189cffa..23b4891752d2c 100644
--- a/llvm/test/CodeGen/AArch64/taildup-cfi.ll
+++ b/llvm/test/CodeGen/AArch64/taildup-cfi.ll
@@ -18,18 +18,18 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 @h = common local_unnamed_addr global i32 0, align 4
 
 ; Function Attrs: norecurse nounwind uwtable
-define void @n(i32 %o, i32* nocapture readonly %b) local_unnamed_addr #0 {
+define void @n(i32 %o, ptr nocapture readonly %b) local_unnamed_addr #0 {
 entry:
-  %0 = load i32, i32* @g, align 4, !tbaa !2
+  %0 = load i32, ptr @g, align 4, !tbaa !2
   %tobool = icmp eq i32 %0, 0
   br i1 %tobool, label %entry.if.end_crit_edge, label %if.then
 
 entry.if.end_crit_edge:                           ; preds = %entry
-  %.pre = load i32, i32* @f, align 4, !tbaa !2
+  %.pre = load i32, ptr @f, align 4, !tbaa !2
   br label %if.end
 
 if.then:                                          ; preds = %entry
-  store i32 0, i32* @f, align 4, !tbaa !2
+  store i32 0, ptr @f, align 4, !tbaa !2
   br label %if.end
 
 ; DARWIN:           Merging into block
@@ -41,11 +41,11 @@ if.end:                                           ; preds = %entry.if.end_crit_e
   br i1 %cmp6, label %for.body.lr.ph, label %for.end
 
 for.body.lr.ph:                                   ; preds = %if.end
-  %.pre7 = load i32, i32* @a, align 4, !tbaa !2
-  %.pre8 = load i32, i32* @l, align 4, !tbaa !2
-  %.pre9 = load i32, i32* @j, align 4, !tbaa !2
-  %.pre10 = load i32, i32* @k, align 4, !tbaa !2
-  %.pre11 = load i32, i32* @i, align 4, !tbaa !2
+  %.pre7 = load i32, ptr @a, align 4, !tbaa !2
+  %.pre8 = load i32, ptr @l, align 4, !tbaa !2
+  %.pre9 = load i32, ptr @j, align 4, !tbaa !2
+  %.pre10 = load i32, ptr @k, align 4, !tbaa !2
+  %.pre11 = load i32, ptr @i, align 4, !tbaa !2
   br label %for.body
 
 for.body:                                         ; preds = %if.end5, %for.body.lr.ph
@@ -53,7 +53,7 @@ for.body:                                         ; preds = %if.end5, %for.body.
   %3 = phi i32 [ %.pre10, %for.body.lr.ph ], [ %8, %if.end5 ]
   %4 = phi i32 [ %.pre9, %for.body.lr.ph ], [ %9, %if.end5 ]
   %5 = phi i32 [ %1, %for.body.lr.ph ], [ %inc, %if.end5 ]
-  store i32 %.pre7, i32* @m, align 4, !tbaa !2
+  store i32 %.pre7, ptr @m, align 4, !tbaa !2
   %mul = mul nsw i32 %3, %4
   %cmp1 = icmp sgt i32 %.pre8, %mul
   %conv = zext i1 %cmp1 to i32
@@ -61,25 +61,25 @@ for.body:                                         ; preds = %if.end5, %for.body.
   br i1 %cmp2, label %if.then4, label %if.end5
 
 if.then4:                                         ; preds = %for.body
-  %6 = load i32, i32* @d, align 4, !tbaa !2
-  store i32 %6, i32* @k, align 4, !tbaa !2
-  store i32 %6, i32* @i, align 4, !tbaa !2
-  store i32 %6, i32* @j, align 4, !tbaa !2
+  %6 = load i32, ptr @d, align 4, !tbaa !2
+  store i32 %6, ptr @k, align 4, !tbaa !2
+  store i32 %6, ptr @i, align 4, !tbaa !2
+  store i32 %6, ptr @j, align 4, !tbaa !2
   br label %if.end5
 
 if.end5:                                          ; preds = %if.then4, %for.body
   %7 = phi i32 [ %6, %if.then4 ], [ %2, %for.body ]
   %8 = phi i32 [ %6, %if.then4 ], [ %3, %for.body ]
   %9 = phi i32 [ %6, %if.then4 ], [ %4, %for.body ]
-  %10 = load i32, i32* @c, align 4, !tbaa !2
+  %10 = load i32, ptr @c, align 4, !tbaa !2
   %idxprom = sext i32 %10 to i64
-  %arrayidx = getelementptr inbounds i32, i32* %b, i64 %idxprom
-  %11 = load i32, i32* %arrayidx, align 4, !tbaa !2
-  %12 = load i32, i32* @e, align 4, !tbaa !2
+  %arrayidx = getelementptr inbounds i32, ptr %b, i64 %idxprom
+  %11 = load i32, ptr %arrayidx, align 4, !tbaa !2
+  %12 = load i32, ptr @e, align 4, !tbaa !2
   %sub = sub nsw i32 %11, %12
-  store i32 %sub, i32* @h, align 4, !tbaa !2
+  store i32 %sub, ptr @h, align 4, !tbaa !2
   %inc = add nsw i32 %5, 1
-  store i32 %inc, i32* @f, align 4, !tbaa !2
+  store i32 %inc, ptr @f, align 4, !tbaa !2
   %exitcond = icmp eq i32 %inc, %o
   br i1 %exitcond, label %for.end, label %for.body
 

diff  --git a/llvm/test/CodeGen/AArch64/tailmerging_in_mbp.ll b/llvm/test/CodeGen/AArch64/tailmerging_in_mbp.ll
index ded6a054363c1..9c20aed6cf9cc 100644
--- a/llvm/test/CodeGen/AArch64/tailmerging_in_mbp.ll
+++ b/llvm/test/CodeGen/AArch64/tailmerging_in_mbp.ll
@@ -5,7 +5,7 @@
 ; CHECK-LABEL: %cond.false12.i
 ; CHECK:       csel x10, x8, x9, gt
 ; CHECK-NEXT:  b.le .LBB0_11
-define i64 @test(i64 %n, i64* %a, i64* %b, i64* %c, i64* %d, i64* %e, i64* %f) {
+define i64 @test(i64 %n, ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f) {
 entry:
   %cmp28 = icmp sgt i64 %n, 1
   br i1 %cmp28, label %for.body, label %for.end
@@ -13,10 +13,10 @@ entry:
 for.body:                                         ; preds = %for.body.lr.ph, %if.end
   %j = phi i64 [ %n, %entry ], [ %div, %if.end ]
   %div = lshr i64 %j, 1
-  %a.arrayidx = getelementptr inbounds i64, i64* %a, i64 %div
-  %a.j = load i64, i64* %a.arrayidx
-  %b.arrayidx = getelementptr inbounds i64, i64* %b, i64 %div
-  %b.j = load i64, i64* %b.arrayidx
+  %a.arrayidx = getelementptr inbounds i64, ptr %a, i64 %div
+  %a.j = load i64, ptr %a.arrayidx
+  %b.arrayidx = getelementptr inbounds i64, ptr %b, i64 %div
+  %b.j = load i64, ptr %b.arrayidx
   %cmp.i = icmp slt i64 %a.j, %b.j
   br i1 %cmp.i, label %for.end.loopexit, label %cond.false.i
 
@@ -25,10 +25,10 @@ cond.false.i:                                     ; preds = %for.body
   br i1 %cmp4.i, label %if.end, label %cond.false6.i
 
 cond.false6.i:                                    ; preds = %cond.false.i
-  %c.arrayidx = getelementptr inbounds i64, i64* %c, i64 %div
-  %c.j = load i64, i64* %c.arrayidx
-  %d.arrayidx = getelementptr inbounds i64, i64* %d, i64 %div
-  %d.j = load i64, i64* %d.arrayidx
+  %c.arrayidx = getelementptr inbounds i64, ptr %c, i64 %div
+  %c.j = load i64, ptr %c.arrayidx
+  %d.arrayidx = getelementptr inbounds i64, ptr %d, i64 %div
+  %d.j = load i64, ptr %d.arrayidx
   %cmp9.i = icmp slt i64 %c.j, %d.j
   br i1 %cmp9.i, label %for.end.loopexit, label %cond.false11.i
 
@@ -37,10 +37,10 @@ cond.false11.i:                                   ; preds = %cond.false6.i
   br i1 %cmp14.i, label %if.end, label %cond.false12.i
 
 cond.false12.i:                           ; preds = %cond.false11.i
-  %e.arrayidx = getelementptr inbounds i64, i64* %e, i64 %div
-  %e.j = load i64, i64* %e.arrayidx
-  %f.arrayidx = getelementptr inbounds i64, i64* %f, i64 %div
-  %f.j = load i64, i64* %f.arrayidx
+  %e.arrayidx = getelementptr inbounds i64, ptr %e, i64 %div
+  %e.j = load i64, ptr %e.arrayidx
+  %f.arrayidx = getelementptr inbounds i64, ptr %f, i64 %div
+  %f.j = load i64, ptr %f.arrayidx
   %cmp19.i = icmp sgt i64 %e.j, %f.j
   br i1 %cmp19.i, label %if.end, label %for.end.loopexit
 

diff  --git a/llvm/test/CodeGen/AArch64/tbi.ll b/llvm/test/CodeGen/AArch64/tbi.ll
index 153bd4e6438d8..285726a485b87 100644
--- a/llvm/test/CodeGen/AArch64/tbi.ll
+++ b/llvm/test/CodeGen/AArch64/tbi.ll
@@ -8,8 +8,8 @@
 ; NO_TBI: and x
 define i32 @ld_and32(i64 %p) {
   %and = and i64 %p, 72057594037927935
-  %cast = inttoptr i64 %and to i32*
-  %load = load i32, i32* %cast
+  %cast = inttoptr i64 %and to ptr
+  %load = load i32, ptr %cast
   ret i32 %load
 }
 
@@ -19,9 +19,9 @@ define i32 @ld_and32(i64 %p) {
 ; NO_TBI: and x
 define i32 @ld_and_plus_offset(i64 %p) {
   %and = and i64 %p, 72057594037927935
-  %cast = inttoptr i64 %and to i32*
-  %gep = getelementptr i32, i32* %cast, i64 4
-  %load = load i32, i32* %gep
+  %cast = inttoptr i64 %and to ptr
+  %gep = getelementptr i32, ptr %cast, i64 4
+  %load = load i32, ptr %gep
   ret i32 %load
 }
 
@@ -31,8 +31,8 @@ define i32 @ld_and_plus_offset(i64 %p) {
 ; NO_TBI: and x
 define i32 @ld_and32_wider(i64 %p) {
   %and = and i64 %p, 1152921504606846975
-  %cast = inttoptr i64 %and to i32*
-  %load = load i32, i32* %cast
+  %cast = inttoptr i64 %and to ptr
+  %load = load i32, ptr %cast
   ret i32 %load
 }
 
@@ -41,8 +41,8 @@ define i32 @ld_and32_wider(i64 %p) {
 ; NO_TBI: and x
 define i64 @ld_and64(i64 %p) {
   %and = and i64 %p, 72057594037927935
-  %cast = inttoptr i64 %and to i64*
-  %load = load i64, i64* %cast
+  %cast = inttoptr i64 %and to ptr
+  %load = load i64, ptr %cast
   ret i64 %load
 }
 
@@ -51,8 +51,8 @@ define i64 @ld_and64(i64 %p) {
 ; NO_TBI: and x
 define void @st_and32(i64 %p, i32 %v) {
   %and = and i64 %p, 72057594037927935
-  %cast = inttoptr i64 %and to i32*
-  store i32 %v, i32* %cast
+  %cast = inttoptr i64 %and to ptr
+  store i32 %v, ptr %cast
   ret void
 }
 
@@ -63,8 +63,8 @@ define void @st_and32(i64 %p, i32 %v) {
 define i32 @ld_ro(i64 %a, i64 %b) {
   %p = add i64 %a, %b
   %and = and i64 %p, 72057594037927935
-  %cast = inttoptr i64 %and to i32*
-  %load = load i32, i32* %cast
+  %cast = inttoptr i64 %and to ptr
+  %load = load i32, ptr %cast
   ret i32 %load
 }
 
@@ -75,8 +75,8 @@ define i32 @ld_ro(i64 %a, i64 %b) {
 define i32 @ld_ro2(i64 %a, i64 %b) {
   %and = and i64 %a, 72057594037927935
   %p = add i64 %and, %b
-  %cast = inttoptr i64 %p to i32*
-  %load = load i32, i32* %cast
+  %cast = inttoptr i64 %p to ptr
+  %load = load i32, ptr %cast
   ret i32 %load
 }
 
@@ -87,8 +87,8 @@ define i32 @ld_ro2(i64 %a, i64 %b) {
 define i32 @ld_indirect_and(i64 %r1, i64 %r2) {
   %and = and i64 %r1, 72057594037927935
   %p = or i64 %and, %r2
-  %cast = inttoptr i64 %p to i32*
-  %load = load i32, i32* %cast
+  %cast = inttoptr i64 %p to ptr
+  %load = load i32, ptr %cast
   ret i32 %load
 }
 
@@ -96,8 +96,8 @@ define i32 @ld_indirect_and(i64 %r1, i64 %r2) {
 ; BOTH: and x
 define i32 @ld_and32_narrower(i64 %p) {
   %and = and i64 %p, 36028797018963967
-  %cast = inttoptr i64 %and to i32*
-  %load = load i32, i32* %cast
+  %cast = inttoptr i64 %and to ptr
+  %load = load i32, ptr %cast
   ret i32 %load
 }
 
@@ -107,7 +107,7 @@ define i32 @ld_and8(i64 %base, i8 %off) {
   %off_masked = and i8 %off, 63
   %off_64 = zext i8 %off_masked to i64
   %p = add i64 %base, %off_64
-  %cast = inttoptr i64 %p to i32*
-  %load = load i32, i32* %cast
+  %cast = inttoptr i64 %p to ptr
+  %load = load i32, ptr %cast
   ret i32 %load
 }

diff  --git a/llvm/test/CodeGen/AArch64/tbl-loops.ll b/llvm/test/CodeGen/AArch64/tbl-loops.ll
index 8d015abeed56e..1a33e612ad58c 100644
--- a/llvm/test/CodeGen/AArch64/tbl-loops.ll
+++ b/llvm/test/CodeGen/AArch64/tbl-loops.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-none-eabi < %s | FileCheck %s
 
-define void @loop1(i8* noalias nocapture noundef writeonly %dst, float* nocapture noundef readonly %data, i32 noundef %width) {
+define void @loop1(ptr noalias nocapture noundef writeonly %dst, ptr nocapture noundef readonly %data, i32 noundef %width) {
 ; CHECK-LABEL: loop1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs w8, w2, #1
@@ -79,37 +79,33 @@ for.body.preheader:                               ; preds = %entry
 vector.ph:                                        ; preds = %for.body.preheader
   %n.vec = and i64 %2, 8589934584
   %ind.end = trunc i64 %n.vec to i32
-  %ind.end14 = getelementptr float, float* %data, i64 %n.vec
-  %ind.end16 = getelementptr i8, i8* %dst, i64 %n.vec
+  %ind.end14 = getelementptr float, ptr %data, i64 %n.vec
+  %ind.end16 = getelementptr i8, ptr %dst, i64 %n.vec
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-  %next.gep = getelementptr float, float* %data, i64 %index
-  %next.gep18 = getelementptr i8, i8* %dst, i64 %index
-  %3 = bitcast float* %next.gep to <4 x float>*
-  %wide.load = load <4 x float>, <4 x float>* %3, align 4
-  %4 = getelementptr float, float* %next.gep, i64 4
-  %5 = bitcast float* %4 to <4 x float>*
-  %wide.load20 = load <4 x float>, <4 x float>* %5, align 4
-  %6 = fcmp olt <4 x float> %wide.load, zeroinitializer
-  %7 = fcmp olt <4 x float> %wide.load20, zeroinitializer
-  %8 = fcmp ogt <4 x float> %wide.load, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
-  %9 = fcmp ogt <4 x float> %wide.load20, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
-  %10 = select <4 x i1> %8, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %wide.load
-  %11 = select <4 x i1> %9, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %wide.load20
-  %12 = select <4 x i1> %6, <4 x float> zeroinitializer, <4 x float> %10
-  %13 = select <4 x i1> %7, <4 x float> zeroinitializer, <4 x float> %11
-  %14 = fptoui <4 x float> %12 to <4 x i8>
-  %15 = fptoui <4 x float> %13 to <4 x i8>
-  %16 = bitcast i8* %next.gep18 to <4 x i8>*
-  store <4 x i8> %14, <4 x i8>* %16, align 1
-  %17 = getelementptr i8, i8* %next.gep18, i64 4
-  %18 = bitcast i8* %17 to <4 x i8>*
-  store <4 x i8> %15, <4 x i8>* %18, align 1
+  %next.gep = getelementptr float, ptr %data, i64 %index
+  %next.gep18 = getelementptr i8, ptr %dst, i64 %index
+  %wide.load = load <4 x float>, ptr %next.gep, align 4
+  %3 = getelementptr float, ptr %next.gep, i64 4
+  %wide.load20 = load <4 x float>, ptr %3, align 4
+  %4 = fcmp olt <4 x float> %wide.load, zeroinitializer
+  %5 = fcmp olt <4 x float> %wide.load20, zeroinitializer
+  %6 = fcmp ogt <4 x float> %wide.load, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
+  %7 = fcmp ogt <4 x float> %wide.load20, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
+  %8 = select <4 x i1> %6, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %wide.load
+  %9 = select <4 x i1> %7, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %wide.load20
+  %10 = select <4 x i1> %4, <4 x float> zeroinitializer, <4 x float> %8
+  %11 = select <4 x i1> %5, <4 x float> zeroinitializer, <4 x float> %9
+  %12 = fptoui <4 x float> %10 to <4 x i8>
+  %13 = fptoui <4 x float> %11 to <4 x i8>
+  store <4 x i8> %12, ptr %next.gep18, align 1
+  %14 = getelementptr i8, ptr %next.gep18, i64 4
+  store <4 x i8> %13, ptr %14, align 1
   %index.next = add nuw i64 %index, 8
-  %19 = icmp eq i64 %index.next, %n.vec
-  br i1 %19, label %middle.block, label %vector.body
+  %15 = icmp eq i64 %index.next, %n.vec
+  br i1 %15, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i64 %2, %n.vec
@@ -117,8 +113,8 @@ middle.block:                                     ; preds = %vector.body
 
 for.body.preheader21:                             ; preds = %for.body.preheader, %middle.block
   %i.012.ph = phi i32 [ 0, %for.body.preheader ], [ %ind.end, %middle.block ]
-  %src.011.ph = phi float* [ %data, %for.body.preheader ], [ %ind.end14, %middle.block ]
-  %dst.addr.010.ph = phi i8* [ %dst, %for.body.preheader ], [ %ind.end16, %middle.block ]
+  %src.011.ph = phi ptr [ %data, %for.body.preheader ], [ %ind.end14, %middle.block ]
+  %dst.addr.010.ph = phi ptr [ %dst, %for.body.preheader ], [ %ind.end16, %middle.block ]
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
@@ -126,23 +122,23 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader21, %for.body
   %i.012 = phi i32 [ %inc, %for.body ], [ %i.012.ph, %for.body.preheader21 ]
-  %src.011 = phi float* [ %add.ptr, %for.body ], [ %src.011.ph, %for.body.preheader21 ]
-  %dst.addr.010 = phi i8* [ %add.ptr2, %for.body ], [ %dst.addr.010.ph, %for.body.preheader21 ]
-  %20 = load float, float* %src.011, align 4
-  %cmp.i = fcmp olt float %20, 0.000000e+00
-  %cmp1.i = fcmp ogt float %20, 2.550000e+02
-  %.x.i = select i1 %cmp1.i, float 2.550000e+02, float %20
+  %src.011 = phi ptr [ %add.ptr, %for.body ], [ %src.011.ph, %for.body.preheader21 ]
+  %dst.addr.010 = phi ptr [ %add.ptr2, %for.body ], [ %dst.addr.010.ph, %for.body.preheader21 ]
+  %16 = load float, ptr %src.011, align 4
+  %cmp.i = fcmp olt float %16, 0.000000e+00
+  %cmp1.i = fcmp ogt float %16, 2.550000e+02
+  %.x.i = select i1 %cmp1.i, float 2.550000e+02, float %16
   %retval.0.i = select i1 %cmp.i, float 0.000000e+00, float %.x.i
   %conv = fptoui float %retval.0.i to i8
-  store i8 %conv, i8* %dst.addr.010, align 1
-  %add.ptr = getelementptr inbounds float, float* %src.011, i64 1
-  %add.ptr2 = getelementptr inbounds i8, i8* %dst.addr.010, i64 1
+  store i8 %conv, ptr %dst.addr.010, align 1
+  %add.ptr = getelementptr inbounds float, ptr %src.011, i64 1
+  %add.ptr2 = getelementptr inbounds i8, ptr %dst.addr.010, i64 1
   %inc = add nuw nsw i32 %i.012, 1
   %exitcond.not = icmp eq i32 %inc, %width
   br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
 }
 
-define void @loop2(i8* noalias nocapture noundef writeonly %dst, float* nocapture noundef readonly %data, i32 noundef %width) {
+define void @loop2(ptr noalias nocapture noundef writeonly %dst, ptr nocapture noundef readonly %data, i32 noundef %width) {
 ; CHECK-LABEL: loop2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs w8, w2, #1
@@ -217,7 +213,6 @@ define void @loop2(i8* noalias nocapture noundef writeonly %dst, float* nocaptur
 ; CHECK-NEXT:    b.ne .LBB1_5
 ; CHECK-NEXT:    b .LBB1_7
 entry:
-  %data23 = bitcast float* %data to i8*
   %cmp19 = icmp sgt i32 %width, 0
   br i1 %cmp19, label %for.body.preheader, label %for.cond.cleanup
 
@@ -233,11 +228,10 @@ vector.memcheck:                                  ; preds = %for.body.preheader
   %4 = zext i32 %3 to i64
   %5 = shl nuw nsw i64 %4, 1
   %6 = add nuw nsw i64 %5, 2
-  %scevgep = getelementptr i8, i8* %dst, i64 %6
-  %scevgep24 = getelementptr float, float* %data, i64 %6
-  %scevgep2425 = bitcast float* %scevgep24 to i8*
-  %bound0 = icmp ugt i8* %scevgep2425, %dst
-  %bound1 = icmp ugt i8* %scevgep, %data23
+  %scevgep = getelementptr i8, ptr %dst, i64 %6
+  %scevgep24 = getelementptr float, ptr %data, i64 %6
+  %bound0 = icmp ugt ptr %scevgep24, %dst
+  %bound1 = icmp ugt ptr %scevgep, %data
   %found.conflict = and i1 %bound0, %bound1
   br i1 %found.conflict, label %for.body.preheader35, label %vector.ph
 
@@ -245,37 +239,35 @@ vector.ph:                                        ; preds = %vector.memcheck
   %n.vec = and i64 %2, 8589934588
   %ind.end = trunc i64 %n.vec to i32
   %7 = shl nuw nsw i64 %n.vec, 1
-  %ind.end27 = getelementptr float, float* %data, i64 %7
+  %ind.end27 = getelementptr float, ptr %data, i64 %7
   %8 = shl nuw nsw i64 %n.vec, 1
-  %ind.end29 = getelementptr i8, i8* %dst, i64 %8
+  %ind.end29 = getelementptr i8, ptr %dst, i64 %8
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %9 = shl i64 %index, 1
-  %next.gep = getelementptr float, float* %data, i64 %9
+  %next.gep = getelementptr float, ptr %data, i64 %9
   %10 = shl i64 %index, 1
-  %11 = bitcast float* %next.gep to <8 x float>*
-  %wide.vec = load <8 x float>, <8 x float>* %11, align 4
+  %wide.vec = load <8 x float>, ptr %next.gep, align 4
   %strided.vec = shufflevector <8 x float> %wide.vec, <8 x float> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %strided.vec34 = shufflevector <8 x float> %wide.vec, <8 x float> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-  %12 = fcmp olt <4 x float> %strided.vec, zeroinitializer
-  %13 = fcmp ogt <4 x float> %strided.vec, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
-  %14 = select <4 x i1> %13, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec
-  %15 = select <4 x i1> %12, <4 x float> zeroinitializer, <4 x float> %14
-  %16 = fptoui <4 x float> %15 to <4 x i8>
-  %17 = fcmp olt <4 x float> %strided.vec34, zeroinitializer
-  %18 = fcmp ogt <4 x float> %strided.vec34, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
-  %19 = select <4 x i1> %18, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec34
-  %20 = select <4 x i1> %17, <4 x float> zeroinitializer, <4 x float> %19
-  %21 = fptoui <4 x float> %20 to <4 x i8>
-  %22 = getelementptr inbounds i8, i8* %dst, i64 %10
-  %23 = bitcast i8* %22 to <8 x i8>*
-  %interleaved.vec = shufflevector <4 x i8> %16, <4 x i8> %21, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  store <8 x i8> %interleaved.vec, <8 x i8>* %23, align 1
+  %11 = fcmp olt <4 x float> %strided.vec, zeroinitializer
+  %12 = fcmp ogt <4 x float> %strided.vec, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
+  %13 = select <4 x i1> %12, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec
+  %14 = select <4 x i1> %11, <4 x float> zeroinitializer, <4 x float> %13
+  %15 = fptoui <4 x float> %14 to <4 x i8>
+  %16 = fcmp olt <4 x float> %strided.vec34, zeroinitializer
+  %17 = fcmp ogt <4 x float> %strided.vec34, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
+  %18 = select <4 x i1> %17, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec34
+  %19 = select <4 x i1> %16, <4 x float> zeroinitializer, <4 x float> %18
+  %20 = fptoui <4 x float> %19 to <4 x i8>
+  %21 = getelementptr inbounds i8, ptr %dst, i64 %10
+  %interleaved.vec = shufflevector <4 x i8> %15, <4 x i8> %20, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  store <8 x i8> %interleaved.vec, ptr %21, align 1
   %index.next = add nuw i64 %index, 4
-  %24 = icmp eq i64 %index.next, %n.vec
-  br i1 %24, label %middle.block, label %vector.body
+  %22 = icmp eq i64 %index.next, %n.vec
+  br i1 %22, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i64 %2, %n.vec
@@ -283,8 +275,8 @@ middle.block:                                     ; preds = %vector.body
 
 for.body.preheader35:                             ; preds = %vector.memcheck, %for.body.preheader, %middle.block
   %i.022.ph = phi i32 [ 0, %vector.memcheck ], [ 0, %for.body.preheader ], [ %ind.end, %middle.block ]
-  %src.021.ph = phi float* [ %data, %vector.memcheck ], [ %data, %for.body.preheader ], [ %ind.end27, %middle.block ]
-  %dst.addr.020.ph = phi i8* [ %dst, %vector.memcheck ], [ %dst, %for.body.preheader ], [ %ind.end29, %middle.block ]
+  %src.021.ph = phi ptr [ %data, %vector.memcheck ], [ %data, %for.body.preheader ], [ %ind.end27, %middle.block ]
+  %dst.addr.020.ph = phi ptr [ %dst, %vector.memcheck ], [ %dst, %for.body.preheader ], [ %ind.end29, %middle.block ]
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
@@ -292,32 +284,32 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader35, %for.body
   %i.022 = phi i32 [ %inc, %for.body ], [ %i.022.ph, %for.body.preheader35 ]
-  %src.021 = phi float* [ %add.ptr, %for.body ], [ %src.021.ph, %for.body.preheader35 ]
-  %dst.addr.020 = phi i8* [ %add.ptr6, %for.body ], [ %dst.addr.020.ph, %for.body.preheader35 ]
-  %25 = load float, float* %src.021, align 4
-  %cmp.i = fcmp olt float %25, 0.000000e+00
-  %cmp1.i = fcmp ogt float %25, 2.550000e+02
-  %.x.i = select i1 %cmp1.i, float 2.550000e+02, float %25
+  %src.021 = phi ptr [ %add.ptr, %for.body ], [ %src.021.ph, %for.body.preheader35 ]
+  %dst.addr.020 = phi ptr [ %add.ptr6, %for.body ], [ %dst.addr.020.ph, %for.body.preheader35 ]
+  %23 = load float, ptr %src.021, align 4
+  %cmp.i = fcmp olt float %23, 0.000000e+00
+  %cmp1.i = fcmp ogt float %23, 2.550000e+02
+  %.x.i = select i1 %cmp1.i, float 2.550000e+02, float %23
   %retval.0.i = select i1 %cmp.i, float 0.000000e+00, float %.x.i
   %conv = fptoui float %retval.0.i to i8
-  store i8 %conv, i8* %dst.addr.020, align 1
-  %arrayidx2 = getelementptr inbounds float, float* %src.021, i64 1
-  %26 = load float, float* %arrayidx2, align 4
-  %cmp.i15 = fcmp olt float %26, 0.000000e+00
-  %cmp1.i16 = fcmp ogt float %26, 2.550000e+02
-  %.x.i17 = select i1 %cmp1.i16, float 2.550000e+02, float %26
+  store i8 %conv, ptr %dst.addr.020, align 1
+  %arrayidx2 = getelementptr inbounds float, ptr %src.021, i64 1
+  %24 = load float, ptr %arrayidx2, align 4
+  %cmp.i15 = fcmp olt float %24, 0.000000e+00
+  %cmp1.i16 = fcmp ogt float %24, 2.550000e+02
+  %.x.i17 = select i1 %cmp1.i16, float 2.550000e+02, float %24
   %retval.0.i18 = select i1 %cmp.i15, float 0.000000e+00, float %.x.i17
   %conv4 = fptoui float %retval.0.i18 to i8
-  %arrayidx5 = getelementptr inbounds i8, i8* %dst.addr.020, i64 1
-  store i8 %conv4, i8* %arrayidx5, align 1
-  %add.ptr = getelementptr inbounds float, float* %src.021, i64 2
-  %add.ptr6 = getelementptr inbounds i8, i8* %dst.addr.020, i64 2
+  %arrayidx5 = getelementptr inbounds i8, ptr %dst.addr.020, i64 1
+  store i8 %conv4, ptr %arrayidx5, align 1
+  %add.ptr = getelementptr inbounds float, ptr %src.021, i64 2
+  %add.ptr6 = getelementptr inbounds i8, ptr %dst.addr.020, i64 2
   %inc = add nuw nsw i32 %i.022, 1
   %exitcond.not = icmp eq i32 %inc, %width
   br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
 }
 
-define void @loop3(i8* noalias nocapture noundef writeonly %dst, float* nocapture noundef readonly %data, i32 noundef %width) {
+define void @loop3(ptr noalias nocapture noundef writeonly %dst, ptr nocapture noundef readonly %data, i32 noundef %width) {
 ; CHECK-LABEL: loop3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs w8, w2, #1
@@ -409,7 +401,6 @@ define void @loop3(i8* noalias nocapture noundef writeonly %dst, float* nocaptur
 ; CHECK-NEXT:    b.ne .LBB2_5
 ; CHECK-NEXT:    b .LBB2_7
 entry:
-  %data33 = bitcast float* %data to i8*
   %cmp29 = icmp sgt i32 %width, 0
   br i1 %cmp29, label %for.body.preheader, label %for.cond.cleanup
 
@@ -425,11 +416,10 @@ vector.memcheck:                                  ; preds = %for.body.preheader
   %4 = zext i32 %3 to i64
   %5 = mul nuw nsw i64 %4, 3
   %6 = add nuw nsw i64 %5, 3
-  %scevgep = getelementptr i8, i8* %dst, i64 %6
-  %scevgep34 = getelementptr float, float* %data, i64 %6
-  %scevgep3435 = bitcast float* %scevgep34 to i8*
-  %bound0 = icmp ugt i8* %scevgep3435, %dst
-  %bound1 = icmp ugt i8* %scevgep, %data33
+  %scevgep = getelementptr i8, ptr %dst, i64 %6
+  %scevgep34 = getelementptr float, ptr %data, i64 %6
+  %bound0 = icmp ugt ptr %scevgep34, %dst
+  %bound1 = icmp ugt ptr %scevgep, %data
   %found.conflict = and i1 %bound0, %bound1
   br i1 %found.conflict, label %for.body.preheader46, label %vector.ph
 
@@ -437,45 +427,43 @@ vector.ph:                                        ; preds = %vector.memcheck
   %n.vec = and i64 %2, 8589934588
   %ind.end = trunc i64 %n.vec to i32
   %7 = mul nuw nsw i64 %n.vec, 3
-  %ind.end37 = getelementptr float, float* %data, i64 %7
+  %ind.end37 = getelementptr float, ptr %data, i64 %7
   %8 = mul nuw nsw i64 %n.vec, 3
-  %ind.end39 = getelementptr i8, i8* %dst, i64 %8
+  %ind.end39 = getelementptr i8, ptr %dst, i64 %8
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %9 = mul i64 %index, 3
-  %next.gep = getelementptr float, float* %data, i64 %9
+  %next.gep = getelementptr float, ptr %data, i64 %9
   %10 = mul i64 %index, 3
-  %11 = bitcast float* %next.gep to <12 x float>*
-  %wide.vec = load <12 x float>, <12 x float>* %11, align 4
+  %wide.vec = load <12 x float>, ptr %next.gep, align 4
   %strided.vec = shufflevector <12 x float> %wide.vec, <12 x float> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
   %strided.vec44 = shufflevector <12 x float> %wide.vec, <12 x float> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
   %strided.vec45 = shufflevector <12 x float> %wide.vec, <12 x float> poison, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
-  %12 = fcmp olt <4 x float> %strided.vec, zeroinitializer
-  %13 = fcmp ogt <4 x float> %strided.vec, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
-  %14 = select <4 x i1> %13, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec
-  %15 = select <4 x i1> %12, <4 x float> zeroinitializer, <4 x float> %14
-  %16 = fptoui <4 x float> %15 to <4 x i8>
-  %17 = fcmp olt <4 x float> %strided.vec44, zeroinitializer
-  %18 = fcmp ogt <4 x float> %strided.vec44, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
-  %19 = select <4 x i1> %18, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec44
-  %20 = select <4 x i1> %17, <4 x float> zeroinitializer, <4 x float> %19
-  %21 = fptoui <4 x float> %20 to <4 x i8>
-  %22 = fcmp olt <4 x float> %strided.vec45, zeroinitializer
-  %23 = fcmp ogt <4 x float> %strided.vec45, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
-  %24 = select <4 x i1> %23, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec45
-  %25 = select <4 x i1> %22, <4 x float> zeroinitializer, <4 x float> %24
-  %26 = fptoui <4 x float> %25 to <4 x i8>
-  %27 = getelementptr inbounds i8, i8* %dst, i64 %10
-  %28 = bitcast i8* %27 to <12 x i8>*
-  %29 = shufflevector <4 x i8> %16, <4 x i8> %21, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  %30 = shufflevector <4 x i8> %26, <4 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
-  %interleaved.vec = shufflevector <8 x i8> %29, <8 x i8> %30, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
-  store <12 x i8> %interleaved.vec, <12 x i8>* %28, align 1
+  %11 = fcmp olt <4 x float> %strided.vec, zeroinitializer
+  %12 = fcmp ogt <4 x float> %strided.vec, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
+  %13 = select <4 x i1> %12, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec
+  %14 = select <4 x i1> %11, <4 x float> zeroinitializer, <4 x float> %13
+  %15 = fptoui <4 x float> %14 to <4 x i8>
+  %16 = fcmp olt <4 x float> %strided.vec44, zeroinitializer
+  %17 = fcmp ogt <4 x float> %strided.vec44, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
+  %18 = select <4 x i1> %17, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec44
+  %19 = select <4 x i1> %16, <4 x float> zeroinitializer, <4 x float> %18
+  %20 = fptoui <4 x float> %19 to <4 x i8>
+  %21 = fcmp olt <4 x float> %strided.vec45, zeroinitializer
+  %22 = fcmp ogt <4 x float> %strided.vec45, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
+  %23 = select <4 x i1> %22, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec45
+  %24 = select <4 x i1> %21, <4 x float> zeroinitializer, <4 x float> %23
+  %25 = fptoui <4 x float> %24 to <4 x i8>
+  %26 = getelementptr inbounds i8, ptr %dst, i64 %10
+  %27 = shufflevector <4 x i8> %15, <4 x i8> %20, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %28 = shufflevector <4 x i8> %25, <4 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+  %interleaved.vec = shufflevector <8 x i8> %27, <8 x i8> %28, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
+  store <12 x i8> %interleaved.vec, ptr %26, align 1
   %index.next = add nuw i64 %index, 4
-  %31 = icmp eq i64 %index.next, %n.vec
-  br i1 %31, label %middle.block, label %vector.body
+  %29 = icmp eq i64 %index.next, %n.vec
+  br i1 %29, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i64 %2, %n.vec
@@ -483,8 +471,8 @@ middle.block:                                     ; preds = %vector.body
 
 for.body.preheader46:                             ; preds = %vector.memcheck, %for.body.preheader, %middle.block
   %i.032.ph = phi i32 [ 0, %vector.memcheck ], [ 0, %for.body.preheader ], [ %ind.end, %middle.block ]
-  %src.031.ph = phi float* [ %data, %vector.memcheck ], [ %data, %for.body.preheader ], [ %ind.end37, %middle.block ]
-  %dst.addr.030.ph = phi i8* [ %dst, %vector.memcheck ], [ %dst, %for.body.preheader ], [ %ind.end39, %middle.block ]
+  %src.031.ph = phi ptr [ %data, %vector.memcheck ], [ %data, %for.body.preheader ], [ %ind.end37, %middle.block ]
+  %dst.addr.030.ph = phi ptr [ %dst, %vector.memcheck ], [ %dst, %for.body.preheader ], [ %ind.end39, %middle.block ]
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
@@ -492,41 +480,41 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader46, %for.body
   %i.032 = phi i32 [ %inc, %for.body ], [ %i.032.ph, %for.body.preheader46 ]
-  %src.031 = phi float* [ %add.ptr, %for.body ], [ %src.031.ph, %for.body.preheader46 ]
-  %dst.addr.030 = phi i8* [ %add.ptr10, %for.body ], [ %dst.addr.030.ph, %for.body.preheader46 ]
-  %32 = load float, float* %src.031, align 4
-  %cmp.i = fcmp olt float %32, 0.000000e+00
-  %cmp1.i = fcmp ogt float %32, 2.550000e+02
-  %.x.i = select i1 %cmp1.i, float 2.550000e+02, float %32
+  %src.031 = phi ptr [ %add.ptr, %for.body ], [ %src.031.ph, %for.body.preheader46 ]
+  %dst.addr.030 = phi ptr [ %add.ptr10, %for.body ], [ %dst.addr.030.ph, %for.body.preheader46 ]
+  %30 = load float, ptr %src.031, align 4
+  %cmp.i = fcmp olt float %30, 0.000000e+00
+  %cmp1.i = fcmp ogt float %30, 2.550000e+02
+  %.x.i = select i1 %cmp1.i, float 2.550000e+02, float %30
   %retval.0.i = select i1 %cmp.i, float 0.000000e+00, float %.x.i
   %conv = fptoui float %retval.0.i to i8
-  store i8 %conv, i8* %dst.addr.030, align 1
-  %arrayidx2 = getelementptr inbounds float, float* %src.031, i64 1
-  %33 = load float, float* %arrayidx2, align 4
-  %cmp.i21 = fcmp olt float %33, 0.000000e+00
-  %cmp1.i22 = fcmp ogt float %33, 2.550000e+02
-  %.x.i23 = select i1 %cmp1.i22, float 2.550000e+02, float %33
+  store i8 %conv, ptr %dst.addr.030, align 1
+  %arrayidx2 = getelementptr inbounds float, ptr %src.031, i64 1
+  %31 = load float, ptr %arrayidx2, align 4
+  %cmp.i21 = fcmp olt float %31, 0.000000e+00
+  %cmp1.i22 = fcmp ogt float %31, 2.550000e+02
+  %.x.i23 = select i1 %cmp1.i22, float 2.550000e+02, float %31
   %retval.0.i24 = select i1 %cmp.i21, float 0.000000e+00, float %.x.i23
   %conv4 = fptoui float %retval.0.i24 to i8
-  %arrayidx5 = getelementptr inbounds i8, i8* %dst.addr.030, i64 1
-  store i8 %conv4, i8* %arrayidx5, align 1
-  %arrayidx6 = getelementptr inbounds float, float* %src.031, i64 2
-  %34 = load float, float* %arrayidx6, align 4
-  %cmp.i25 = fcmp olt float %34, 0.000000e+00
-  %cmp1.i26 = fcmp ogt float %34, 2.550000e+02
-  %.x.i27 = select i1 %cmp1.i26, float 2.550000e+02, float %34
+  %arrayidx5 = getelementptr inbounds i8, ptr %dst.addr.030, i64 1
+  store i8 %conv4, ptr %arrayidx5, align 1
+  %arrayidx6 = getelementptr inbounds float, ptr %src.031, i64 2
+  %32 = load float, ptr %arrayidx6, align 4
+  %cmp.i25 = fcmp olt float %32, 0.000000e+00
+  %cmp1.i26 = fcmp ogt float %32, 2.550000e+02
+  %.x.i27 = select i1 %cmp1.i26, float 2.550000e+02, float %32
   %retval.0.i28 = select i1 %cmp.i25, float 0.000000e+00, float %.x.i27
   %conv8 = fptoui float %retval.0.i28 to i8
-  %arrayidx9 = getelementptr inbounds i8, i8* %dst.addr.030, i64 2
-  store i8 %conv8, i8* %arrayidx9, align 1
-  %add.ptr = getelementptr inbounds float, float* %src.031, i64 3
-  %add.ptr10 = getelementptr inbounds i8, i8* %dst.addr.030, i64 3
+  %arrayidx9 = getelementptr inbounds i8, ptr %dst.addr.030, i64 2
+  store i8 %conv8, ptr %arrayidx9, align 1
+  %add.ptr = getelementptr inbounds float, ptr %src.031, i64 3
+  %add.ptr10 = getelementptr inbounds i8, ptr %dst.addr.030, i64 3
   %inc = add nuw nsw i32 %i.032, 1
   %exitcond.not = icmp eq i32 %inc, %width
   br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
 }
 
-define void @loop4(i8* noalias nocapture noundef writeonly %dst, float* nocapture noundef readonly %data, i32 noundef %width) {
+define void @loop4(ptr noalias nocapture noundef writeonly %dst, ptr nocapture noundef readonly %data, i32 noundef %width) {
 ; CHECK-LABEL: loop4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs w8, w2, #1
@@ -625,7 +613,6 @@ define void @loop4(i8* noalias nocapture noundef writeonly %dst, float* nocaptur
 ; CHECK-NEXT:    b.ne .LBB3_5
 ; CHECK-NEXT:    b .LBB3_7
 entry:
-  %data43 = bitcast float* %data to i8*
   %cmp39 = icmp sgt i32 %width, 0
   br i1 %cmp39, label %for.body.preheader, label %for.cond.cleanup
 
@@ -641,11 +628,10 @@ vector.memcheck:                                  ; preds = %for.body.preheader
   %4 = zext i32 %3 to i64
   %5 = shl nuw nsw i64 %4, 2
   %6 = add nuw nsw i64 %5, 4
-  %scevgep = getelementptr i8, i8* %dst, i64 %6
-  %scevgep44 = getelementptr float, float* %data, i64 %6
-  %scevgep4445 = bitcast float* %scevgep44 to i8*
-  %bound0 = icmp ugt i8* %scevgep4445, %dst
-  %bound1 = icmp ugt i8* %scevgep, %data43
+  %scevgep = getelementptr i8, ptr %dst, i64 %6
+  %scevgep44 = getelementptr float, ptr %data, i64 %6
+  %bound0 = icmp ugt ptr %scevgep44, %dst
+  %bound1 = icmp ugt ptr %scevgep, %data
   %found.conflict = and i1 %bound0, %bound1
   br i1 %found.conflict, label %for.body.preheader57, label %vector.ph
 
@@ -653,51 +639,49 @@ vector.ph:                                        ; preds = %vector.memcheck
   %n.vec = and i64 %2, 8589934588
   %ind.end = trunc i64 %n.vec to i32
   %7 = shl nuw nsw i64 %n.vec, 2
-  %ind.end47 = getelementptr float, float* %data, i64 %7
+  %ind.end47 = getelementptr float, ptr %data, i64 %7
   %8 = shl nuw nsw i64 %n.vec, 2
-  %ind.end49 = getelementptr i8, i8* %dst, i64 %8
+  %ind.end49 = getelementptr i8, ptr %dst, i64 %8
   br label %vector.body
 
 vector.body:                                      ; preds = %vector.body, %vector.ph
   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
   %9 = shl i64 %index, 2
-  %next.gep = getelementptr float, float* %data, i64 %9
+  %next.gep = getelementptr float, ptr %data, i64 %9
   %10 = shl i64 %index, 2
-  %11 = bitcast float* %next.gep to <16 x float>*
-  %wide.vec = load <16 x float>, <16 x float>* %11, align 4
+  %wide.vec = load <16 x float>, ptr %next.gep, align 4
   %strided.vec = shufflevector <16 x float> %wide.vec, <16 x float> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   %strided.vec54 = shufflevector <16 x float> %wide.vec, <16 x float> poison, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
   %strided.vec55 = shufflevector <16 x float> %wide.vec, <16 x float> poison, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
   %strided.vec56 = shufflevector <16 x float> %wide.vec, <16 x float> poison, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
-  %12 = fcmp olt <4 x float> %strided.vec, zeroinitializer
-  %13 = fcmp ogt <4 x float> %strided.vec, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
-  %14 = select <4 x i1> %13, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec
-  %15 = select <4 x i1> %12, <4 x float> zeroinitializer, <4 x float> %14
-  %16 = fptoui <4 x float> %15 to <4 x i8>
-  %17 = fcmp olt <4 x float> %strided.vec54, zeroinitializer
-  %18 = fcmp ogt <4 x float> %strided.vec54, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
-  %19 = select <4 x i1> %18, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec54
-  %20 = select <4 x i1> %17, <4 x float> zeroinitializer, <4 x float> %19
-  %21 = fptoui <4 x float> %20 to <4 x i8>
-  %22 = fcmp olt <4 x float> %strided.vec55, zeroinitializer
-  %23 = fcmp ogt <4 x float> %strided.vec55, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
-  %24 = select <4 x i1> %23, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec55
-  %25 = select <4 x i1> %22, <4 x float> zeroinitializer, <4 x float> %24
-  %26 = fptoui <4 x float> %25 to <4 x i8>
-  %27 = fcmp olt <4 x float> %strided.vec56, zeroinitializer
-  %28 = fcmp ogt <4 x float> %strided.vec56, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
-  %29 = select <4 x i1> %28, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec56
-  %30 = select <4 x i1> %27, <4 x float> zeroinitializer, <4 x float> %29
-  %31 = fptoui <4 x float> %30 to <4 x i8>
-  %32 = getelementptr inbounds i8, i8* %dst, i64 %10
-  %33 = bitcast i8* %32 to <16 x i8>*
-  %34 = shufflevector <4 x i8> %16, <4 x i8> %21, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  %35 = shufflevector <4 x i8> %26, <4 x i8> %31, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-  %interleaved.vec = shufflevector <8 x i8> %34, <8 x i8> %35, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
-  store <16 x i8> %interleaved.vec, <16 x i8>* %33, align 1
+  %11 = fcmp olt <4 x float> %strided.vec, zeroinitializer
+  %12 = fcmp ogt <4 x float> %strided.vec, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
+  %13 = select <4 x i1> %12, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec
+  %14 = select <4 x i1> %11, <4 x float> zeroinitializer, <4 x float> %13
+  %15 = fptoui <4 x float> %14 to <4 x i8>
+  %16 = fcmp olt <4 x float> %strided.vec54, zeroinitializer
+  %17 = fcmp ogt <4 x float> %strided.vec54, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
+  %18 = select <4 x i1> %17, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec54
+  %19 = select <4 x i1> %16, <4 x float> zeroinitializer, <4 x float> %18
+  %20 = fptoui <4 x float> %19 to <4 x i8>
+  %21 = fcmp olt <4 x float> %strided.vec55, zeroinitializer
+  %22 = fcmp ogt <4 x float> %strided.vec55, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
+  %23 = select <4 x i1> %22, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec55
+  %24 = select <4 x i1> %21, <4 x float> zeroinitializer, <4 x float> %23
+  %25 = fptoui <4 x float> %24 to <4 x i8>
+  %26 = fcmp olt <4 x float> %strided.vec56, zeroinitializer
+  %27 = fcmp ogt <4 x float> %strided.vec56, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
+  %28 = select <4 x i1> %27, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %strided.vec56
+  %29 = select <4 x i1> %26, <4 x float> zeroinitializer, <4 x float> %28
+  %30 = fptoui <4 x float> %29 to <4 x i8>
+  %31 = getelementptr inbounds i8, ptr %dst, i64 %10
+  %32 = shufflevector <4 x i8> %15, <4 x i8> %20, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %33 = shufflevector <4 x i8> %25, <4 x i8> %30, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %interleaved.vec = shufflevector <8 x i8> %32, <8 x i8> %33, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
+  store <16 x i8> %interleaved.vec, ptr %31, align 1
   %index.next = add nuw i64 %index, 4
-  %36 = icmp eq i64 %index.next, %n.vec
-  br i1 %36, label %middle.block, label %vector.body
+  %34 = icmp eq i64 %index.next, %n.vec
+  br i1 %34, label %middle.block, label %vector.body
 
 middle.block:                                     ; preds = %vector.body
   %cmp.n = icmp eq i64 %2, %n.vec
@@ -705,8 +689,8 @@ middle.block:                                     ; preds = %vector.body
 
 for.body.preheader57:                             ; preds = %vector.memcheck, %for.body.preheader, %middle.block
   %i.042.ph = phi i32 [ 0, %vector.memcheck ], [ 0, %for.body.preheader ], [ %ind.end, %middle.block ]
-  %src.041.ph = phi float* [ %data, %vector.memcheck ], [ %data, %for.body.preheader ], [ %ind.end47, %middle.block ]
-  %dst.addr.040.ph = phi i8* [ %dst, %vector.memcheck ], [ %dst, %for.body.preheader ], [ %ind.end49, %middle.block ]
+  %src.041.ph = phi ptr [ %data, %vector.memcheck ], [ %data, %for.body.preheader ], [ %ind.end47, %middle.block ]
+  %dst.addr.040.ph = phi ptr [ %dst, %vector.memcheck ], [ %dst, %for.body.preheader ], [ %ind.end49, %middle.block ]
   br label %for.body
 
 for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
@@ -714,44 +698,44 @@ for.cond.cleanup:                                 ; preds = %for.body, %middle.b
 
 for.body:                                         ; preds = %for.body.preheader57, %for.body
   %i.042 = phi i32 [ %inc, %for.body ], [ %i.042.ph, %for.body.preheader57 ]
-  %src.041 = phi float* [ %add.ptr, %for.body ], [ %src.041.ph, %for.body.preheader57 ]
-  %dst.addr.040 = phi i8* [ %add.ptr14, %for.body ], [ %dst.addr.040.ph, %for.body.preheader57 ]
-  %37 = load float, float* %src.041, align 4
-  %cmp.i = fcmp olt float %37, 0.000000e+00
-  %cmp1.i = fcmp ogt float %37, 2.550000e+02
-  %.x.i = select i1 %cmp1.i, float 2.550000e+02, float %37
+  %src.041 = phi ptr [ %add.ptr, %for.body ], [ %src.041.ph, %for.body.preheader57 ]
+  %dst.addr.040 = phi ptr [ %add.ptr14, %for.body ], [ %dst.addr.040.ph, %for.body.preheader57 ]
+  %35 = load float, ptr %src.041, align 4
+  %cmp.i = fcmp olt float %35, 0.000000e+00
+  %cmp1.i = fcmp ogt float %35, 2.550000e+02
+  %.x.i = select i1 %cmp1.i, float 2.550000e+02, float %35
   %retval.0.i = select i1 %cmp.i, float 0.000000e+00, float %.x.i
   %conv = fptoui float %retval.0.i to i8
-  store i8 %conv, i8* %dst.addr.040, align 1
-  %arrayidx2 = getelementptr inbounds float, float* %src.041, i64 1
-  %38 = load float, float* %arrayidx2, align 4
-  %cmp.i27 = fcmp olt float %38, 0.000000e+00
-  %cmp1.i28 = fcmp ogt float %38, 2.550000e+02
-  %.x.i29 = select i1 %cmp1.i28, float 2.550000e+02, float %38
+  store i8 %conv, ptr %dst.addr.040, align 1
+  %arrayidx2 = getelementptr inbounds float, ptr %src.041, i64 1
+  %36 = load float, ptr %arrayidx2, align 4
+  %cmp.i27 = fcmp olt float %36, 0.000000e+00
+  %cmp1.i28 = fcmp ogt float %36, 2.550000e+02
+  %.x.i29 = select i1 %cmp1.i28, float 2.550000e+02, float %36
   %retval.0.i30 = select i1 %cmp.i27, float 0.000000e+00, float %.x.i29
   %conv4 = fptoui float %retval.0.i30 to i8
-  %arrayidx5 = getelementptr inbounds i8, i8* %dst.addr.040, i64 1
-  store i8 %conv4, i8* %arrayidx5, align 1
-  %arrayidx6 = getelementptr inbounds float, float* %src.041, i64 2
-  %39 = load float, float* %arrayidx6, align 4
-  %cmp.i31 = fcmp olt float %39, 0.000000e+00
-  %cmp1.i32 = fcmp ogt float %39, 2.550000e+02
-  %.x.i33 = select i1 %cmp1.i32, float 2.550000e+02, float %39
+  %arrayidx5 = getelementptr inbounds i8, ptr %dst.addr.040, i64 1
+  store i8 %conv4, ptr %arrayidx5, align 1
+  %arrayidx6 = getelementptr inbounds float, ptr %src.041, i64 2
+  %37 = load float, ptr %arrayidx6, align 4
+  %cmp.i31 = fcmp olt float %37, 0.000000e+00
+  %cmp1.i32 = fcmp ogt float %37, 2.550000e+02
+  %.x.i33 = select i1 %cmp1.i32, float 2.550000e+02, float %37
   %retval.0.i34 = select i1 %cmp.i31, float 0.000000e+00, float %.x.i33
   %conv8 = fptoui float %retval.0.i34 to i8
-  %arrayidx9 = getelementptr inbounds i8, i8* %dst.addr.040, i64 2
-  store i8 %conv8, i8* %arrayidx9, align 1
-  %arrayidx10 = getelementptr inbounds float, float* %src.041, i64 3
-  %40 = load float, float* %arrayidx10, align 4
-  %cmp.i35 = fcmp olt float %40, 0.000000e+00
-  %cmp1.i36 = fcmp ogt float %40, 2.550000e+02
-  %.x.i37 = select i1 %cmp1.i36, float 2.550000e+02, float %40
+  %arrayidx9 = getelementptr inbounds i8, ptr %dst.addr.040, i64 2
+  store i8 %conv8, ptr %arrayidx9, align 1
+  %arrayidx10 = getelementptr inbounds float, ptr %src.041, i64 3
+  %38 = load float, ptr %arrayidx10, align 4
+  %cmp.i35 = fcmp olt float %38, 0.000000e+00
+  %cmp1.i36 = fcmp ogt float %38, 2.550000e+02
+  %.x.i37 = select i1 %cmp1.i36, float 2.550000e+02, float %38
   %retval.0.i38 = select i1 %cmp.i35, float 0.000000e+00, float %.x.i37
   %conv12 = fptoui float %retval.0.i38 to i8
-  %arrayidx13 = getelementptr inbounds i8, i8* %dst.addr.040, i64 3
-  store i8 %conv12, i8* %arrayidx13, align 1
-  %add.ptr = getelementptr inbounds float, float* %src.041, i64 4
-  %add.ptr14 = getelementptr inbounds i8, i8* %dst.addr.040, i64 4
+  %arrayidx13 = getelementptr inbounds i8, ptr %dst.addr.040, i64 3
+  store i8 %conv12, ptr %arrayidx13, align 1
+  %add.ptr = getelementptr inbounds float, ptr %src.041, i64 4
+  %add.ptr14 = getelementptr inbounds i8, ptr %dst.addr.040, i64 4
   %inc = add nuw nsw i32 %i.042, 1
   %exitcond.not = icmp eq i32 %inc, %width
   br i1 %exitcond.not, label %for.cond.cleanup, label %for.body

diff  --git a/llvm/test/CodeGen/AArch64/tbz-tbnz.ll b/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
index 5018ce0b3878c..1edea229cfe87 100644
--- a/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
+++ b/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
@@ -204,14 +204,14 @@ if.end:
   ret void
 }
 
-define void @test11(i64 %val1, i64* %ptr) {
+define void @test11(i64 %val1, ptr %ptr) {
 ; CHECK-LABEL: @test11
 
 ; CHECK: ldr [[CMP:x[0-9]+]], [x1]
 ; CHECK-NOT: cmp
 ; CHECK: tbnz [[CMP]], #63
 
-  %val = load i64, i64* %ptr
+  %val = load i64, ptr %ptr
   %tst = icmp slt i64 %val, 0
   br i1 %tst, label %if.then, label %if.end
 

diff  --git a/llvm/test/CodeGen/AArch64/tst-br.ll b/llvm/test/CodeGen/AArch64/tst-br.ll
index 34979d99867c0..a0992a667c193 100644
--- a/llvm/test/CodeGen/AArch64/tst-br.ll
+++ b/llvm/test/CodeGen/AArch64/tst-br.ll
@@ -34,8 +34,8 @@ define i32 @test_tbz() {
 ; CHECK-NEXT:    .loh AdrpLdr Lloh0, Lloh1
 ; CHECK-NEXT:    .loh AdrpLdr Lloh2, Lloh3
 
-  %val = load i32, i32* @var32
-  %val64 = load i64, i64* @var64
+  %val = load i32, ptr @var32
+  %val64 = load i64, ptr @var64
 
   %tbit0 = and i32 %val, 32768
   %tst0 = icmp ne i32 %tbit0, 0

diff  --git a/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll b/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll
index c9fb0dd1461f7..00d1069c1d13b 100644
--- a/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll
+++ b/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll
@@ -233,7 +233,7 @@ define i32 @sext_sub_underflow_neg(i8 zeroext %a) {
   ret i32 %res
 }
 
-define i32 @safe_sub_imm_var(i8* nocapture readonly %b) local_unnamed_addr #1 {
+define i32 @safe_sub_imm_var(ptr nocapture readonly %b) local_unnamed_addr #1 {
 ; CHECK-LABEL: safe_sub_imm_var:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov w0, wzr
@@ -242,7 +242,7 @@ entry:
   ret i32 0
 }
 
-define i32 @safe_sub_var_imm(i8* nocapture readonly %b) local_unnamed_addr #1 {
+define i32 @safe_sub_var_imm(ptr nocapture readonly %b) local_unnamed_addr #1 {
 ; CHECK-LABEL: safe_sub_var_imm:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -252,14 +252,14 @@ define i32 @safe_sub_var_imm(i8* nocapture readonly %b) local_unnamed_addr #1 {
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i8, i8* %b, align 1
+  %0 = load i8, ptr %b, align 1
   %sub = add nsw i8 %0, 8
   %cmp = icmp ugt i8 %sub, -4
   %conv4 = zext i1 %cmp to i32
   ret i32 %conv4
 }
 
-define i32 @safe_add_imm_var(i8* nocapture readnone %b) {
+define i32 @safe_add_imm_var(ptr nocapture readnone %b) {
 ; CHECK-LABEL: safe_add_imm_var:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov w0, #1
@@ -268,7 +268,7 @@ entry:
   ret i32 1
 }
 
-define i32 @safe_add_var_imm(i8* nocapture readnone %b) {
+define i32 @safe_add_var_imm(ptr nocapture readnone %b) {
 ; CHECK-LABEL: safe_add_var_imm:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov w0, #1

diff  --git a/llvm/test/CodeGen/AArch64/typepromotion-phisret.ll b/llvm/test/CodeGen/AArch64/typepromotion-phisret.ll
index 24e390814795e..64b87a589fbc3 100644
--- a/llvm/test/CodeGen/AArch64/typepromotion-phisret.ll
+++ b/llvm/test/CodeGen/AArch64/typepromotion-phisret.ll
@@ -217,7 +217,7 @@ exit:                                             ; preds = %if.end
   ret i16 %unrelated
 }
 
-define i16 @promote_arg_return(i16 zeroext %arg1, i16 zeroext %arg2, i8* %res) {
+define i16 @promote_arg_return(i16 zeroext %arg1, i16 zeroext %arg2, ptr %res) {
 ; CHECK-LABEL: promote_arg_return:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add w8, w0, w0, lsl #1
@@ -230,11 +230,11 @@ define i16 @promote_arg_return(i16 zeroext %arg1, i16 zeroext %arg2, i8* %res) {
   %mul = mul nuw nsw i16 %add, 3
   %cmp = icmp ult i16 %mul, %arg2
   %conv = zext i1 %cmp to i8
-  store i8 %conv, i8* %res, align 1
+  store i8 %conv, ptr %res, align 1
   ret i16 %arg1
 }
 
-define i16 @signext_bitcast_phi_select(i16 signext %start, i16* %in) {
+define i16 @signext_bitcast_phi_select(i16 signext %start, ptr %in) {
 ; CHECK-LABEL: signext_bitcast_phi_select:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    and w8, w0, #0xffff
@@ -267,8 +267,8 @@ for.body:                                         ; preds = %if.else, %entry
   br i1 %cmp.i, label %exit, label %if.then
 
 if.then:                                          ; preds = %for.body
-  %idx.next = getelementptr i16, i16* %in, i16 %idx
-  %ld = load i16, i16* %idx.next, align 2
+  %idx.next = getelementptr i16, ptr %in, i16 %idx
+  %ld = load i16, ptr %idx.next, align 2
   %cmp1.i = icmp eq i16 %ld, %idx
   br i1 %cmp1.i, label %exit, label %if.else
 

diff  --git a/llvm/test/CodeGen/AArch64/typepromotion-signed.ll b/llvm/test/CodeGen/AArch64/typepromotion-signed.ll
index 7ae6764769f06..b94825c28a561 100644
--- a/llvm/test/CodeGen/AArch64/typepromotion-signed.ll
+++ b/llvm/test/CodeGen/AArch64/typepromotion-signed.ll
@@ -1,14 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64 %s -o - | FileCheck %s
 
-define i16 @test_signed_load(i16* nocapture readonly %ptr) {
+define i16 @test_signed_load(ptr nocapture readonly %ptr) {
 ; CHECK-LABEL: test_signed_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrsh w8, [x0]
 ; CHECK-NEXT:    tst w8, #0xffff0000
 ; CHECK-NEXT:    cset w0, eq
 ; CHECK-NEXT:    ret
-  %load = load i16, i16* %ptr, align 2
+  %load = load i16, ptr %ptr, align 2
   %conv0 = zext i16 %load to i32
   %conv1 = sext i16 %load to i32
   %cmp = icmp eq i32 %conv0, %conv1
@@ -53,7 +53,7 @@ define i16 @test_srem(i16 zeroext %arg) local_unnamed_addr #1 {
   ret i16 %conv
 }
 
-define i32 @test_signext_b(i8* nocapture readonly %ptr, i8 signext %arg) {
+define i32 @test_signext_b(ptr nocapture readonly %ptr, i8 signext %arg) {
 ; CHECK-LABEL: test_signext_b:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrb w9, [x0]
@@ -65,14 +65,14 @@ define i32 @test_signext_b(i8* nocapture readonly %ptr, i8 signext %arg) {
 ; CHECK-NEXT:    csel w0, w9, w8, ge
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %1 = add nuw nsw i8 %0, %arg
   %cmp = icmp sgt i8 %1, -1
   %res = select i1 %cmp, i32 42, i32 20894
   ret i32 %res
 }
 
-define i32 @test_signext_b_ult_slt(i8* nocapture readonly %ptr, i8 signext %arg) {
+define i32 @test_signext_b_ult_slt(ptr nocapture readonly %ptr, i8 signext %arg) {
 ; CHECK-LABEL: test_signext_b_ult_slt:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrb w8, [x0]
@@ -84,7 +84,7 @@ define i32 @test_signext_b_ult_slt(i8* nocapture readonly %ptr, i8 signext %arg)
 ; CHECK-NEXT:    csel w0, w9, w8, eq
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i8, i8* %ptr, align 1
+  %0 = load i8, ptr %ptr, align 1
   %1 = add nuw nsw i8 %0, %arg
   %cmp = icmp ne i8 %1, 127
   %cmp.1 = icmp eq i8 %0, 0
@@ -93,7 +93,7 @@ entry:
   ret i32 %res
 }
 
-define i32 @test_signext_h(i16* nocapture readonly %ptr, i16 signext %arg) {
+define i32 @test_signext_h(ptr nocapture readonly %ptr, i16 signext %arg) {
 ; CHECK-LABEL: test_signext_h:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrh w9, [x0]
@@ -105,7 +105,7 @@ define i32 @test_signext_h(i16* nocapture readonly %ptr, i16 signext %arg) {
 ; CHECK-NEXT:    csel w0, w9, w8, ge
 ; CHECK-NEXT:    ret
 entry:
-  %0 = load i16, i16* %ptr, align 1
+  %0 = load i16, ptr %ptr, align 1
   %1 = add nuw nsw i16 %0, %arg
   %cmp = icmp sgt i16 %1, -1
   %res = select i1 %cmp, i32 42, i32 20894

diff  --git a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
index c7c06dbc302d6..ef7fde7f93888 100644
--- a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
@@ -94,7 +94,7 @@ define <32 x i16> @v32i16(<32 x i16> %x, <32 x i16> %y) nounwind {
   ret <32 x i16> %z
 }
 
-define void @v8i8(<8 x i8>* %px, <8 x i8>* %py, <8 x i8>* %pz) nounwind {
+define void @v8i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x1]
@@ -102,14 +102,14 @@ define void @v8i8(<8 x i8>* %px, <8 x i8>* %py, <8 x i8>* %pz) nounwind {
 ; CHECK-NEXT:    uqadd v0.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    str d0, [x2]
 ; CHECK-NEXT:    ret
-  %x = load <8 x i8>, <8 x i8>* %px
-  %y = load <8 x i8>, <8 x i8>* %py
+  %x = load <8 x i8>, ptr %px
+  %y = load <8 x i8>, ptr %py
   %z = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %x, <8 x i8> %y)
-  store <8 x i8> %z, <8 x i8>* %pz
+  store <8 x i8> %z, ptr %pz
   ret void
 }
 
-define void @v4i8(<4 x i8>* %px, <4 x i8>* %py, <4 x i8>* %pz) nounwind {
+define void @v4i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s1, [x0]
@@ -122,14 +122,14 @@ define void @v4i8(<4 x i8>* %px, <4 x i8>* %py, <4 x i8>* %pz) nounwind {
 ; CHECK-NEXT:    xtn v0.8b, v0.8h
 ; CHECK-NEXT:    str s0, [x2]
 ; CHECK-NEXT:    ret
-  %x = load <4 x i8>, <4 x i8>* %px
-  %y = load <4 x i8>, <4 x i8>* %py
+  %x = load <4 x i8>, ptr %px
+  %y = load <4 x i8>, ptr %py
   %z = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %x, <4 x i8> %y)
-  store <4 x i8> %z, <4 x i8>* %pz
+  store <4 x i8> %z, ptr %pz
   ret void
 }
 
-define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
+define void @v2i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x1]
@@ -148,14 +148,14 @@ define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
 ; CHECK-NEXT:    strb w9, [x2]
 ; CHECK-NEXT:    strb w8, [x2, #1]
 ; CHECK-NEXT:    ret
-  %x = load <2 x i8>, <2 x i8>* %px
-  %y = load <2 x i8>, <2 x i8>* %py
+  %x = load <2 x i8>, ptr %px
+  %y = load <2 x i8>, ptr %py
   %z = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %x, <2 x i8> %y)
-  store <2 x i8> %z, <2 x i8>* %pz
+  store <2 x i8> %z, ptr %pz
   ret void
 }
 
-define void @v4i16(<4 x i16>* %px, <4 x i16>* %py, <4 x i16>* %pz) nounwind {
+define void @v4i16(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x1]
@@ -163,14 +163,14 @@ define void @v4i16(<4 x i16>* %px, <4 x i16>* %py, <4 x i16>* %pz) nounwind {
 ; CHECK-NEXT:    uqadd v0.4h, v1.4h, v0.4h
 ; CHECK-NEXT:    str d0, [x2]
 ; CHECK-NEXT:    ret
-  %x = load <4 x i16>, <4 x i16>* %px
-  %y = load <4 x i16>, <4 x i16>* %py
+  %x = load <4 x i16>, ptr %px
+  %y = load <4 x i16>, ptr %py
   %z = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %x, <4 x i16> %y)
-  store <4 x i16> %z, <4 x i16>* %pz
+  store <4 x i16> %z, ptr %pz
   ret void
 }
 
-define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
+define void @v2i16(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x1]
@@ -189,10 +189,10 @@ define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
 ; CHECK-NEXT:    strh w9, [x2]
 ; CHECK-NEXT:    strh w8, [x2, #2]
 ; CHECK-NEXT:    ret
-  %x = load <2 x i16>, <2 x i16>* %px
-  %y = load <2 x i16>, <2 x i16>* %py
+  %x = load <2 x i16>, ptr %px
+  %y = load <2 x i16>, ptr %py
   %z = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %x, <2 x i16> %y)
-  store <2 x i16> %z, <2 x i16>* %pz
+  store <2 x i16> %z, ptr %pz
   ret void
 }
 
@@ -205,7 +205,7 @@ define <12 x i8> @v12i8(<12 x i8> %x, <12 x i8> %y) nounwind {
   ret <12 x i8> %z
 }
 
-define void @v12i16(<12 x i16>* %px, <12 x i16>* %py, <12 x i16>* %pz) nounwind {
+define void @v12i16(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v12i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q3, [x1]
@@ -215,14 +215,14 @@ define void @v12i16(<12 x i16>* %px, <12 x i16>* %py, <12 x i16>* %pz) nounwind
 ; CHECK-NEXT:    str q0, [x2]
 ; CHECK-NEXT:    str d1, [x2, #16]
 ; CHECK-NEXT:    ret
-  %x = load <12 x i16>, <12 x i16>* %px
-  %y = load <12 x i16>, <12 x i16>* %py
+  %x = load <12 x i16>, ptr %px
+  %y = load <12 x i16>, ptr %py
   %z = call <12 x i16> @llvm.uadd.sat.v12i16(<12 x i16> %x, <12 x i16> %y)
-  store <12 x i16> %z, <12 x i16>* %pz
+  store <12 x i16> %z, ptr %pz
   ret void
 }
 
-define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) nounwind {
+define void @v1i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v1i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr b0, [x1]
@@ -230,14 +230,14 @@ define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) nounwind {
 ; CHECK-NEXT:    uqadd v0.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    st1 { v0.b }[0], [x2]
 ; CHECK-NEXT:    ret
-  %x = load <1 x i8>, <1 x i8>* %px
-  %y = load <1 x i8>, <1 x i8>* %py
+  %x = load <1 x i8>, ptr %px
+  %y = load <1 x i8>, ptr %py
   %z = call <1 x i8> @llvm.uadd.sat.v1i8(<1 x i8> %x, <1 x i8> %y)
-  store <1 x i8> %z, <1 x i8>* %pz
+  store <1 x i8> %z, ptr %pz
   ret void
 }
 
-define void @v1i16(<1 x i16>* %px, <1 x i16>* %py, <1 x i16>* %pz) nounwind {
+define void @v1i16(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v1i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr h0, [x1]
@@ -245,10 +245,10 @@ define void @v1i16(<1 x i16>* %px, <1 x i16>* %py, <1 x i16>* %pz) nounwind {
 ; CHECK-NEXT:    uqadd v0.4h, v1.4h, v0.4h
 ; CHECK-NEXT:    str h0, [x2]
 ; CHECK-NEXT:    ret
-  %x = load <1 x i16>, <1 x i16>* %px
-  %y = load <1 x i16>, <1 x i16>* %py
+  %x = load <1 x i16>, ptr %px
+  %y = load <1 x i16>, ptr %py
   %z = call <1 x i16> @llvm.uadd.sat.v1i16(<1 x i16> %x, <1 x i16> %y)
-  store <1 x i16> %z, <1 x i16>* %pz
+  store <1 x i16> %z, ptr %pz
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/uaddo.ll b/llvm/test/CodeGen/AArch64/uaddo.ll
index 275d9a2fd771d..d7e2e4c6aec10 100644
--- a/llvm/test/CodeGen/AArch64/uaddo.ll
+++ b/llvm/test/CodeGen/AArch64/uaddo.ll
@@ -5,7 +5,7 @@
 
 ; The overflow check may be against the input rather than the sum.
 
-define i1 @uaddo_i64_increment_alt(i64 %x, i64* %p) {
+define i1 @uaddo_i64_increment_alt(i64 %x, ptr %p) {
 ; CHECK-LABEL: uaddo_i64_increment_alt:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adds x8, x0, #1
@@ -13,14 +13,14 @@ define i1 @uaddo_i64_increment_alt(i64 %x, i64* %p) {
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret
   %a = add i64 %x, 1
-  store i64 %a, i64* %p
+  store i64 %a, ptr %p
   %ov = icmp eq i64 %x, -1
   ret i1 %ov
 }
 
 ; Make sure insertion is done correctly based on dominance.
 
-define i1 @uaddo_i64_increment_alt_dom(i64 %x, i64* %p) {
+define i1 @uaddo_i64_increment_alt_dom(i64 %x, ptr %p) {
 ; CHECK-LABEL: uaddo_i64_increment_alt_dom:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adds x8, x0, #1
@@ -29,13 +29,13 @@ define i1 @uaddo_i64_increment_alt_dom(i64 %x, i64* %p) {
 ; CHECK-NEXT:    ret
   %ov = icmp eq i64 %x, -1
   %a = add i64 %x, 1
-  store i64 %a, i64* %p
+  store i64 %a, ptr %p
   ret i1 %ov
 }
 
 ; The overflow check may be against the input rather than the sum.
 
-define i1 @uaddo_i64_decrement_alt(i64 %x, i64* %p) {
+define i1 @uaddo_i64_decrement_alt(i64 %x, ptr %p) {
 ; CHECK-LABEL: uaddo_i64_decrement_alt:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs x8, x0, #1
@@ -43,14 +43,14 @@ define i1 @uaddo_i64_decrement_alt(i64 %x, i64* %p) {
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret
   %a = add i64 %x, -1
-  store i64 %a, i64* %p
+  store i64 %a, ptr %p
   %ov = icmp ne i64 %x, 0
   ret i1 %ov
 }
 
 ; Make sure insertion is done correctly based on dominance.
 
-define i1 @uaddo_i64_decrement_alt_dom(i64 %x, i64* %p) {
+define i1 @uaddo_i64_decrement_alt_dom(i64 %x, ptr %p) {
 ; CHECK-LABEL: uaddo_i64_decrement_alt_dom:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs x8, x0, #1
@@ -59,7 +59,7 @@ define i1 @uaddo_i64_decrement_alt_dom(i64 %x, i64* %p) {
 ; CHECK-NEXT:    ret
   %ov = icmp ne i64 %x, 0
   %a = add i64 %x, -1
-  store i64 %a, i64* %p
+  store i64 %a, ptr %p
   ret i1 %ov
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll b/llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll
index e298748e8ec26..dff19eb404d97 100644
--- a/llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll
+++ b/llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll
@@ -34,7 +34,7 @@ start:
 ; We avoid lowering the intrinsic as a libcall because this function has the same name as
 ; the libcall we wanted to generate (that would create an infinite loop).
 
-define i128 @__muloti4(i128 %0, i128 %1, i32* nocapture nonnull writeonly align 4 %2) #2 {
+define i128 @__muloti4(i128 %0, i128 %1, ptr nocapture nonnull writeonly align 4 %2) #2 {
 ; AARCH-LABEL: __muloti4:
 ; AARCH:       // %bb.0: // %Entry
 ; AARCH-NEXT:    asr x9, x1, #63
@@ -86,7 +86,7 @@ define i128 @__muloti4(i128 %0, i128 %1, i32* nocapture nonnull writeonly align
 ; AARCH-NEXT:  .LBB1_4: // %Block9
 ; AARCH-NEXT:    ret
 Entry:
-  store i32 0, i32* %2, align 4
+  store i32 0, ptr %2, align 4
   %.fr = freeze i128 %1
   %mul = tail call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %0, i128 %.fr)
   %3 = icmp slt i128 %0, 0
@@ -99,7 +99,7 @@ Else2:                                            ; preds = %Entry
   br i1 %mul.ov, label %Then7, label %Block9
 
 Then7:                                            ; preds = %Else2, %Entry
-  store i32 1, i32* %2, align 4
+  store i32 1, ptr %2, align 4
   br label %Block9
 
 Block9:                                           ; preds = %Else2, %Then7

diff  --git a/llvm/test/CodeGen/AArch64/unwind-preserved.ll b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
index 420fd7986245e..f3c4d217e6fca 100644
--- a/llvm/test/CodeGen/AArch64/unwind-preserved.ll
+++ b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
@@ -311,7 +311,7 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
 .Lcontinue:
   ret <vscale x 4 x i32> %result
 .Lunwind:
-  %lp = landingpad { i8*, i32 } cleanup
+  %lp = landingpad { ptr, i32 } cleanup
   ret <vscale x 4 x i32> %v;
 }
 
@@ -537,7 +537,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
 .Lcontinue:
   ret <4 x i32> %result
 .Lunwind:
-  %lp = landingpad { i8*, i32 } cleanup
+  %lp = landingpad { ptr, i32 } cleanup
   ret <4 x i32> %v;
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
index 733553afb1287..039e11654bab7 100644
--- a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
@@ -95,7 +95,7 @@ define <32 x i16> @v32i16(<32 x i16> %x, <32 x i16> %y) nounwind {
   ret <32 x i16> %z
 }
 
-define void @v8i8(<8 x i8>* %px, <8 x i8>* %py, <8 x i8>* %pz) nounwind {
+define void @v8i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x1]
@@ -103,14 +103,14 @@ define void @v8i8(<8 x i8>* %px, <8 x i8>* %py, <8 x i8>* %pz) nounwind {
 ; CHECK-NEXT:    uqsub v0.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    str d0, [x2]
 ; CHECK-NEXT:    ret
-  %x = load <8 x i8>, <8 x i8>* %px
-  %y = load <8 x i8>, <8 x i8>* %py
+  %x = load <8 x i8>, ptr %px
+  %y = load <8 x i8>, ptr %py
   %z = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %x, <8 x i8> %y)
-  store <8 x i8> %z, <8 x i8>* %pz
+  store <8 x i8> %z, ptr %pz
   ret void
 }
 
-define void @v4i8(<4 x i8>* %px, <4 x i8>* %py, <4 x i8>* %pz) nounwind {
+define void @v4i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
@@ -121,14 +121,14 @@ define void @v4i8(<4 x i8>* %px, <4 x i8>* %py, <4 x i8>* %pz) nounwind {
 ; CHECK-NEXT:    xtn v0.8b, v0.8h
 ; CHECK-NEXT:    str s0, [x2]
 ; CHECK-NEXT:    ret
-  %x = load <4 x i8>, <4 x i8>* %px
-  %y = load <4 x i8>, <4 x i8>* %py
+  %x = load <4 x i8>, ptr %px
+  %y = load <4 x i8>, ptr %py
   %z = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %x, <4 x i8> %y)
-  store <4 x i8> %z, <4 x i8>* %pz
+  store <4 x i8> %z, ptr %pz
   ret void
 }
 
-define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
+define void @v2i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v2i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrb w8, [x1]
@@ -145,14 +145,14 @@ define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
 ; CHECK-NEXT:    strb w9, [x2]
 ; CHECK-NEXT:    strb w8, [x2, #1]
 ; CHECK-NEXT:    ret
-  %x = load <2 x i8>, <2 x i8>* %px
-  %y = load <2 x i8>, <2 x i8>* %py
+  %x = load <2 x i8>, ptr %px
+  %y = load <2 x i8>, ptr %py
   %z = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %x, <2 x i8> %y)
-  store <2 x i8> %z, <2 x i8>* %pz
+  store <2 x i8> %z, ptr %pz
   ret void
 }
 
-define void @v4i16(<4 x i16>* %px, <4 x i16>* %py, <4 x i16>* %pz) nounwind {
+define void @v4i16(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x1]
@@ -160,14 +160,14 @@ define void @v4i16(<4 x i16>* %px, <4 x i16>* %py, <4 x i16>* %pz) nounwind {
 ; CHECK-NEXT:    uqsub v0.4h, v1.4h, v0.4h
 ; CHECK-NEXT:    str d0, [x2]
 ; CHECK-NEXT:    ret
-  %x = load <4 x i16>, <4 x i16>* %px
-  %y = load <4 x i16>, <4 x i16>* %py
+  %x = load <4 x i16>, ptr %px
+  %y = load <4 x i16>, ptr %py
   %z = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %x, <4 x i16> %y)
-  store <4 x i16> %z, <4 x i16>* %pz
+  store <4 x i16> %z, ptr %pz
   ret void
 }
 
-define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
+define void @v2i16(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v2i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldrh w8, [x1]
@@ -184,10 +184,10 @@ define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
 ; CHECK-NEXT:    strh w9, [x2]
 ; CHECK-NEXT:    strh w8, [x2, #2]
 ; CHECK-NEXT:    ret
-  %x = load <2 x i16>, <2 x i16>* %px
-  %y = load <2 x i16>, <2 x i16>* %py
+  %x = load <2 x i16>, ptr %px
+  %y = load <2 x i16>, ptr %py
   %z = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %x, <2 x i16> %y)
-  store <2 x i16> %z, <2 x i16>* %pz
+  store <2 x i16> %z, ptr %pz
   ret void
 }
 
@@ -200,7 +200,7 @@ define <12 x i8> @v12i8(<12 x i8> %x, <12 x i8> %y) nounwind {
   ret <12 x i8> %z
 }
 
-define void @v12i16(<12 x i16>* %px, <12 x i16>* %py, <12 x i16>* %pz) nounwind {
+define void @v12i16(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v12i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q3, [x1]
@@ -210,14 +210,14 @@ define void @v12i16(<12 x i16>* %px, <12 x i16>* %py, <12 x i16>* %pz) nounwind
 ; CHECK-NEXT:    str q0, [x2]
 ; CHECK-NEXT:    str d1, [x2, #16]
 ; CHECK-NEXT:    ret
-  %x = load <12 x i16>, <12 x i16>* %px
-  %y = load <12 x i16>, <12 x i16>* %py
+  %x = load <12 x i16>, ptr %px
+  %y = load <12 x i16>, ptr %py
   %z = call <12 x i16> @llvm.usub.sat.v12i16(<12 x i16> %x, <12 x i16> %y)
-  store <12 x i16> %z, <12 x i16>* %pz
+  store <12 x i16> %z, ptr %pz
   ret void
 }
 
-define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) nounwind {
+define void @v1i8(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v1i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr b0, [x1]
@@ -225,14 +225,14 @@ define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) nounwind {
 ; CHECK-NEXT:    uqsub v0.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    st1 { v0.b }[0], [x2]
 ; CHECK-NEXT:    ret
-  %x = load <1 x i8>, <1 x i8>* %px
-  %y = load <1 x i8>, <1 x i8>* %py
+  %x = load <1 x i8>, ptr %px
+  %y = load <1 x i8>, ptr %py
   %z = call <1 x i8> @llvm.usub.sat.v1i8(<1 x i8> %x, <1 x i8> %y)
-  store <1 x i8> %z, <1 x i8>* %pz
+  store <1 x i8> %z, ptr %pz
   ret void
 }
 
-define void @v1i16(<1 x i16>* %px, <1 x i16>* %py, <1 x i16>* %pz) nounwind {
+define void @v1i16(ptr %px, ptr %py, ptr %pz) nounwind {
 ; CHECK-LABEL: v1i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr h0, [x1]
@@ -240,10 +240,10 @@ define void @v1i16(<1 x i16>* %px, <1 x i16>* %py, <1 x i16>* %pz) nounwind {
 ; CHECK-NEXT:    uqsub v0.4h, v1.4h, v0.4h
 ; CHECK-NEXT:    str h0, [x2]
 ; CHECK-NEXT:    ret
-  %x = load <1 x i16>, <1 x i16>* %px
-  %y = load <1 x i16>, <1 x i16>* %py
+  %x = load <1 x i16>, ptr %px
+  %y = load <1 x i16>, ptr %py
   %z = call <1 x i16> @llvm.usub.sat.v1i16(<1 x i16> %x, <1 x i16> %y)
-  store <1 x i16> %z, <1 x i16>* %pz
+  store <1 x i16> %z, ptr %pz
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/v8.4-atomic-128.ll b/llvm/test/CodeGen/AArch64/v8.4-atomic-128.ll
index 2cd260ead8569..9988a74d4fd16 100644
--- a/llvm/test/CodeGen/AArch64/v8.4-atomic-128.ll
+++ b/llvm/test/CodeGen/AArch64/v8.4-atomic-128.ll
@@ -1,194 +1,174 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+v8.4a %s -o - | FileCheck %s
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+lse2 %s -o - | FileCheck %s
 
-define void @test_atomic_load(i128* %addr) {
+define void @test_atomic_load(ptr %addr) {
 ; CHECK-LABEL: test_atomic_load:
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0]
 ; CHECK: stp [[LO]], [[HI]], [x0]
-  %res.0 = load atomic i128, i128* %addr monotonic, align 16
-  store i128 %res.0, i128* %addr
+  %res.0 = load atomic i128, ptr %addr monotonic, align 16
+  store i128 %res.0, ptr %addr
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0]
 ; CHECK: stp [[LO]], [[HI]], [x0]
-  %res.1 = load atomic i128, i128* %addr unordered, align 16
-  store i128 %res.1, i128* %addr
+  %res.1 = load atomic i128, ptr %addr unordered, align 16
+  store i128 %res.1, ptr %addr
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0]
 ; CHECK: dmb ish
 ; CHECK: stp [[LO]], [[HI]], [x0]
-  %res.2 = load atomic i128, i128* %addr acquire, align 16
-  store i128 %res.2, i128* %addr
+  %res.2 = load atomic i128, ptr %addr acquire, align 16
+  store i128 %res.2, ptr %addr
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0]
 ; CHECK: dmb ish
 ; CHECK: stp [[LO]], [[HI]], [x0]
-  %res.3 = load atomic i128, i128* %addr seq_cst, align 16
-  store i128 %res.3, i128* %addr
+  %res.3 = load atomic i128, ptr %addr seq_cst, align 16
+  store i128 %res.3, ptr %addr
 
 
-  %addr8 = bitcast i128* %addr to i8*
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0, #32]
 ; CHECK-DAG: stp [[LO]], [[HI]], [x0]
-  %addr8.1 = getelementptr i8,  i8* %addr8, i32 32
-  %addr128.1 = bitcast i8* %addr8.1 to i128*
-  %res.5 = load atomic i128, i128* %addr128.1 monotonic, align 16
-  store i128 %res.5, i128* %addr
+  %addr8.1 = getelementptr i8,  ptr %addr, i32 32
+  %res.5 = load atomic i128, ptr %addr8.1 monotonic, align 16
+  store i128 %res.5, ptr %addr
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0, #504]
 ; CHECK: stp [[LO]], [[HI]], [x0]
-  %addr8.2 = getelementptr i8,  i8* %addr8, i32 504
-  %addr128.2 = bitcast i8* %addr8.2 to i128*
-  %res.6 = load atomic i128, i128* %addr128.2 monotonic, align 16
-  store i128 %res.6, i128* %addr
+  %addr8.2 = getelementptr i8,  ptr %addr, i32 504
+  %res.6 = load atomic i128, ptr %addr8.2 monotonic, align 16
+  store i128 %res.6, ptr %addr
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0, #-512]
 ; CHECK: stp [[LO]], [[HI]], [x0]
-  %addr8.3 = getelementptr i8,  i8* %addr8, i32 -512
-  %addr128.3 = bitcast i8* %addr8.3 to i128*
-  %res.7 = load atomic i128, i128* %addr128.3 monotonic, align 16
-  store i128 %res.7, i128* %addr
+  %addr8.3 = getelementptr i8,  ptr %addr, i32 -512
+  %res.7 = load atomic i128, ptr %addr8.3 monotonic, align 16
+  store i128 %res.7, ptr %addr
 
   ret void
 }
 
-define void @test_libcall_load(i128* %addr) {
+define void @test_libcall_load(ptr %addr) {
 ; CHECK-LABEL: test_libcall_load:
 ; CHECK: bl __atomic_load
-  %res.8 = load atomic i128, i128* %addr unordered, align 8
-  store i128 %res.8, i128* %addr
+  %res.8 = load atomic i128, ptr %addr unordered, align 8
+  store i128 %res.8, ptr %addr
 
   ret void
 }
 
-define void @test_nonfolded_load1(i128* %addr) {
+define void @test_nonfolded_load1(ptr %addr) {
 ; CHECK-LABEL: test_nonfolded_load1:
-  %addr8 = bitcast i128* %addr to i8*
 
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #4
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x[[ADDR]]]
 ; CHECK: stp [[LO]], [[HI]], [x0]
-  %addr8.1 = getelementptr i8,  i8* %addr8, i32 4
-  %addr128.1 = bitcast i8* %addr8.1 to i128*
-  %res.1 = load atomic i128, i128* %addr128.1 monotonic, align 16
-  store i128 %res.1, i128* %addr
+  %addr8.1 = getelementptr i8,  ptr %addr, i32 4
+  %res.1 = load atomic i128, ptr %addr8.1 monotonic, align 16
+  store i128 %res.1, ptr %addr
 
   ret void
 }
 
-define void @test_nonfolded_load2(i128* %addr) {
+define void @test_nonfolded_load2(ptr %addr) {
 ; CHECK-LABEL: test_nonfolded_load2:
-  %addr8 = bitcast i128* %addr to i8*
 
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #512
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x[[ADDR]]]
 ; CHECK: stp [[LO]], [[HI]], [x0]
-  %addr8.1 = getelementptr i8,  i8* %addr8, i32 512
-  %addr128.1 = bitcast i8* %addr8.1 to i128*
-  %res.1 = load atomic i128, i128* %addr128.1 monotonic, align 16
-  store i128 %res.1, i128* %addr
+  %addr8.1 = getelementptr i8,  ptr %addr, i32 512
+  %res.1 = load atomic i128, ptr %addr8.1 monotonic, align 16
+  store i128 %res.1, ptr %addr
 
   ret void
 }
 
-define void @test_nonfolded_load3(i128* %addr) {
+define void @test_nonfolded_load3(ptr %addr) {
 ; CHECK-LABEL: test_nonfolded_load3:
-  %addr8 = bitcast i128* %addr to i8*
 
 ; CHECK: sub x[[ADDR:[0-9]+]], x0, #520
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x[[ADDR]]]
 ; CHECK: stp [[LO]], [[HI]], [x0]
-  %addr8.1 = getelementptr i8,  i8* %addr8, i32 -520
-  %addr128.1 = bitcast i8* %addr8.1 to i128*
-  %res.1 = load atomic i128, i128* %addr128.1 monotonic, align 16
-  store i128 %res.1, i128* %addr
+  %addr8.1 = getelementptr i8,  ptr %addr, i32 -520
+  %res.1 = load atomic i128, ptr %addr8.1 monotonic, align 16
+  store i128 %res.1, ptr %addr
 
   ret void
 }
 
-define void @test_atomic_store(i128* %addr, i128 %val) {
+define void @test_atomic_store(ptr %addr, i128 %val) {
 ; CHECK-LABEL: test_atomic_store:
 
 ; CHECK: stp x2, x3, [x0]
-  store atomic i128 %val, i128* %addr monotonic, align 16
+  store atomic i128 %val, ptr %addr monotonic, align 16
 
 ; CHECK: stp x2, x3, [x0]
-  store atomic i128 %val, i128* %addr unordered, align 16
+  store atomic i128 %val, ptr %addr unordered, align 16
 
 ; CHECK: dmb ish
 ; CHECK: stp x2, x3, [x0]
-  store atomic i128 %val, i128* %addr release, align 16
+  store atomic i128 %val, ptr %addr release, align 16
 
 ; CHECK: dmb ish
 ; CHECK: stp x2, x3, [x0]
 ; CHECK: dmb ish
-  store atomic i128 %val, i128* %addr seq_cst, align 16
+  store atomic i128 %val, ptr %addr seq_cst, align 16
 
 
-  %addr8 = bitcast i128* %addr to i8*
 
 ; CHECK: stp x2, x3, [x0, #8]
-  %addr8.1 = getelementptr i8,  i8* %addr8, i32 8
-  %addr128.1 = bitcast i8* %addr8.1 to i128*
-  store atomic i128 %val, i128* %addr128.1 monotonic, align 16
+  %addr8.1 = getelementptr i8,  ptr %addr, i32 8
+  store atomic i128 %val, ptr %addr8.1 monotonic, align 16
 
 ; CHECK: stp x2, x3, [x0, #504]
-  %addr8.2 = getelementptr i8,  i8* %addr8, i32 504
-  %addr128.2 = bitcast i8* %addr8.2 to i128*
-  store atomic i128 %val, i128* %addr128.2 monotonic, align 16
+  %addr8.2 = getelementptr i8,  ptr %addr, i32 504
+  store atomic i128 %val, ptr %addr8.2 monotonic, align 16
 
 ; CHECK: stp x2, x3, [x0, #-512]
-  %addr8.3 = getelementptr i8,  i8* %addr8, i32 -512
-  %addr128.3 = bitcast i8* %addr8.3 to i128*
-  store atomic i128 %val, i128* %addr128.3 monotonic, align 16
+  %addr8.3 = getelementptr i8,  ptr %addr, i32 -512
+  store atomic i128 %val, ptr %addr8.3 monotonic, align 16
 
   ret void
 }
 
-define void @test_libcall_store(i128* %addr, i128 %val) {
+define void @test_libcall_store(ptr %addr, i128 %val) {
 ; CHECK-LABEL: test_libcall_store:
 ; CHECK: bl __atomic_store
-  store atomic i128 %val, i128* %addr unordered, align 8
+  store atomic i128 %val, ptr %addr unordered, align 8
 
   ret void
 }
 
-define void @test_nonfolded_store1(i128* %addr, i128 %val) {
+define void @test_nonfolded_store1(ptr %addr, i128 %val) {
 ; CHECK-LABEL: test_nonfolded_store1:
-  %addr8 = bitcast i128* %addr to i8*
 
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #4
 ; CHECK: stp x2, x3, [x[[ADDR]]]
-  %addr8.1 = getelementptr i8,  i8* %addr8, i32 4
-  %addr128.1 = bitcast i8* %addr8.1 to i128*
-  store atomic i128 %val, i128* %addr128.1 monotonic, align 16
+  %addr8.1 = getelementptr i8,  ptr %addr, i32 4
+  store atomic i128 %val, ptr %addr8.1 monotonic, align 16
 
   ret void
 }
 
-define void @test_nonfolded_store2(i128* %addr, i128 %val) {
+define void @test_nonfolded_store2(ptr %addr, i128 %val) {
 ; CHECK-LABEL: test_nonfolded_store2:
-  %addr8 = bitcast i128* %addr to i8*
 
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #512
 ; CHECK: stp x2, x3, [x[[ADDR]]]
-  %addr8.1 = getelementptr i8,  i8* %addr8, i32 512
-  %addr128.1 = bitcast i8* %addr8.1 to i128*
-  store atomic i128 %val, i128* %addr128.1 monotonic, align 16
+  %addr8.1 = getelementptr i8,  ptr %addr, i32 512
+  store atomic i128 %val, ptr %addr8.1 monotonic, align 16
 
   ret void
 }
 
-define void @test_nonfolded_store3(i128* %addr, i128 %val) {
+define void @test_nonfolded_store3(ptr %addr, i128 %val) {
 ; CHECK-LABEL: test_nonfolded_store3:
-  %addr8 = bitcast i128* %addr to i8*
 
 ; CHECK: sub x[[ADDR:[0-9]+]], x0, #520
 ; CHECK: stp x2, x3, [x[[ADDR]]]
-  %addr8.1 = getelementptr i8,  i8* %addr8, i32 -520
-  %addr128.1 = bitcast i8* %addr8.1 to i128*
-  store atomic i128 %val, i128* %addr128.1 monotonic, align 16
+  %addr8.1 = getelementptr i8,  ptr %addr, i32 -520
+  store atomic i128 %val, ptr %addr8.1 monotonic, align 16
 
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/vararg-tallcall.ll b/llvm/test/CodeGen/AArch64/vararg-tallcall.ll
index aa239a8d57ec9..2d6db1642247d 100644
--- a/llvm/test/CodeGen/AArch64/vararg-tallcall.ll
+++ b/llvm/test/CodeGen/AArch64/vararg-tallcall.ll
@@ -6,22 +6,20 @@
 target datalayout = "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128"
 
 %class.X = type { i8 }
-%struct.B = type { i32 (...)** }
+%struct.B = type { ptr }
 
 $"??_9B@@$BA at AA" = comdat any
 
 ; Function Attrs: noinline optnone
-define linkonce_odr void @"??_9B@@$BA@AA"(%struct.B* %this, ...) #1 comdat align 2  {
+define linkonce_odr void @"??_9B@@$BA@AA"(ptr %this, ...) #1 comdat align 2  {
 entry:
-  %this.addr = alloca %struct.B*, align 8
-  store %struct.B* %this, %struct.B** %this.addr, align 8
-  %this1 = load %struct.B*, %struct.B** %this.addr, align 8
+  %this.addr = alloca ptr, align 8
+  store ptr %this, ptr %this.addr, align 8
+  %this1 = load ptr, ptr %this.addr, align 8
   call void asm sideeffect "", "~{d0}"()
-  %0 = bitcast %struct.B* %this1 to void (%struct.B*, ...)***
-  %vtable = load void (%struct.B*, ...)**, void (%struct.B*, ...)*** %0, align 8
-  %vfn = getelementptr inbounds void (%struct.B*, ...)*, void (%struct.B*, ...)** %vtable, i64 0
-  %1 = load void (%struct.B*, ...)*, void (%struct.B*, ...)** %vfn, align 8
-  musttail call void (%struct.B*, ...) %1(%struct.B* %this1, ...)
+  %vtable = load ptr, ptr %this1, align 8
+  %0 = load ptr, ptr %vtable, align 8
+  musttail call void (ptr, ...) %0(ptr %this1, ...)
   ret void
                                                   ; No predecessors!
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/vcvt-oversize.ll b/llvm/test/CodeGen/AArch64/vcvt-oversize.ll
index 6f4b30b520998..53d166b4c6569 100644
--- a/llvm/test/CodeGen/AArch64/vcvt-oversize.ll
+++ b/llvm/test/CodeGen/AArch64/vcvt-oversize.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64 < %s | FileCheck %s
 
-define <8 x i8> @float_to_i8(<8 x float>* %in) {
+define <8 x i8> @float_to_i8(ptr %in) {
 ; CHECK-LABEL: float_to_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
@@ -13,7 +13,7 @@ define <8 x i8> @float_to_i8(<8 x float>* %in) {
 ; CHECK-NEXT:    xtn v1.4h, v1.4s
 ; CHECK-NEXT:    uzp1 v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
-  %l = load <8 x float>, <8 x float>* %in
+  %l = load <8 x float>, ptr %in
   %scale = fmul <8 x float> %l, <float 2.0, float 2.0, float 2.0, float 2.0, float 2.0, float 2.0, float 2.0, float 2.0>
   %conv = fptoui <8 x float> %scale to <8 x i8>
   ret <8 x i8> %conv

diff  --git a/llvm/test/CodeGen/AArch64/vec_uaddo.ll b/llvm/test/CodeGen/AArch64/vec_uaddo.ll
index 9e73cc5195e4a..4ccc2c642a0dc 100644
--- a/llvm/test/CodeGen/AArch64/vec_uaddo.ll
+++ b/llvm/test/CodeGen/AArch64/vec_uaddo.ll
@@ -16,7 +16,7 @@ declare {<4 x i24>, <4 x i1>} @llvm.uadd.with.overflow.v4i24(<4 x i24>, <4 x i24
 declare {<4 x i1>, <4 x i1>} @llvm.uadd.with.overflow.v4i1(<4 x i1>, <4 x i1>)
 declare {<2 x i128>, <2 x i1>} @llvm.uadd.with.overflow.v2i128(<2 x i128>, <2 x i128>)
 
-define <1 x i32> @uaddo_v1i32(<1 x i32> %a0, <1 x i32> %a1, <1 x i32>* %p2) nounwind {
+define <1 x i32> @uaddo_v1i32(<1 x i32> %a0, <1 x i32> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: uaddo_v1i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add v1.2s, v0.2s, v1.2s
@@ -27,11 +27,11 @@ define <1 x i32> @uaddo_v1i32(<1 x i32> %a0, <1 x i32> %a1, <1 x i32>* %p2) noun
   %val = extractvalue {<1 x i32>, <1 x i1>} %t, 0
   %obit = extractvalue {<1 x i32>, <1 x i1>} %t, 1
   %res = sext <1 x i1> %obit to <1 x i32>
-  store <1 x i32> %val, <1 x i32>* %p2
+  store <1 x i32> %val, ptr %p2
   ret <1 x i32> %res
 }
 
-define <2 x i32> @uaddo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) nounwind {
+define <2 x i32> @uaddo_v2i32(<2 x i32> %a0, <2 x i32> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: uaddo_v2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add v1.2s, v0.2s, v1.2s
@@ -42,11 +42,11 @@ define <2 x i32> @uaddo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun
   %val = extractvalue {<2 x i32>, <2 x i1>} %t, 0
   %obit = extractvalue {<2 x i32>, <2 x i1>} %t, 1
   %res = sext <2 x i1> %obit to <2 x i32>
-  store <2 x i32> %val, <2 x i32>* %p2
+  store <2 x i32> %val, ptr %p2
   ret <2 x i32> %res
 }
 
-define <3 x i32> @uaddo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) nounwind {
+define <3 x i32> @uaddo_v3i32(<3 x i32> %a0, <3 x i32> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: uaddo_v3i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add v1.4s, v0.4s, v1.4s
@@ -59,11 +59,11 @@ define <3 x i32> @uaddo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun
   %val = extractvalue {<3 x i32>, <3 x i1>} %t, 0
   %obit = extractvalue {<3 x i32>, <3 x i1>} %t, 1
   %res = sext <3 x i1> %obit to <3 x i32>
-  store <3 x i32> %val, <3 x i32>* %p2
+  store <3 x i32> %val, ptr %p2
   ret <3 x i32> %res
 }
 
-define <4 x i32> @uaddo_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>* %p2) nounwind {
+define <4 x i32> @uaddo_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: uaddo_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add v1.4s, v0.4s, v1.4s
@@ -74,11 +74,11 @@ define <4 x i32> @uaddo_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>* %p2) noun
   %val = extractvalue {<4 x i32>, <4 x i1>} %t, 0
   %obit = extractvalue {<4 x i32>, <4 x i1>} %t, 1
   %res = sext <4 x i1> %obit to <4 x i32>
-  store <4 x i32> %val, <4 x i32>* %p2
+  store <4 x i32> %val, ptr %p2
   ret <4 x i32> %res
 }
 
-define <6 x i32> @uaddo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) nounwind {
+define <6 x i32> @uaddo_v6i32(<6 x i32> %a0, <6 x i32> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: uaddo_v6i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov s0, w6
@@ -114,11 +114,11 @@ define <6 x i32> @uaddo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun
   %val = extractvalue {<6 x i32>, <6 x i1>} %t, 0
   %obit = extractvalue {<6 x i32>, <6 x i1>} %t, 1
   %res = sext <6 x i1> %obit to <6 x i32>
-  store <6 x i32> %val, <6 x i32>* %p2
+  store <6 x i32> %val, ptr %p2
   ret <6 x i32> %res
 }
 
-define <8 x i32> @uaddo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) nounwind {
+define <8 x i32> @uaddo_v8i32(<8 x i32> %a0, <8 x i32> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: uaddo_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add v3.4s, v1.4s, v3.4s
@@ -131,11 +131,11 @@ define <8 x i32> @uaddo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) noun
   %val = extractvalue {<8 x i32>, <8 x i1>} %t, 0
   %obit = extractvalue {<8 x i32>, <8 x i1>} %t, 1
   %res = sext <8 x i1> %obit to <8 x i32>
-  store <8 x i32> %val, <8 x i32>* %p2
+  store <8 x i32> %val, ptr %p2
   ret <8 x i32> %res
 }
 
-define <16 x i32> @uaddo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nounwind {
+define <16 x i32> @uaddo_v16i8(<16 x i8> %a0, <16 x i8> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: uaddo_v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add v4.16b, v0.16b, v1.16b
@@ -163,11 +163,11 @@ define <16 x i32> @uaddo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nou
   %val = extractvalue {<16 x i8>, <16 x i1>} %t, 0
   %obit = extractvalue {<16 x i8>, <16 x i1>} %t, 1
   %res = sext <16 x i1> %obit to <16 x i32>
-  store <16 x i8> %val, <16 x i8>* %p2
+  store <16 x i8> %val, ptr %p2
   ret <16 x i32> %res
 }
 
-define <8 x i32> @uaddo_v8i16(<8 x i16> %a0, <8 x i16> %a1, <8 x i16>* %p2) nounwind {
+define <8 x i32> @uaddo_v8i16(<8 x i16> %a0, <8 x i16> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: uaddo_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add v2.8h, v0.8h, v1.8h
@@ -187,11 +187,11 @@ define <8 x i32> @uaddo_v8i16(<8 x i16> %a0, <8 x i16> %a1, <8 x i16>* %p2) noun
   %val = extractvalue {<8 x i16>, <8 x i1>} %t, 0
   %obit = extractvalue {<8 x i16>, <8 x i1>} %t, 1
   %res = sext <8 x i1> %obit to <8 x i32>
-  store <8 x i16> %val, <8 x i16>* %p2
+  store <8 x i16> %val, ptr %p2
   ret <8 x i32> %res
 }
 
-define <2 x i32> @uaddo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) nounwind {
+define <2 x i32> @uaddo_v2i64(<2 x i64> %a0, <2 x i64> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: uaddo_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add v1.2d, v0.2d, v1.2d
@@ -203,11 +203,11 @@ define <2 x i32> @uaddo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun
   %val = extractvalue {<2 x i64>, <2 x i1>} %t, 0
   %obit = extractvalue {<2 x i64>, <2 x i1>} %t, 1
   %res = sext <2 x i1> %obit to <2 x i32>
-  store <2 x i64> %val, <2 x i64>* %p2
+  store <2 x i64> %val, ptr %p2
   ret <2 x i32> %res
 }
 
-define <4 x i32> @uaddo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) nounwind {
+define <4 x i32> @uaddo_v4i24(<4 x i24> %a0, <4 x i24> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: uaddo_v4i24:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    bic v1.4s, #255, lsl #24
@@ -238,11 +238,11 @@ define <4 x i32> @uaddo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) noun
   %val = extractvalue {<4 x i24>, <4 x i1>} %t, 0
   %obit = extractvalue {<4 x i24>, <4 x i1>} %t, 1
   %res = sext <4 x i1> %obit to <4 x i32>
-  store <4 x i24> %val, <4 x i24>* %p2
+  store <4 x i24> %val, ptr %p2
   ret <4 x i32> %res
 }
 
-define <4 x i32> @uaddo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind {
+define <4 x i32> @uaddo_v4i1(<4 x i1> %a0, <4 x i1> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: uaddo_v4i1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v2.4h, #1
@@ -268,11 +268,11 @@ define <4 x i32> @uaddo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind
   %val = extractvalue {<4 x i1>, <4 x i1>} %t, 0
   %obit = extractvalue {<4 x i1>, <4 x i1>} %t, 1
   %res = sext <4 x i1> %obit to <4 x i32>
-  store <4 x i1> %val, <4 x i1>* %p2
+  store <4 x i1> %val, ptr %p2
   ret <4 x i32> %res
 }
 
-define <2 x i32> @uaddo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2) nounwind {
+define <2 x i32> @uaddo_v2i128(<2 x i128> %a0, <2 x i128> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: uaddo_v2i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adds x8, x2, x6
@@ -293,6 +293,6 @@ define <2 x i32> @uaddo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
   %val = extractvalue {<2 x i128>, <2 x i1>} %t, 0
   %obit = extractvalue {<2 x i128>, <2 x i1>} %t, 1
   %res = sext <2 x i1> %obit to <2 x i32>
-  store <2 x i128> %val, <2 x i128>* %p2
+  store <2 x i128> %val, ptr %p2
   ret <2 x i32> %res
 }

diff  --git a/llvm/test/CodeGen/AArch64/vec_umulo.ll b/llvm/test/CodeGen/AArch64/vec_umulo.ll
index 4b61a873706ad..c419653279eff 100644
--- a/llvm/test/CodeGen/AArch64/vec_umulo.ll
+++ b/llvm/test/CodeGen/AArch64/vec_umulo.ll
@@ -16,7 +16,7 @@ declare {<4 x i24>, <4 x i1>} @llvm.umul.with.overflow.v4i24(<4 x i24>, <4 x i24
 declare {<4 x i1>, <4 x i1>} @llvm.umul.with.overflow.v4i1(<4 x i1>, <4 x i1>)
 declare {<2 x i128>, <2 x i1>} @llvm.umul.with.overflow.v2i128(<2 x i128>, <2 x i128>)
 
-define <1 x i32> @umulo_v1i32(<1 x i32> %a0, <1 x i32> %a1, <1 x i32>* %p2) nounwind {
+define <1 x i32> @umulo_v1i32(<1 x i32> %a0, <1 x i32> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: umulo_v1i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umull v1.2d, v0.2s, v1.2s
@@ -29,11 +29,11 @@ define <1 x i32> @umulo_v1i32(<1 x i32> %a0, <1 x i32> %a1, <1 x i32>* %p2) noun
   %val = extractvalue {<1 x i32>, <1 x i1>} %t, 0
   %obit = extractvalue {<1 x i32>, <1 x i1>} %t, 1
   %res = sext <1 x i1> %obit to <1 x i32>
-  store <1 x i32> %val, <1 x i32>* %p2
+  store <1 x i32> %val, ptr %p2
   ret <1 x i32> %res
 }
 
-define <2 x i32> @umulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) nounwind {
+define <2 x i32> @umulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: umulo_v2i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umull v1.2d, v0.2s, v1.2s
@@ -46,11 +46,11 @@ define <2 x i32> @umulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun
   %val = extractvalue {<2 x i32>, <2 x i1>} %t, 0
   %obit = extractvalue {<2 x i32>, <2 x i1>} %t, 1
   %res = sext <2 x i1> %obit to <2 x i32>
-  store <2 x i32> %val, <2 x i32>* %p2
+  store <2 x i32> %val, ptr %p2
   ret <2 x i32> %res
 }
 
-define <3 x i32> @umulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) nounwind {
+define <3 x i32> @umulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: umulo_v3i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umull2 v2.2d, v0.4s, v1.4s
@@ -67,11 +67,11 @@ define <3 x i32> @umulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun
   %val = extractvalue {<3 x i32>, <3 x i1>} %t, 0
   %obit = extractvalue {<3 x i32>, <3 x i1>} %t, 1
   %res = sext <3 x i1> %obit to <3 x i32>
-  store <3 x i32> %val, <3 x i32>* %p2
+  store <3 x i32> %val, ptr %p2
   ret <3 x i32> %res
 }
 
-define <4 x i32> @umulo_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>* %p2) nounwind {
+define <4 x i32> @umulo_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: umulo_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umull2 v2.2d, v0.4s, v1.4s
@@ -86,11 +86,11 @@ define <4 x i32> @umulo_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>* %p2) noun
   %val = extractvalue {<4 x i32>, <4 x i1>} %t, 0
   %obit = extractvalue {<4 x i32>, <4 x i1>} %t, 1
   %res = sext <4 x i1> %obit to <4 x i32>
-  store <4 x i32> %val, <4 x i32>* %p2
+  store <4 x i32> %val, ptr %p2
   ret <4 x i32> %res
 }
 
-define <6 x i32> @umulo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) nounwind {
+define <6 x i32> @umulo_v6i32(<6 x i32> %a0, <6 x i32> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: umulo_v6i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov s0, w6
@@ -132,11 +132,11 @@ define <6 x i32> @umulo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun
   %val = extractvalue {<6 x i32>, <6 x i1>} %t, 0
   %obit = extractvalue {<6 x i32>, <6 x i1>} %t, 1
   %res = sext <6 x i1> %obit to <6 x i32>
-  store <6 x i32> %val, <6 x i32>* %p2
+  store <6 x i32> %val, ptr %p2
   ret <6 x i32> %res
 }
 
-define <8 x i32> @umulo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) nounwind {
+define <8 x i32> @umulo_v8i32(<8 x i32> %a0, <8 x i32> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: umulo_v8i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umull2 v4.2d, v1.4s, v3.4s
@@ -157,11 +157,11 @@ define <8 x i32> @umulo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) noun
   %val = extractvalue {<8 x i32>, <8 x i1>} %t, 0
   %obit = extractvalue {<8 x i32>, <8 x i1>} %t, 1
   %res = sext <8 x i1> %obit to <8 x i32>
-  store <8 x i32> %val, <8 x i32>* %p2
+  store <8 x i32> %val, ptr %p2
   ret <8 x i32> %res
 }
 
-define <16 x i32> @umulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nounwind {
+define <16 x i32> @umulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: umulo_v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umull2 v2.8h, v0.16b, v1.16b
@@ -192,11 +192,11 @@ define <16 x i32> @umulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nou
   %val = extractvalue {<16 x i8>, <16 x i1>} %t, 0
   %obit = extractvalue {<16 x i8>, <16 x i1>} %t, 1
   %res = sext <16 x i1> %obit to <16 x i32>
-  store <16 x i8> %val, <16 x i8>* %p2
+  store <16 x i8> %val, ptr %p2
   ret <16 x i32> %res
 }
 
-define <8 x i32> @umulo_v8i16(<8 x i16> %a0, <8 x i16> %a1, <8 x i16>* %p2) nounwind {
+define <8 x i32> @umulo_v8i16(<8 x i16> %a0, <8 x i16> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: umulo_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    umull2 v2.4s, v0.8h, v1.8h
@@ -221,11 +221,11 @@ define <8 x i32> @umulo_v8i16(<8 x i16> %a0, <8 x i16> %a1, <8 x i16>* %p2) noun
   %val = extractvalue {<8 x i16>, <8 x i1>} %t, 0
   %obit = extractvalue {<8 x i16>, <8 x i1>} %t, 1
   %res = sext <8 x i1> %obit to <8 x i32>
-  store <8 x i16> %val, <8 x i16>* %p2
+  store <8 x i16> %val, ptr %p2
   ret <8 x i32> %res
 }
 
-define <2 x i32> @umulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) nounwind {
+define <2 x i32> @umulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: umulo_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, v1.d[1]
@@ -251,11 +251,11 @@ define <2 x i32> @umulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun
   %val = extractvalue {<2 x i64>, <2 x i1>} %t, 0
   %obit = extractvalue {<2 x i64>, <2 x i1>} %t, 1
   %res = sext <2 x i1> %obit to <2 x i32>
-  store <2 x i64> %val, <2 x i64>* %p2
+  store <2 x i64> %val, ptr %p2
   ret <2 x i32> %res
 }
 
-define <4 x i32> @umulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) nounwind {
+define <4 x i32> @umulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: umulo_v4i24:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    bic v1.4s, #255, lsl #24
@@ -289,11 +289,11 @@ define <4 x i32> @umulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) noun
   %val = extractvalue {<4 x i24>, <4 x i1>} %t, 0
   %obit = extractvalue {<4 x i24>, <4 x i1>} %t, 1
   %res = sext <4 x i1> %obit to <4 x i32>
-  store <4 x i24> %val, <4 x i24>* %p2
+  store <4 x i24> %val, ptr %p2
   ret <4 x i32> %res
 }
 
-define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind {
+define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: umulo_v4i1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov d2, d0
@@ -314,11 +314,11 @@ define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind
   %val = extractvalue {<4 x i1>, <4 x i1>} %t, 0
   %obit = extractvalue {<4 x i1>, <4 x i1>} %t, 1
   %res = sext <4 x i1> %obit to <4 x i32>
-  store <4 x i1> %val, <4 x i1>* %p2
+  store <4 x i1> %val, ptr %p2
   ret <4 x i32> %res
 }
 
-define <2 x i32> @umulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2) nounwind {
+define <2 x i32> @umulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: umulo_v2i128:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul x8, x7, x2
@@ -361,6 +361,6 @@ define <2 x i32> @umulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
   %val = extractvalue {<2 x i128>, <2 x i1>} %t, 0
   %obit = extractvalue {<2 x i128>, <2 x i1>} %t, 1
   %res = sext <2 x i1> %obit to <2 x i32>
-  store <2 x i128> %val, <2 x i128>* %p2
+  store <2 x i128> %val, ptr %p2
   ret <2 x i32> %res
 }

diff  --git a/llvm/test/CodeGen/AArch64/vecreduce-fadd.ll b/llvm/test/CodeGen/AArch64/vecreduce-fadd.ll
index e83105f0bfdec..83689f2625cf4 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fadd.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fadd.ll
@@ -238,7 +238,7 @@ define float @add_S_init_42(<4 x float> %bin.rdx)  {
 
 ; FIXME: The faddp.4s in the loop should not use v0.4s as second operand,
 ; because this introduces an unnecessary cross-iteration dependency.
-define float @fadd_reduction_v4f32_in_loop(float* %ptr.start) {
+define float @fadd_reduction_v4f32_in_loop(ptr %ptr.start) {
 ; CHECK-LABEL: fadd_reduction_v4f32_in_loop:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    movi d0, #0000000000000000
@@ -259,14 +259,13 @@ entry:
 
 loop:
   %iv = phi i32 [ 1, %entry ], [ %iv.next, %loop ]
-  %ptr = phi float* [ %ptr.start, %entry ], [ %ptr.next, %loop ]
+  %ptr = phi ptr [ %ptr.start, %entry ], [ %ptr.next, %loop ]
   %red = phi float [ 0.000000e+00, %entry ], [ %red.next, %loop ]
-  %ptr.bc = bitcast float* %ptr to <4 x float>*
-  %lv = load <4 x float>, <4 x float>* %ptr.bc, align 4
+  %lv = load <4 x float>, ptr %ptr, align 4
   %r = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %lv)
   %red.next = fadd fast float %r, %red
   %ec = icmp eq i32 %iv, 7
-  %ptr.next = getelementptr inbounds float, float* %ptr, i64 4
+  %ptr.next = getelementptr inbounds float, ptr %ptr, i64 4
   %iv.next= add nuw nsw i32 %iv, 1
   br i1 %ec, label %exit, label %loop
 
@@ -276,7 +275,7 @@ exit:
 
 ; FIXME: The faddp.4h in the loop should not use v0.4h as second operand,
 ; because this introduces an unnecessary cross-iteration dependency.
-define half @fadd_reduction_v4f16_in_loop(half* %ptr.start) {
+define half @fadd_reduction_v4f16_in_loop(ptr %ptr.start) {
 ; FULLFP16-LABEL: fadd_reduction_v4f16_in_loop:
 ; FULLFP16:       // %bb.0: // %entry
 ; FULLFP16-NEXT:    movi d0, #0000000000000000
@@ -330,14 +329,13 @@ entry:
 
 loop:
   %iv = phi i32 [ 1, %entry ], [ %iv.next, %loop ]
-  %ptr = phi half* [ %ptr.start, %entry ], [ %ptr.next, %loop ]
+  %ptr = phi ptr [ %ptr.start, %entry ], [ %ptr.next, %loop ]
   %red = phi half [ 0.000000e+00, %entry ], [ %red.next, %loop ]
-  %ptr.bc = bitcast half* %ptr to <4 x half>*
-  %lv = load <4 x half>, <4 x half>* %ptr.bc, align 4
+  %lv = load <4 x half>, ptr %ptr, align 4
   %r = call fast half @llvm.vector.reduce.fadd.f16.v4f16(half -0.0, <4 x half> %lv)
   %red.next = fadd fast half %r, %red
   %ec = icmp eq i32 %iv, 7
-  %ptr.next = getelementptr inbounds half, half* %ptr, i64 4
+  %ptr.next = getelementptr inbounds half, ptr %ptr, i64 4
   %iv.next= add nuw nsw i32 %iv, 1
   br i1 %ec, label %exit, label %loop
 
@@ -347,7 +345,7 @@ exit:
 
 ; FIXME: The faddp.8h in the loop should not use v0.8h as second operand,
 ; because this introduces an unnecessary cross-iteration dependency.
-define half @fadd_reduction_v8f16_in_loop(half* %ptr.start) {
+define half @fadd_reduction_v8f16_in_loop(ptr %ptr.start) {
 ; FULLFP16-LABEL: fadd_reduction_v8f16_in_loop:
 ; FULLFP16:       // %bb.0: // %entry
 ; FULLFP16-NEXT:    movi d0, #0000000000000000
@@ -422,14 +420,13 @@ entry:
 
 loop:
   %iv = phi i32 [ 1, %entry ], [ %iv.next, %loop ]
-  %ptr = phi half* [ %ptr.start, %entry ], [ %ptr.next, %loop ]
+  %ptr = phi ptr [ %ptr.start, %entry ], [ %ptr.next, %loop ]
   %red = phi half [ 0.000000e+00, %entry ], [ %red.next, %loop ]
-  %ptr.bc = bitcast half* %ptr to <8 x half>*
-  %lv = load <8 x half>, <8 x half>* %ptr.bc, align 4
+  %lv = load <8 x half>, ptr %ptr, align 4
   %r = call fast half @llvm.vector.reduce.fadd.f16.v8f16(half -0.0, <8 x half> %lv)
   %red.next = fadd fast half %r, %red
   %ec = icmp eq i32 %iv, 7
-  %ptr.next = getelementptr inbounds half, half* %ptr, i64 4
+  %ptr.next = getelementptr inbounds half, ptr %ptr, i64 4
   %iv.next= add nuw nsw i32 %iv, 1
   br i1 %ec, label %exit, label %loop
 

diff  --git a/llvm/test/CodeGen/AArch64/vector-gep.ll b/llvm/test/CodeGen/AArch64/vector-gep.ll
index f1c80a473de37..c7858416e1796 100644
--- a/llvm/test/CodeGen/AArch64/vector-gep.ll
+++ b/llvm/test/CodeGen/AArch64/vector-gep.ll
@@ -8,7 +8,7 @@ target triple = "arm64_32-apple-watchos2.0.0"
 ; CHECK-NEXT:    .quad 36
 ; CHECK-NEXT:    .quad 4804
 
-define <2 x i8*> @vector_gep(<2 x i8*> %0) {
+define <2 x ptr> @vector_gep(<2 x ptr> %0) {
 ; CHECK-LABEL: vector_gep:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:  Lloh0:
@@ -21,6 +21,6 @@ define <2 x i8*> @vector_gep(<2 x i8*> %0) {
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:    .loh AdrpLdr Lloh0, Lloh1
 entry:
-  %1 = getelementptr i8, <2 x i8*> %0, <2 x i32> <i32 36, i32 4804>
-  ret <2 x i8*> %1
+  %1 = getelementptr i8, <2 x ptr> %0, <2 x i32> <i32 36, i32 4804>
+  ret <2 x ptr> %1
 }

diff  --git a/llvm/test/CodeGen/AArch64/vector-insert-shuffle-cycle.ll b/llvm/test/CodeGen/AArch64/vector-insert-shuffle-cycle.ll
index 57e7ef1a0e77f..98486a190cec0 100644
--- a/llvm/test/CodeGen/AArch64/vector-insert-shuffle-cycle.ll
+++ b/llvm/test/CodeGen/AArch64/vector-insert-shuffle-cycle.ll
@@ -5,7 +5,7 @@ target triple = "arm64-apple-ios13.4.0"
 
 ; Make sure we do not get stuck in a cycle in DAGCombiner.
 
-define void @test(i1 %c, <1 x double>* %ptr) {
+define void @test(i1 %c, ptr %ptr) {
 ; CHECK-LABEL: test:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    movi d0, #0000000000000000
@@ -21,15 +21,15 @@ entry:
   br i1 %c, label %bb1, label %bb2
 
 bb1:
-  %lv1 = load <1 x double>, <1 x double>* %ptr, align 16
+  %lv1 = load <1 x double>, ptr %ptr, align 16
   br label %bb2
 
 bb2:
   %p = phi <1 x double> [ %lv1, %bb1 ], [ zeroinitializer, %entry ]
   %vecext19 = extractelement <1 x double> %p, i32 0
-  %arrayidx21 = getelementptr inbounds [4 x <4 x double>], [4 x <4 x double>]* undef, i64 0, i64 3
-  %lv2 = load <4 x double>, <4 x double>* %arrayidx21, align 16
+  %arrayidx21 = getelementptr inbounds [4 x <4 x double>], ptr undef, i64 0, i64 3
+  %lv2 = load <4 x double>, ptr %arrayidx21, align 16
   %vecins22 = insertelement <4 x double> %lv2, double %vecext19, i32 2
-  store <4 x double> %vecins22, <4 x double>* %arrayidx21, align 16
+  store <4 x double> %vecins22, ptr %arrayidx21, align 16
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll b/llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll
index dd83a694ceca6..623ea22000c83 100644
--- a/llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll
+++ b/llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll
@@ -11,14 +11,14 @@ target triple = "aarch64--linux-android"
 %"class.std::__1::complex.0.20.56.60.64.72.76.88.92.112.140.248" = type { float, float }
 
 ; Function Attrs: noinline norecurse nounwind ssp uwtable
-define void @fn(<2 x i64>* %argA, <2 x i64>* %argB, i64* %a) #0 align 2 {
-  %_p_vec_full = load <2 x i64>, <2 x i64>* %argA, align 4, !alias.scope !9, !noalias !3
+define void @fn(ptr %argA, ptr %argB, ptr %a) #0 align 2 {
+  %_p_vec_full = load <2 x i64>, ptr %argA, align 4, !alias.scope !9, !noalias !3
   %x = extractelement <2 x i64> %_p_vec_full, i32 1
-  store i64 %x, i64* %a, align 8, !alias.scope !3, !noalias !9
-  %_p_vec_full155 = load <2 x i64>, <2 x i64>* %argB, align 4, !alias.scope !9, !noalias !3
+  store i64 %x, ptr %a, align 8, !alias.scope !3, !noalias !9
+  %_p_vec_full155 = load <2 x i64>, ptr %argB, align 4, !alias.scope !9, !noalias !3
   %y = extractelement <2 x i64> %_p_vec_full155, i32 0
-  %scevgep41 = getelementptr i64, i64* %a, i64 -1
-  store i64 %y, i64* %scevgep41, align 8, !alias.scope !3, !noalias !9
+  %scevgep41 = getelementptr i64, ptr %a, i64 -1
+  store i64 %y, ptr %scevgep41, align 8, !alias.scope !3, !noalias !9
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/vldn_shuffle.ll b/llvm/test/CodeGen/AArch64/vldn_shuffle.ll
index d72dcd5ca05e3..693ad10444057 100644
--- a/llvm/test/CodeGen/AArch64/vldn_shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/vldn_shuffle.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-none-eabi | FileCheck %s
 
-define void @vld2(float* nocapture readonly %pSrc, float* noalias nocapture %pDst, i32 %numSamples) {
+define void @vld2(ptr nocapture readonly %pSrc, ptr noalias nocapture %pDst, i32 %numSamples) {
 ; CHECK-LABEL: vld2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov x8, xzr
@@ -22,26 +22,24 @@ entry:
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = shl i64 %index, 1
-  %next.gep = getelementptr float, float* %pSrc, i64 %0
-  %next.gep19 = getelementptr float, float* %pDst, i64 %index
-  %1 = bitcast float* %next.gep to <8 x float>*
-  %wide.vec = load <8 x float>, <8 x float>* %1, align 4
-  %2 = fmul fast <8 x float> %wide.vec, %wide.vec
-  %3 = shufflevector <8 x float> %2, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-  %4 = fmul fast <8 x float> %wide.vec, %wide.vec
-  %5 = shufflevector <8 x float> %4, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-  %6 = fadd fast <4 x float> %5, %3
-  %7 = bitcast float* %next.gep19 to <4 x float>*
-  store <4 x float> %6, <4 x float>* %7, align 4
+  %next.gep = getelementptr float, ptr %pSrc, i64 %0
+  %next.gep19 = getelementptr float, ptr %pDst, i64 %index
+  %wide.vec = load <8 x float>, ptr %next.gep, align 4
+  %1 = fmul fast <8 x float> %wide.vec, %wide.vec
+  %2 = shufflevector <8 x float> %1, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %3 = fmul fast <8 x float> %wide.vec, %wide.vec
+  %4 = shufflevector <8 x float> %3, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %5 = fadd fast <4 x float> %4, %2
+  store <4 x float> %5, ptr %next.gep19, align 4
   %index.next = add i64 %index, 4
-  %8 = icmp eq i64 %index.next, 1024
-  br i1 %8, label %while.end, label %vector.body
+  %6 = icmp eq i64 %index.next, 1024
+  br i1 %6, label %while.end, label %vector.body
 
 while.end:                                        ; preds = %vector.body
   ret void
 }
 
-define void @vld3(float* nocapture readonly %pSrc, float* noalias nocapture %pDst, i32 %numSamples) {
+define void @vld3(ptr nocapture readonly %pSrc, ptr noalias nocapture %pDst, i32 %numSamples) {
 ; CHECK-LABEL: vld3:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov x8, xzr
@@ -63,29 +61,27 @@ entry:
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = mul i64 %index, 3
-  %next.gep = getelementptr float, float* %pSrc, i64 %0
-  %next.gep23 = getelementptr float, float* %pDst, i64 %index
-  %1 = bitcast float* %next.gep to <12 x float>*
-  %wide.vec = load <12 x float>, <12 x float>* %1, align 4
-  %2 = fmul fast <12 x float> %wide.vec, %wide.vec
-  %3 = shufflevector <12 x float> %2, <12 x float> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
-  %4 = fmul fast <12 x float> %wide.vec, %wide.vec
-  %5 = shufflevector <12 x float> %4, <12 x float> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
-  %6 = fadd fast <4 x float> %5, %3
-  %7 = fmul fast <12 x float> %wide.vec, %wide.vec
-  %8 = shufflevector <12 x float> %7, <12 x float> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
-  %9 = fadd fast <4 x float> %6, %8
-  %10 = bitcast float* %next.gep23 to <4 x float>*
-  store <4 x float> %9, <4 x float>* %10, align 4
+  %next.gep = getelementptr float, ptr %pSrc, i64 %0
+  %next.gep23 = getelementptr float, ptr %pDst, i64 %index
+  %wide.vec = load <12 x float>, ptr %next.gep, align 4
+  %1 = fmul fast <12 x float> %wide.vec, %wide.vec
+  %2 = shufflevector <12 x float> %1, <12 x float> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+  %3 = fmul fast <12 x float> %wide.vec, %wide.vec
+  %4 = shufflevector <12 x float> %3, <12 x float> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
+  %5 = fadd fast <4 x float> %4, %2
+  %6 = fmul fast <12 x float> %wide.vec, %wide.vec
+  %7 = shufflevector <12 x float> %6, <12 x float> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
+  %8 = fadd fast <4 x float> %5, %7
+  store <4 x float> %8, ptr %next.gep23, align 4
   %index.next = add i64 %index, 4
-  %11 = icmp eq i64 %index.next, 1024
-  br i1 %11, label %while.end, label %vector.body
+  %9 = icmp eq i64 %index.next, 1024
+  br i1 %9, label %while.end, label %vector.body
 
 while.end:                                        ; preds = %vector.body
   ret void
 }
 
-define void @vld4(float* nocapture readonly %pSrc, float* noalias nocapture %pDst, i32 %numSamples) {
+define void @vld4(ptr nocapture readonly %pSrc, ptr noalias nocapture %pDst, i32 %numSamples) {
 ; CHECK-LABEL: vld4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov x8, xzr
@@ -109,33 +105,31 @@ entry:
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = shl i64 %index, 2
-  %next.gep = getelementptr float, float* %pSrc, i64 %0
+  %next.gep = getelementptr float, ptr %pSrc, i64 %0
   %1 = shl i64 %index, 1
-  %2 = bitcast float* %next.gep to <16 x float>*
-  %wide.vec = load <16 x float>, <16 x float>* %2, align 4
-  %3 = fmul fast <16 x float> %wide.vec, %wide.vec
-  %4 = shufflevector <16 x float> %3, <16 x float> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
-  %5 = fmul fast <16 x float> %wide.vec, %wide.vec
-  %6 = shufflevector <16 x float> %5, <16 x float> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
-  %7 = fadd fast <4 x float> %6, %4
-  %8 = fmul fast <16 x float> %wide.vec, %wide.vec
-  %9 = shufflevector <16 x float> %8, <16 x float> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
-  %10 = fmul fast <16 x float> %wide.vec, %wide.vec
-  %11 = shufflevector <16 x float> %10, <16 x float> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
-  %12 = fadd fast <4 x float> %11, %9
-  %13 = getelementptr inbounds float, float* %pDst, i64 %1
-  %14 = bitcast float* %13 to <8 x float>*
-  %interleaved.vec = shufflevector <4 x float> %7, <4 x float> %12, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  store <8 x float> %interleaved.vec, <8 x float>* %14, align 4
+  %wide.vec = load <16 x float>, ptr %next.gep, align 4
+  %2 = fmul fast <16 x float> %wide.vec, %wide.vec
+  %3 = shufflevector <16 x float> %2, <16 x float> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %4 = fmul fast <16 x float> %wide.vec, %wide.vec
+  %5 = shufflevector <16 x float> %4, <16 x float> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
+  %6 = fadd fast <4 x float> %5, %3
+  %7 = fmul fast <16 x float> %wide.vec, %wide.vec
+  %8 = shufflevector <16 x float> %7, <16 x float> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
+  %9 = fmul fast <16 x float> %wide.vec, %wide.vec
+  %10 = shufflevector <16 x float> %9, <16 x float> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
+  %11 = fadd fast <4 x float> %10, %8
+  %12 = getelementptr inbounds float, ptr %pDst, i64 %1
+  %interleaved.vec = shufflevector <4 x float> %6, <4 x float> %11, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  store <8 x float> %interleaved.vec, ptr %12, align 4
   %index.next = add i64 %index, 4
-  %15 = icmp eq i64 %index.next, 1024
-  br i1 %15, label %while.end, label %vector.body
+  %13 = icmp eq i64 %index.next, 1024
+  br i1 %13, label %while.end, label %vector.body
 
 while.end:                                        ; preds = %vector.body
   ret void
 }
 
-define void @twosrc(float* nocapture readonly %pSrc, float* nocapture readonly %pSrc2, float* noalias nocapture %pDst, i32 %numSamples) {
+define void @twosrc(ptr nocapture readonly %pSrc, ptr nocapture readonly %pSrc2, ptr noalias nocapture %pDst, i32 %numSamples) {
 ; CHECK-LABEL: twosrc:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov x8, xzr
@@ -159,30 +153,27 @@ entry:
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = shl i64 %index, 1
-  %next.gep = getelementptr float, float* %pSrc, i64 %0
+  %next.gep = getelementptr float, ptr %pSrc, i64 %0
   %1 = shl i64 %index, 1
-  %next.gep23 = getelementptr float, float* %pSrc2, i64 %1
-  %next.gep24 = getelementptr float, float* %pDst, i64 %index
-  %2 = bitcast float* %next.gep to <8 x float>*
-  %wide.vec = load <8 x float>, <8 x float>* %2, align 4
-  %3 = bitcast float* %next.gep23 to <8 x float>*
-  %wide.vec26 = load <8 x float>, <8 x float>* %3, align 4
+  %next.gep23 = getelementptr float, ptr %pSrc2, i64 %1
+  %next.gep24 = getelementptr float, ptr %pDst, i64 %index
+  %wide.vec = load <8 x float>, ptr %next.gep, align 4
+  %wide.vec26 = load <8 x float>, ptr %next.gep23, align 4
+  %2 = fmul fast <8 x float> %wide.vec26, %wide.vec
+  %3 = shufflevector <8 x float> %2, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %4 = fmul fast <8 x float> %wide.vec26, %wide.vec
-  %5 = shufflevector <8 x float> %4, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-  %6 = fmul fast <8 x float> %wide.vec26, %wide.vec
-  %7 = shufflevector <8 x float> %6, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-  %8 = fadd fast <4 x float> %7, %5
-  %9 = bitcast float* %next.gep24 to <4 x float>*
-  store <4 x float> %8, <4 x float>* %9, align 4
+  %5 = shufflevector <8 x float> %4, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %6 = fadd fast <4 x float> %5, %3
+  store <4 x float> %6, ptr %next.gep24, align 4
   %index.next = add i64 %index, 4
-  %10 = icmp eq i64 %index.next, 1024
-  br i1 %10, label %while.end, label %vector.body
+  %7 = icmp eq i64 %index.next, 1024
+  br i1 %7, label %while.end, label %vector.body
 
 while.end:                                        ; preds = %vector.body
   ret void
 }
 
-define void @vld2_multiuse(float* nocapture readonly %pSrc, float* noalias nocapture %pDst, i32 %numSamples) {
+define void @vld2_multiuse(ptr nocapture readonly %pSrc, ptr noalias nocapture %pDst, i32 %numSamples) {
 ; CHECK-LABEL: vld2_multiuse:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov x8, xzr
@@ -203,25 +194,23 @@ entry:
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = shl i64 %index, 1
-  %next.gep = getelementptr float, float* %pSrc, i64 %0
-  %next.gep19 = getelementptr float, float* %pDst, i64 %index
-  %1 = bitcast float* %next.gep to <8 x float>*
-  %wide.vec = load <8 x float>, <8 x float>* %1, align 4
-  %2 = fmul fast <8 x float> %wide.vec, %wide.vec
-  %3 = shufflevector <8 x float> %2, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-  %4 = shufflevector <8 x float> %2, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-  %5 = fadd fast <4 x float> %4, %3
-  %6 = bitcast float* %next.gep19 to <4 x float>*
-  store <4 x float> %5, <4 x float>* %6, align 4
+  %next.gep = getelementptr float, ptr %pSrc, i64 %0
+  %next.gep19 = getelementptr float, ptr %pDst, i64 %index
+  %wide.vec = load <8 x float>, ptr %next.gep, align 4
+  %1 = fmul fast <8 x float> %wide.vec, %wide.vec
+  %2 = shufflevector <8 x float> %1, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %3 = shufflevector <8 x float> %1, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %4 = fadd fast <4 x float> %3, %2
+  store <4 x float> %4, ptr %next.gep19, align 4
   %index.next = add i64 %index, 4
-  %7 = icmp eq i64 %index.next, 1024
-  br i1 %7, label %while.end, label %vector.body
+  %5 = icmp eq i64 %index.next, 1024
+  br i1 %5, label %while.end, label %vector.body
 
 while.end:                                        ; preds = %vector.body
   ret void
 }
 
-define void @vld3_multiuse(float* nocapture readonly %pSrc, float* noalias nocapture %pDst, i32 %numSamples) {
+define void @vld3_multiuse(ptr nocapture readonly %pSrc, ptr noalias nocapture %pDst, i32 %numSamples) {
 ; CHECK-LABEL: vld3_multiuse:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov x8, xzr
@@ -243,27 +232,25 @@ entry:
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = mul i64 %index, 3
-  %next.gep = getelementptr float, float* %pSrc, i64 %0
-  %next.gep23 = getelementptr float, float* %pDst, i64 %index
-  %1 = bitcast float* %next.gep to <12 x float>*
-  %wide.vec = load <12 x float>, <12 x float>* %1, align 4
-  %2 = fmul fast <12 x float> %wide.vec, %wide.vec
-  %3 = shufflevector <12 x float> %2, <12 x float> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
-  %4 = shufflevector <12 x float> %2, <12 x float> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
-  %5 = fadd fast <4 x float> %4, %3
-  %6 = shufflevector <12 x float> %2, <12 x float> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
-  %7 = fadd fast <4 x float> %5, %6
-  %8 = bitcast float* %next.gep23 to <4 x float>*
-  store <4 x float> %7, <4 x float>* %8, align 4
+  %next.gep = getelementptr float, ptr %pSrc, i64 %0
+  %next.gep23 = getelementptr float, ptr %pDst, i64 %index
+  %wide.vec = load <12 x float>, ptr %next.gep, align 4
+  %1 = fmul fast <12 x float> %wide.vec, %wide.vec
+  %2 = shufflevector <12 x float> %1, <12 x float> undef, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+  %3 = shufflevector <12 x float> %1, <12 x float> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
+  %4 = fadd fast <4 x float> %3, %2
+  %5 = shufflevector <12 x float> %1, <12 x float> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
+  %6 = fadd fast <4 x float> %4, %5
+  store <4 x float> %6, ptr %next.gep23, align 4
   %index.next = add i64 %index, 4
-  %9 = icmp eq i64 %index.next, 1024
-  br i1 %9, label %while.end, label %vector.body
+  %7 = icmp eq i64 %index.next, 1024
+  br i1 %7, label %while.end, label %vector.body
 
 while.end:                                        ; preds = %vector.body
   ret void
 }
 
-define void @vld4_multiuse(float* nocapture readonly %pSrc, float* noalias nocapture %pDst, i32 %numSamples) {
+define void @vld4_multiuse(ptr nocapture readonly %pSrc, ptr noalias nocapture %pDst, i32 %numSamples) {
 ; CHECK-LABEL: vld4_multiuse:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    mov x8, xzr
@@ -287,24 +274,22 @@ entry:
 vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = shl i64 %index, 2
-  %next.gep = getelementptr float, float* %pSrc, i64 %0
+  %next.gep = getelementptr float, ptr %pSrc, i64 %0
   %1 = shl i64 %index, 1
-  %2 = bitcast float* %next.gep to <16 x float>*
-  %wide.vec = load <16 x float>, <16 x float>* %2, align 4
-  %3 = fmul fast <16 x float> %wide.vec, %wide.vec
-  %4 = shufflevector <16 x float> %3, <16 x float> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
-  %5 = shufflevector <16 x float> %3, <16 x float> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
-  %6 = fadd fast <4 x float> %5, %4
-  %7 = shufflevector <16 x float> %3, <16 x float> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
-  %8 = shufflevector <16 x float> %3, <16 x float> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
-  %9 = fadd fast <4 x float> %8, %7
-  %10 = getelementptr inbounds float, float* %pDst, i64 %1
-  %11 = bitcast float* %10 to <8 x float>*
-  %interleaved.vec = shufflevector <4 x float> %6, <4 x float> %9, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
-  store <8 x float> %interleaved.vec, <8 x float>* %11, align 4
+  %wide.vec = load <16 x float>, ptr %next.gep, align 4
+  %2 = fmul fast <16 x float> %wide.vec, %wide.vec
+  %3 = shufflevector <16 x float> %2, <16 x float> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %4 = shufflevector <16 x float> %2, <16 x float> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
+  %5 = fadd fast <4 x float> %4, %3
+  %6 = shufflevector <16 x float> %2, <16 x float> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
+  %7 = shufflevector <16 x float> %2, <16 x float> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
+  %8 = fadd fast <4 x float> %7, %6
+  %9 = getelementptr inbounds float, ptr %pDst, i64 %1
+  %interleaved.vec = shufflevector <4 x float> %5, <4 x float> %8, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  store <8 x float> %interleaved.vec, ptr %9, align 4
   %index.next = add i64 %index, 4
-  %12 = icmp eq i64 %index.next, 1024
-  br i1 %12, label %while.end, label %vector.body
+  %10 = icmp eq i64 %index.next, 1024
+  br i1 %10, label %while.end, label %vector.body
 
 while.end:                                        ; preds = %vector.body
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/volatile-combine.ll b/llvm/test/CodeGen/AArch64/volatile-combine.ll
index 32fea1990ce79..fe2e7b9ed0867 100644
--- a/llvm/test/CodeGen/AArch64/volatile-combine.ll
+++ b/llvm/test/CodeGen/AArch64/volatile-combine.ll
@@ -1,10 +1,10 @@
 ; RUN: llc -mtriple=arm64-apple-ios %s -o - -stop-after=finalize-isel 2>&1 | FileCheck %s
 
-define void @foo(i64 %a, i64 %b, i32* %ptr) {
+define void @foo(i64 %a, i64 %b, ptr %ptr) {
 ; CHECK-LABEL: name: foo
 ; CHECK: STRWui {{.*}} (volatile store (s32) into %ir.ptr)
   %sum = add i64 %a, 1
   %sum.32 = trunc i64 %sum to i32
-  store volatile i32 %sum.32, i32* %ptr
+  store volatile i32 %sum.32, ptr %ptr
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/vselect-ext.ll b/llvm/test/CodeGen/AArch64/vselect-ext.ll
index 6f16bc02911e2..350595f0dc160 100644
--- a/llvm/test/CodeGen/AArch64/vselect-ext.ll
+++ b/llvm/test/CodeGen/AArch64/vselect-ext.ll
@@ -330,7 +330,7 @@ define <16 x i32> @same_zext_used_in_cmp_ne_and_select_v8i32(<16 x i8> %a) {
 
 ; A variation of @same_zext_used_in_cmp_unsigned_pred_and_select, with
 ; multiple users of the compare.
-define <16 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_other_use(<16 x i8> %a, <16 x i64> %v, <16 x i64>* %ptr) {
+define <16 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_other_use(<16 x i8> %a, <16 x i64> %v, ptr %ptr) {
 ; CHECK-LABEL: same_zext_used_in_cmp_unsigned_pred_and_select_other_use:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    mov.16b v16, v2
@@ -380,7 +380,7 @@ entry:
   %cmp = icmp ugt <16 x i8> %a, <i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10>
   %sel = select <16 x i1> %cmp, <16 x i32> %ext, <16 x i32> zeroinitializer
   %sel.2 = select <16 x i1> %cmp, <16 x i64> %v, <16 x i64> zeroinitializer
-  store <16 x i64> %sel.2, <16 x i64>* %ptr
+  store <16 x i64> %sel.2, ptr %ptr
   ret <16 x i32> %sel
 }
 
@@ -570,7 +570,7 @@ entry:
   ret <16 x i32> %sel
 }
 
-define void @extension_in_loop_v16i8_to_v16i32(i8* %src, i32* %dst) {
+define void @extension_in_loop_v16i8_to_v16i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: extension_in_loop_v16i8_to_v16i32:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:  Lloh2:
@@ -625,15 +625,13 @@ entry:
 
 loop:
   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
-  %src.gep = getelementptr i8, i8* %src, i64 %iv
-  %src.gep.cast = bitcast i8* %src.gep to <16 x i8>*
-  %load = load <16 x i8>, <16 x i8>* %src.gep.cast
+  %src.gep = getelementptr i8, ptr %src, i64 %iv
+  %load = load <16 x i8>, ptr %src.gep
   %cmp = icmp sgt <16 x i8> %load,  <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   %ext = zext <16 x i8> %load to <16 x i32>
   %sel = select <16 x i1> %cmp, <16 x i32> %ext, <16 x i32> zeroinitializer
-  %dst.gep = getelementptr i32, i32* %dst, i64 %iv
-  %dst.gep.cast = bitcast i32* %dst.gep to <16 x i32>*
-  store <16 x i32> %sel, <16 x i32>* %dst.gep.cast
+  %dst.gep = getelementptr i32, ptr %dst, i64 %iv
+  store <16 x i32> %sel, ptr %dst.gep
   %iv.next = add nuw i64 %iv, 16
   %ec = icmp eq i64 %iv.next, 128
   br i1 %ec, label %exit, label %loop
@@ -642,7 +640,7 @@ exit:
   ret void
 }
 
-define void @extension_in_loop_as_shuffle_v16i8_to_v16i32(i8* %src, i32* %dst) {
+define void @extension_in_loop_as_shuffle_v16i8_to_v16i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: extension_in_loop_as_shuffle_v16i8_to_v16i32:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:  Lloh10:
@@ -697,16 +695,14 @@ entry:
 
 loop:
   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
-  %src.gep = getelementptr i8, i8* %src, i64 %iv
-  %src.gep.cast = bitcast i8* %src.gep to <16 x i8>*
-  %load = load <16 x i8>, <16 x i8>* %src.gep.cast
+  %src.gep = getelementptr i8, ptr %src, i64 %iv
+  %load = load <16 x i8>, ptr %src.gep
   %cmp = icmp sgt <16 x i8> %load,  <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   %ext.shuf = shufflevector <16 x i8> %load, <16 x i8> zeroinitializer, <64 x i32> <i32 16, i32 16, i32 16, i32 0, i32 16, i32 16, i32 16, i32 1, i32 16, i32 16, i32 16, i32 2, i32 16, i32 16, i32 16, i32 3, i32 16, i32 16, i32 16, i32 4, i32 16, i32 16, i32 16, i32 5, i32 16, i32 16, i32 16, i32 6, i32 16, i32 16, i32 16, i32 7, i32 16, i32 16, i32 16, i32 8, i32 16, i32 16, i32 16, i32 9, i32 16, i32 16, i32 16, i32 10, i32 16, i32 16, i32 16, i32 11, i32 16, i32 16, i32 16, i32 12, i32 16, i32 16, i32 16, i32 13, i32 16, i32 16, i32 16, i32 14, i32 16, i32 16, i32 16, i32 15>
   %ext = bitcast <64 x i8> %ext.shuf to <16 x i32>
   %sel = select <16 x i1> %cmp, <16 x i32> %ext, <16 x i32> zeroinitializer
-  %dst.gep = getelementptr i32, i32* %dst, i64 %iv
-  %dst.gep.cast = bitcast i32* %dst.gep to <16 x i32>*
-  store <16 x i32> %sel, <16 x i32>* %dst.gep.cast
+  %dst.gep = getelementptr i32, ptr %dst, i64 %iv
+  store <16 x i32> %sel, ptr %dst.gep
   %iv.next = add nuw i64 %iv, 16
   %ec = icmp eq i64 %iv.next, 128
   br i1 %ec, label %exit, label %loop
@@ -715,7 +711,7 @@ exit:
   ret void
 }
 
-define void @shuffle_in_loop_is_no_extend_v16i8_to_v16i32(i8* %src, i32* %dst) {
+define void @shuffle_in_loop_is_no_extend_v16i8_to_v16i32(ptr %src, ptr %dst) {
 ; CHECK-LABEL: shuffle_in_loop_is_no_extend_v16i8_to_v16i32:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:  Lloh18:
@@ -770,16 +766,14 @@ entry:
 
 loop:
   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
-  %src.gep = getelementptr i8, i8* %src, i64 %iv
-  %src.gep.cast = bitcast i8* %src.gep to <16 x i8>*
-  %load = load <16 x i8>, <16 x i8>* %src.gep.cast
+  %src.gep = getelementptr i8, ptr %src, i64 %iv
+  %load = load <16 x i8>, ptr %src.gep
   %cmp = icmp sgt <16 x i8> %load,  <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   %ext.shuf = shufflevector <16 x i8> %load, <16 x i8> zeroinitializer, <64 x i32> <i32 1, i32 16, i32 16, i32 0, i32 16, i32 16, i32 16, i32 1, i32 16, i32 16, i32 16, i32 2, i32 16, i32 16, i32 16, i32 3, i32 16, i32 16, i32 16, i32 4, i32 16, i32 16, i32 16, i32 5, i32 16, i32 16, i32 16, i32 6, i32 16, i32 16, i32 16, i32 7, i32 16, i32 16, i32 16, i32 8, i32 16, i32 16, i32 16, i32 9, i32 16, i32 16, i32 16, i32 10, i32 16, i32 16, i32 16, i32 11, i32 16, i32 16, i32 16, i32 12, i32 16, i32 16, i32 16, i32 13, i32 16, i32 16, i32 16, i32 14, i32 16, i32 16, i32 16, i32 15>
   %ext = bitcast <64 x i8> %ext.shuf to <16 x i32>
   %sel = select <16 x i1> %cmp, <16 x i32> %ext, <16 x i32> zeroinitializer
-  %dst.gep = getelementptr i32, i32* %dst, i64 %iv
-  %dst.gep.cast = bitcast i32* %dst.gep to <16 x i32>*
-  store <16 x i32> %sel, <16 x i32>* %dst.gep.cast
+  %dst.gep = getelementptr i32, ptr %dst, i64 %iv
+  store <16 x i32> %sel, ptr %dst.gep
   %iv.next = add nuw i64 %iv, 16
   %ec = icmp eq i64 %iv.next, 128
   br i1 %ec, label %exit, label %loop

diff  --git a/llvm/test/CodeGen/AArch64/win-alloca-no-stack-probe.ll b/llvm/test/CodeGen/AArch64/win-alloca-no-stack-probe.ll
index 0ab161f8f27ab..3d60944051e1e 100644
--- a/llvm/test/CodeGen/AArch64/win-alloca-no-stack-probe.ll
+++ b/llvm/test/CodeGen/AArch64/win-alloca-no-stack-probe.ll
@@ -3,11 +3,11 @@
 define void @func(i64 %a) "no-stack-arg-probe" {
 entry:
   %0 = alloca i8, i64 %a, align 16
-  call void @func2(i8* nonnull %0)
+  call void @func2(ptr nonnull %0)
   ret void
 }
 
-declare void @func2(i8*)
+declare void @func2(ptr)
 
 ; CHECK: add [[REG1:x[0-9]+]], x0, #15
 ; CHECK-NOT: bl __chkstk

diff  --git a/llvm/test/CodeGen/AArch64/win-alloca.ll b/llvm/test/CodeGen/AArch64/win-alloca.ll
index e4d52ca990018..08f3fcdf02405 100644
--- a/llvm/test/CodeGen/AArch64/win-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/win-alloca.ll
@@ -5,11 +5,11 @@
 define void @func(i64 %a) {
 entry:
   %0 = alloca i8, i64 %a, align 16
-  call void @func2(i8* nonnull %0)
+  call void @func2(ptr nonnull %0)
   ret void
 }
 
-declare void @func2(i8*)
+declare void @func2(ptr)
 
 ; The -O0 version here ends up much less elegant, so just check the
 ; details of the optimized form, but check that -O0 at least emits the

diff  --git a/llvm/test/CodeGen/AArch64/win-tls.ll b/llvm/test/CodeGen/AArch64/win-tls.ll
index cec39a04e29a8..e448282b7c4cf 100644
--- a/llvm/test/CodeGen/AArch64/win-tls.ll
+++ b/llvm/test/CodeGen/AArch64/win-tls.ll
@@ -5,26 +5,26 @@
 @tlsVar64 = thread_local global i64 0
 
 define i32 @getVar() {
-  %1 = load i32, i32* @tlsVar
+  %1 = load i32, ptr @tlsVar
   ret i32 %1
 }
 
-define i32* @getPtr() {
-  ret i32* @tlsVar
+define ptr @getPtr() {
+  ret ptr @tlsVar
 }
 
 define void @setVar(i32 %val) {
-  store i32 %val, i32* @tlsVar
+  store i32 %val, ptr @tlsVar
   ret void
 }
 
 define i8 @getVar8() {
-  %1 = load i8, i8* @tlsVar8
+  %1 = load i8, ptr @tlsVar8
   ret i8 %1
 }
 
 define i64 @getVar64() {
-  %1 = load i64, i64* @tlsVar64
+  %1 = load i64, ptr @tlsVar64
   ret i64 %1
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/win64-no-uwtable.ll b/llvm/test/CodeGen/AArch64/win64-no-uwtable.ll
index 789620a21dd65..e0ef14ba23921 100644
--- a/llvm/test/CodeGen/AArch64/win64-no-uwtable.ll
+++ b/llvm/test/CodeGen/AArch64/win64-no-uwtable.ll
@@ -20,7 +20,7 @@ define dso_local void @SEHfilter() nounwind "frame-pointer"="all" {
 ; CHECK-NEXT:  .LBB0_2:                                // %if.end.i
 ; CHECK-NEXT:  bl      f
 ; CHECK-NEXT:  brk     #0x1
-  %1 = load i32, i32* undef, align 4
+  %1 = load i32, ptr undef, align 4
   tail call void @g()
   %tobool.i = icmp eq i32 %1, 0
   br i1 %tobool.i, label %if.end.i, label %exit

diff  --git a/llvm/test/CodeGen/AArch64/win64_vararg.ll b/llvm/test/CodeGen/AArch64/win64_vararg.ll
index 954b2f54499d9..cda1fb9bfeca4 100644
--- a/llvm/test/CodeGen/AArch64/win64_vararg.ll
+++ b/llvm/test/CodeGen/AArch64/win64_vararg.ll
@@ -16,20 +16,19 @@ define void @pass_va(i32 %count, ...) nounwind {
 ; CHECK-NEXT:    ldr x30, [sp], #80 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
-  %ap = alloca i8*, align 8
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %ap2 = load i8*, i8** %ap, align 8
-  call void @other_func(i8* %ap2)
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
+  %ap2 = load ptr, ptr %ap, align 8
+  call void @other_func(ptr %ap2)
   ret void
 }
 
-declare void @other_func(i8*) local_unnamed_addr
+declare void @other_func(ptr) local_unnamed_addr
 
-declare void @llvm.va_start(i8*) nounwind
-declare void @llvm.va_copy(i8*, i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind
+declare void @llvm.va_copy(ptr, ptr) nounwind
 
-define i8* @f9(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, i64 %a8, ...) nounwind {
+define ptr @f9(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, i64 %a8, ...) nounwind {
 ; CHECK-LABEL: f9:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -39,14 +38,13 @@ define i8* @f9(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i6
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
 entry:
-  %ap = alloca i8*, align 8
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %ap2 = load i8*, i8** %ap, align 8
-  ret i8* %ap2
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
+  %ap2 = load ptr, ptr %ap, align 8
+  ret ptr %ap2
 }
 
-define i8* @f8(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, ...) nounwind {
+define ptr @f8(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, ...) nounwind {
 ; CHECK-LABEL: f8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #16
@@ -56,14 +54,13 @@ define i8* @f8(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i6
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
 entry:
-  %ap = alloca i8*, align 8
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %ap2 = load i8*, i8** %ap, align 8
-  ret i8* %ap2
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
+  %ap2 = load ptr, ptr %ap, align 8
+  ret ptr %ap2
 }
 
-define i8* @f7(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, ...) nounwind {
+define ptr @f7(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, ...) nounwind {
 ; CHECK-LABEL: f7:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sub sp, sp, #32
@@ -74,11 +71,10 @@ define i8* @f7(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, ..
 ; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
 entry:
-  %ap = alloca i8*, align 8
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %ap2 = load i8*, i8** %ap, align 8
-  ret i8* %ap2
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
+  %ap2 = load ptr, ptr %ap, align 8
+  ret ptr %ap2
 }
 
 define void @copy1(i64 %a0, ...) nounwind {
@@ -93,23 +89,21 @@ define void @copy1(i64 %a0, ...) nounwind {
 ; CHECK-NEXT:    stp x8, x8, [sp], #80
 ; CHECK-NEXT:    ret
 entry:
-  %ap = alloca i8*, align 8
-  %cp = alloca i8*, align 8
-  %ap1 = bitcast i8** %ap to i8*
-  %cp1 = bitcast i8** %cp to i8*
-  call void @llvm.va_start(i8* %ap1)
-  call void @llvm.va_copy(i8* %cp1, i8* %ap1)
+  %ap = alloca ptr, align 8
+  %cp = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
+  call void @llvm.va_copy(ptr %cp, ptr %ap)
   ret void
 }
 
-declare void @llvm.va_end(i8*)
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.va_end(ptr)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
-declare i32 @__stdio_common_vsprintf(i64, i8*, i64, i8*, i8*, i8*) local_unnamed_addr #3
-declare i64* @__local_stdio_printf_options() local_unnamed_addr #4
+declare i32 @__stdio_common_vsprintf(i64, ptr, i64, ptr, ptr, ptr) local_unnamed_addr #3
+declare ptr @__local_stdio_printf_options() local_unnamed_addr #4
 
-define i32 @fp(i8*, i64, i8*, ...) local_unnamed_addr #6 {
+define i32 @fp(ptr, i64, ptr, ...) local_unnamed_addr #6 {
 ; CHECK-LABEL: fp:
 ; CHECK:       .seh_proc fp
 ; CHECK-NEXT:  // %bb.0:
@@ -152,25 +146,24 @@ define i32 @fp(i8*, i64, i8*, ...) local_unnamed_addr #6 {
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:    .seh_endfunclet
 ; CHECK-NEXT:    .seh_endproc
-  %4 = alloca i8*, align 8
-  %5 = bitcast i8** %4 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %5) #2
-  call void @llvm.va_start(i8* nonnull %5)
-  %6 = load i8*, i8** %4, align 8
-  %7 = call i64* @__local_stdio_printf_options() #2
-  %8 = load i64, i64* %7, align 8
-  %9 = or i64 %8, 2
-  %10 = call i32 @__stdio_common_vsprintf(i64 %9, i8* %0, i64 %1, i8* %2, i8* null, i8* %6) #2
-  %11 = icmp sgt i32 %10, -1
-  %12 = select i1 %11, i32 %10, i32 -1
-  call void @llvm.va_end(i8* nonnull %5)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %5) #2
-  ret i32 %12
+  %4 = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %4) #2
+  call void @llvm.va_start(ptr nonnull %4)
+  %5 = load ptr, ptr %4, align 8
+  %6 = call ptr @__local_stdio_printf_options() #2
+  %7 = load i64, ptr %6, align 8
+  %8 = or i64 %7, 2
+  %9 = call i32 @__stdio_common_vsprintf(i64 %8, ptr %0, i64 %1, ptr %2, ptr null, ptr %5) #2
+  %10 = icmp sgt i32 %9, -1
+  %11 = select i1 %10, i32 %9, i32 -1
+  call void @llvm.va_end(ptr nonnull %4)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %4) #2
+  ret i32 %11
 }
 
 attributes #6 = { "frame-pointer"="all" }
 
-define void @vla(i32, i8*, ...) local_unnamed_addr {
+define void @vla(i32, ptr, ...) local_unnamed_addr {
 ; CHECK-LABEL: vla:
 ; CHECK:       .seh_proc vla
 ; CHECK-NEXT:  // %bb.0:
@@ -226,29 +219,28 @@ define void @vla(i32, i8*, ...) local_unnamed_addr {
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:    .seh_endfunclet
 ; CHECK-NEXT:    .seh_endproc
-  %3 = alloca i8*, align 8
-  %4 = bitcast i8** %3 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %4) #5
-  call void @llvm.va_start(i8* nonnull %4)
-  %5 = zext i32 %0 to i64
-  %6 = call i8* @llvm.stacksave()
-  %7 = alloca i8, i64 %5, align 1
-  %8 = load i8*, i8** %3, align 8
-  %9 = sext i32 %0 to i64
-  %10 = call i64* @__local_stdio_printf_options()
-  %11 = load i64, i64* %10, align 8
-  %12 = or i64 %11, 2
-  %13 = call i32 @__stdio_common_vsprintf(i64 %12, i8* nonnull %7, i64 %9, i8* %1, i8* null, i8* %8)
-  call void @llvm.va_end(i8* nonnull %4)
-  call void @llvm.stackrestore(i8* %6)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %4) #5
+  %3 = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %3) #5
+  call void @llvm.va_start(ptr nonnull %3)
+  %4 = zext i32 %0 to i64
+  %5 = call ptr @llvm.stacksave()
+  %6 = alloca i8, i64 %4, align 1
+  %7 = load ptr, ptr %3, align 8
+  %8 = sext i32 %0 to i64
+  %9 = call ptr @__local_stdio_printf_options()
+  %10 = load i64, ptr %9, align 8
+  %11 = or i64 %10, 2
+  %12 = call i32 @__stdio_common_vsprintf(i64 %11, ptr nonnull %6, i64 %8, ptr %1, ptr null, ptr %7)
+  call void @llvm.va_end(ptr nonnull %3)
+  call void @llvm.stackrestore(ptr %5)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %3) #5
   ret void
 }
 
-declare i8* @llvm.stacksave()
-declare void @llvm.stackrestore(i8*)
+declare ptr @llvm.stacksave()
+declare void @llvm.stackrestore(ptr)
 
-define i32 @snprintf(i8*, i64, i8*, ...) local_unnamed_addr #5 {
+define i32 @snprintf(ptr, i64, ptr, ...) local_unnamed_addr #5 {
 ; CHECK-LABEL: snprintf:
 ; CHECK:       .seh_proc snprintf
 ; CHECK-NEXT:  // %bb.0:
@@ -289,20 +281,19 @@ define i32 @snprintf(i8*, i64, i8*, ...) local_unnamed_addr #5 {
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:    .seh_endfunclet
 ; CHECK-NEXT:    .seh_endproc
-  %4 = alloca i8*, align 8
-  %5 = bitcast i8** %4 to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %5) #2
-  call void @llvm.va_start(i8* nonnull %5)
-  %6 = load i8*, i8** %4, align 8
-  %7 = call i64* @__local_stdio_printf_options() #2
-  %8 = load i64, i64* %7, align 8
-  %9 = or i64 %8, 2
-  %10 = call i32 @__stdio_common_vsprintf(i64 %9, i8* %0, i64 %1, i8* %2, i8* null, i8* %6) #2
-  %11 = icmp sgt i32 %10, -1
-  %12 = select i1 %11, i32 %10, i32 -1
-  call void @llvm.va_end(i8* nonnull %5)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %5) #2
-  ret i32 %12
+  %4 = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %4) #2
+  call void @llvm.va_start(ptr nonnull %4)
+  %5 = load ptr, ptr %4, align 8
+  %6 = call ptr @__local_stdio_printf_options() #2
+  %7 = load i64, ptr %6, align 8
+  %8 = or i64 %7, 2
+  %9 = call i32 @__stdio_common_vsprintf(i64 %8, ptr %0, i64 %1, ptr %2, ptr null, ptr %5) #2
+  %10 = icmp sgt i32 %9, -1
+  %11 = select i1 %10, i32 %9, i32 -1
+  call void @llvm.va_end(ptr nonnull %4)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %4) #2
+  ret i32 %11
 }
 
 define void @fixed_params(i32, double, i32, double, i32, double, i32, double, i32, double) nounwind {

diff  --git a/llvm/test/CodeGen/AArch64/win64_vararg_float.ll b/llvm/test/CodeGen/AArch64/win64_vararg_float.ll
index 2c05432542ec1..6bf37abf8cfa4 100644
--- a/llvm/test/CodeGen/AArch64/win64_vararg_float.ll
+++ b/llvm/test/CodeGen/AArch64/win64_vararg_float.ll
@@ -37,22 +37,21 @@ define void @float_va_fn(float %a, i32 %b, ...) nounwind {
 ; O0-NEXT:    add sp, sp, #80
 ; O0-NEXT:    ret
 entry:
-  %ap = alloca i8*, align 8
-  %0 = bitcast i8** %ap to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0)
-  call void @llvm.va_start(i8* nonnull %0)
-  %1 = load i8*, i8** %ap, align 8
-  call void @f_va_list(float %a, i8* %1)
-  call void @llvm.va_end(i8* nonnull %0)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0)
+  %ap = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %ap)
+  call void @llvm.va_start(ptr nonnull %ap)
+  %0 = load ptr, ptr %ap, align 8
+  call void @f_va_list(float %a, ptr %0)
+  call void @llvm.va_end(ptr nonnull %ap)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %ap)
   ret void
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.va_start(i8*)
-declare void @f_va_list(float, i8*)
-declare void @llvm.va_end(i8*)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.va_start(ptr)
+declare void @f_va_list(float, ptr)
+declare void @llvm.va_end(ptr)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 define void @double_va_fn(double %a, i32 %b, ...) nounwind {
 ; DAGISEL-LABEL: double_va_fn:
@@ -88,18 +87,17 @@ define void @double_va_fn(double %a, i32 %b, ...) nounwind {
 ; O0-NEXT:    add sp, sp, #80
 ; O0-NEXT:    ret
 entry:
-  %ap = alloca i8*, align 8
-  %0 = bitcast i8** %ap to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0)
-  call void @llvm.va_start(i8* nonnull %0)
-  %1 = load i8*, i8** %ap, align 8
-  call void @d_va_list(double %a, i8* %1)
-  call void @llvm.va_end(i8* nonnull %0)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0)
+  %ap = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %ap)
+  call void @llvm.va_start(ptr nonnull %ap)
+  %0 = load ptr, ptr %ap, align 8
+  call void @d_va_list(double %a, ptr %0)
+  call void @llvm.va_end(ptr nonnull %ap)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %ap)
   ret void
 }
 
-declare void @d_va_list(double, i8*)
+declare void @d_va_list(double, ptr)
 
 define void @call_f_va() nounwind {
 ; DAGISEL-LABEL: call_f_va:

diff  --git a/llvm/test/CodeGen/AArch64/win64_vararg_float_cc.ll b/llvm/test/CodeGen/AArch64/win64_vararg_float_cc.ll
index 1aa62857ff5ff..8546b39e98f0a 100644
--- a/llvm/test/CodeGen/AArch64/win64_vararg_float_cc.ll
+++ b/llvm/test/CodeGen/AArch64/win64_vararg_float_cc.ll
@@ -37,22 +37,21 @@ define win64cc void @float_va_fn(float %a, i32 %b, ...) nounwind {
 ; O0-NEXT:    add sp, sp, #80
 ; O0-NEXT:    ret
 entry:
-  %ap = alloca i8*, align 8
-  %0 = bitcast i8** %ap to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0)
-  call void @llvm.va_start(i8* nonnull %0)
-  %1 = load i8*, i8** %ap, align 8
-  call void @f_va_list(float %a, i8* %1)
-  call void @llvm.va_end(i8* nonnull %0)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0)
+  %ap = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %ap)
+  call void @llvm.va_start(ptr nonnull %ap)
+  %0 = load ptr, ptr %ap, align 8
+  call void @f_va_list(float %a, ptr %0)
+  call void @llvm.va_end(ptr nonnull %ap)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %ap)
   ret void
 }
 
-declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
-declare void @llvm.va_start(i8*)
-declare void @f_va_list(float, i8*)
-declare void @llvm.va_end(i8*)
-declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.va_start(ptr)
+declare void @f_va_list(float, ptr)
+declare void @llvm.va_end(ptr)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 define win64cc void @double_va_fn(double %a, i32 %b, ...) nounwind {
 ; DAGISEL-LABEL: double_va_fn:
@@ -88,18 +87,17 @@ define win64cc void @double_va_fn(double %a, i32 %b, ...) nounwind {
 ; O0-NEXT:    add sp, sp, #80
 ; O0-NEXT:    ret
 entry:
-  %ap = alloca i8*, align 8
-  %0 = bitcast i8** %ap to i8*
-  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0)
-  call void @llvm.va_start(i8* nonnull %0)
-  %1 = load i8*, i8** %ap, align 8
-  call void @d_va_list(double %a, i8* %1)
-  call void @llvm.va_end(i8* nonnull %0)
-  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0)
+  %ap = alloca ptr, align 8
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %ap)
+  call void @llvm.va_start(ptr nonnull %ap)
+  %0 = load ptr, ptr %ap, align 8
+  call void @d_va_list(double %a, ptr %0)
+  call void @llvm.va_end(ptr nonnull %ap)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %ap)
   ret void
 }
 
-declare void @d_va_list(double, i8*)
+declare void @d_va_list(double, ptr)
 
 define void @call_f_va() nounwind {
 ; DAGISEL-LABEL: call_f_va:

diff  --git a/llvm/test/CodeGen/AArch64/windows-SEH-support.ll b/llvm/test/CodeGen/AArch64/windows-SEH-support.ll
index 499ae782beb1c..21e96dd1052b6 100644
--- a/llvm/test/CodeGen/AArch64/windows-SEH-support.ll
+++ b/llvm/test/CodeGen/AArch64/windows-SEH-support.ll
@@ -4,7 +4,7 @@ declare dllimport void @f() local_unnamed_addr
 
 declare dso_local i32 @__C_specific_handler(...)
 
-define hidden swiftcc void @g() unnamed_addr personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
+define hidden swiftcc void @g() unnamed_addr personality ptr @__C_specific_handler {
 entry:
   invoke void @f() to label %__try.cont unwind label %catch.dispatch
 
@@ -12,14 +12,14 @@ catch.dispatch:                                   ; preds = %entry
   %0 = catchswitch within none [label %__except] unwind to caller
 
 __except:
-  %1 = catchpad within %0 [i8* null]              ; preds = %catch.dispatch
+  %1 = catchpad within %0 [ptr null]              ; preds = %catch.dispatch
   catchret from %1 to label %__try.cont
 
 __try.cont:                                       ; preds = %__except, %entry
   ret void
 }
 
-define hidden fastcc void @h() unnamed_addr personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) {
+define hidden fastcc void @h() unnamed_addr personality ptr @__C_specific_handler {
 entry:
   invoke void @f() to label %__try.cont unwind label %catch.dispatch
 
@@ -27,7 +27,7 @@ catch.dispatch:                                   ; preds = %entry
   %0 = catchswitch within none [label %__except] unwind to caller
 
 __except:                                         ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* null]
+  %1 = catchpad within %0 [ptr null]
   catchret from %1 to label %__try.cont
 
 __try.cont:                                       ; preds = %__except, %entry

diff  --git a/llvm/test/CodeGen/AArch64/windows-extern-weak.ll b/llvm/test/CodeGen/AArch64/windows-extern-weak.ll
index dbd17e35f44a6..dca8e9e3eab8e 100644
--- a/llvm/test/CodeGen/AArch64/windows-extern-weak.ll
+++ b/llvm/test/CodeGen/AArch64/windows-extern-weak.ll
@@ -19,7 +19,7 @@ define void @func() {
 ; CHECK-NEXT: .seh_endepilogue
 ; CHECK-NEXT: ret
 
-  br i1 icmp ne (void ()* @weakfunc, void ()* null), label %1, label %2
+  br i1 icmp ne (ptr @weakfunc, ptr null), label %1, label %2
 
 1:
   call void @weakfunc()

diff  --git a/llvm/test/CodeGen/AArch64/wineh-mingw.ll b/llvm/test/CodeGen/AArch64/wineh-mingw.ll
index d22c61fca7575..6e8a8ad69b5b4 100644
--- a/llvm/test/CodeGen/AArch64/wineh-mingw.ll
+++ b/llvm/test/CodeGen/AArch64/wineh-mingw.ll
@@ -2,34 +2,34 @@
 ; RUN: llc < %s -mtriple=aarch64-pc-mingw32 -filetype=obj | llvm-readobj -S - | FileCheck %s -check-prefix=WINEH-SECTIONS
 
 ; Check emission of eh handler and handler data
-declare i32 @_d_eh_personality(i32, i32, i64, i8*, i8*)
-declare void @_d_eh_resume_unwind(i8*)
+declare i32 @_d_eh_personality(i32, i32, i64, ptr, ptr)
+declare void @_d_eh_resume_unwind(ptr)
 
 declare i32 @bar()
 
-define i32 @foo4() #0 personality i32 (i32, i32, i64, i8*, i8*)* @_d_eh_personality {
+define i32 @foo4() #0 personality ptr @_d_eh_personality {
 entry:
   %step = alloca i32, align 4
-  store i32 0, i32* %step
-  %tmp = load i32, i32* %step
+  store i32 0, ptr %step
+  %tmp = load i32, ptr %step
 
   %tmp1 = invoke i32 @bar()
           to label %finally unwind label %landingpad
 
 finally:
-  store i32 1, i32* %step
+  store i32 1, ptr %step
   br label %endtryfinally
 
 landingpad:
-  %landing_pad = landingpad { i8*, i32 }
+  %landing_pad = landingpad { ptr, i32 }
           cleanup
-  %tmp3 = extractvalue { i8*, i32 } %landing_pad, 0
-  store i32 2, i32* %step
-  call void @_d_eh_resume_unwind(i8* %tmp3)
+  %tmp3 = extractvalue { ptr, i32 } %landing_pad, 0
+  store i32 2, ptr %step
+  call void @_d_eh_resume_unwind(ptr %tmp3)
   unreachable
 
 endtryfinally:
-  %tmp10 = load i32, i32* %step
+  %tmp10 = load i32, ptr %step
   ret i32 %tmp10
 }
 ; WINEH-LABEL: foo4:

diff  --git a/llvm/test/CodeGen/AArch64/wineh-try-catch-cbz.ll b/llvm/test/CodeGen/AArch64/wineh-try-catch-cbz.ll
index ae11082e88a6d..1d1e262ac014e 100644
--- a/llvm/test/CodeGen/AArch64/wineh-try-catch-cbz.ll
+++ b/llvm/test/CodeGen/AArch64/wineh-try-catch-cbz.ll
@@ -17,7 +17,7 @@ target datalayout = "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-unknown-windows-msvc19.11.0"
 
 ; Function Attrs: uwtable
-define dso_local void @"?f@@YAXH@Z"(i32 %x) local_unnamed_addr #0 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define dso_local void @"?f@@YAXH@Z"(i32 %x) local_unnamed_addr #0 personality ptr @__CxxFrameHandler3 {
 entry:
   %cmp = icmp eq i32 %x, 0
   br i1 %cmp, label %try.cont, label %if.then
@@ -30,7 +30,7 @@ catch.dispatch:                                   ; preds = %if.then
   %0 = catchswitch within none [label %catch] unwind to caller
 
 catch:                                            ; preds = %catch.dispatch
-  %1 = catchpad within %0 [i8* null, i32 64, i8* null]
+  %1 = catchpad within %0 [ptr null, i32 64, ptr null]
   catchret from %1 to label %try.cont
 
 try.cont:                                         ; preds = %entry, %if.then, %catch

diff  --git a/llvm/test/CodeGen/AArch64/wineh-try-catch-nobase.ll b/llvm/test/CodeGen/AArch64/wineh-try-catch-nobase.ll
index 5573a8215babc..361dc8c61e161 100644
--- a/llvm/test/CodeGen/AArch64/wineh-try-catch-nobase.ll
+++ b/llvm/test/CodeGen/AArch64/wineh-try-catch-nobase.ll
@@ -23,30 +23,29 @@
 target datalayout = "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-unknown-windows-msvc19.11.0"
 
-define dso_local void @"?a@@YAXXZ"(i64 %p1) personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define dso_local void @"?a@@YAXXZ"(i64 %p1) personality ptr @__CxxFrameHandler3 {
 entry:
   %a = alloca i32, align 16
-  %0 = bitcast i32* %a to i8*  
-  store i32 305419896, i32* %a, align 16
-  invoke void @"?bb@@YAXPEAHH@Z"(i32* nonnull %a, i32* null)
+  store i32 305419896, ptr %a, align 16
+  invoke void @"?bb@@YAXPEAHH@Z"(ptr nonnull %a, ptr null)
           to label %try.cont unwind label %catch.dispatch
 
 catch.dispatch:                                   ; preds = %entry
-  %1 = catchswitch within none [label %catch] unwind to caller
+  %0 = catchswitch within none [label %catch] unwind to caller
 
 catch:                                            ; preds = %catch.dispatch
-  %2 = catchpad within %1 [i8* null, i32 64, i8* null]
-  call void @"?bb@@YAXPEAHH@Z"(i32* nonnull %a, i32* null) [ "funclet"(token %2) ]
-  catchret from %2 to label %try.cont
+  %1 = catchpad within %0 [ptr null, i32 64, ptr null]
+  call void @"?bb@@YAXPEAHH@Z"(ptr nonnull %a, ptr null) [ "funclet"(token %1) ]
+  catchret from %1 to label %try.cont
 
 try.cont:                                         ; preds = %entry, %catch
   call void @"?cc@@YAXXZ"()
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1)
 
-declare dso_local void @"?bb@@YAXPEAHH@Z"(i32*, i32*)
+declare dso_local void @"?bb@@YAXPEAHH@Z"(ptr, ptr)
 
 declare dso_local i32 @__CxxFrameHandler3(...)
 

diff  --git a/llvm/test/CodeGen/AArch64/wineh-try-catch-realign.ll b/llvm/test/CodeGen/AArch64/wineh-try-catch-realign.ll
index 85b3631c459f7..484e1879549c4 100644
--- a/llvm/test/CodeGen/AArch64/wineh-try-catch-realign.ll
+++ b/llvm/test/CodeGen/AArch64/wineh-try-catch-realign.ll
@@ -35,32 +35,30 @@
 target datalayout = "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-unknown-windows-msvc19.11.0"
 
-define dso_local void @"?a@@YAXXZ"() personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define dso_local void @"?a@@YAXXZ"() personality ptr @__CxxFrameHandler3 {
 entry:
   %a = alloca [100 x i32], align 64
-  %0 = bitcast [100 x i32]* %a to i8*  
-  call void @llvm.memset.p0i8.i64(i8* nonnull align 64 %0, i8 0, i64 400, i1 false)
-  %1 = getelementptr inbounds [100 x i32], [100 x i32]* %a, i64 0, i64 0
-  store i32 305419896, i32* %1, align 64
-  invoke void @"?bb@@YAXPEAHH@Z"(i32* nonnull %1, i32 1)
+  call void @llvm.memset.p0.i64(ptr nonnull align 64 %a, i8 0, i64 400, i1 false)
+  store i32 305419896, ptr %a, align 64
+  invoke void @"?bb@@YAXPEAHH@Z"(ptr nonnull %a, i32 1)
           to label %try.cont unwind label %catch.dispatch
 
 catch.dispatch:                                   ; preds = %entry
-  %2 = catchswitch within none [label %catch] unwind to caller
+  %0 = catchswitch within none [label %catch] unwind to caller
 
 catch:                                            ; preds = %catch.dispatch
-  %3 = catchpad within %2 [i8* null, i32 64, i8* null]
-  call void @"?bb@@YAXPEAHH@Z"(i32* nonnull %1, i32 0) [ "funclet"(token %3) ]
-  catchret from %3 to label %try.cont
+  %1 = catchpad within %0 [ptr null, i32 64, ptr null]
+  call void @"?bb@@YAXPEAHH@Z"(ptr nonnull %a, i32 0) [ "funclet"(token %1) ]
+  catchret from %1 to label %try.cont
 
 try.cont:                                         ; preds = %entry, %catch
   call void @"?cc@@YAXXZ"()
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1)
 
-declare dso_local void @"?bb@@YAXPEAHH@Z"(i32*, i32)
+declare dso_local void @"?bb@@YAXPEAHH@Z"(ptr, i32)
 
 declare dso_local i32 @__CxxFrameHandler3(...)
 

diff  --git a/llvm/test/CodeGen/AArch64/wineh-try-catch-vla.ll b/llvm/test/CodeGen/AArch64/wineh-try-catch-vla.ll
index c4974636d8796..46e5f2403f2ae 100644
--- a/llvm/test/CodeGen/AArch64/wineh-try-catch-vla.ll
+++ b/llvm/test/CodeGen/AArch64/wineh-try-catch-vla.ll
@@ -25,31 +25,30 @@
 target datalayout = "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-unknown-windows-msvc19.11.0"
 
-define dso_local void @"?a@@YAXXZ"(i64 %p1) personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define dso_local void @"?a@@YAXXZ"(i64 %p1) personality ptr @__CxxFrameHandler3 {
 entry:
   %a = alloca i32, i64 %p1, align 16
-  %0 = bitcast i32* %a to i8*  
-  call void @llvm.memset.p0i8.i64(i8* nonnull align 16 %0, i8 0, i64 400, i1 false)
-  store i32 305419896, i32* %a, align 16
-  invoke void @"?bb@@YAXPEAHH@Z"(i32* nonnull %a, i32* null)
+  call void @llvm.memset.p0.i64(ptr nonnull align 16 %a, i8 0, i64 400, i1 false)
+  store i32 305419896, ptr %a, align 16
+  invoke void @"?bb@@YAXPEAHH@Z"(ptr nonnull %a, ptr null)
           to label %try.cont unwind label %catch.dispatch
 
 catch.dispatch:                                   ; preds = %entry
-  %1 = catchswitch within none [label %catch] unwind to caller
+  %0 = catchswitch within none [label %catch] unwind to caller
 
 catch:                                            ; preds = %catch.dispatch
-  %2 = catchpad within %1 [i8* null, i32 64, i8* null]
-  call void @"?bb@@YAXPEAHH@Z"(i32* nonnull %a, i32* %a) [ "funclet"(token %2) ]
-  catchret from %2 to label %try.cont
+  %1 = catchpad within %0 [ptr null, i32 64, ptr null]
+  call void @"?bb@@YAXPEAHH@Z"(ptr nonnull %a, ptr %a) [ "funclet"(token %1) ]
+  catchret from %1 to label %try.cont
 
 try.cont:                                         ; preds = %entry, %catch
   call void @"?cc@@YAXXZ"()
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1)
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1)
 
-declare dso_local void @"?bb@@YAXPEAHH@Z"(i32*, i32*)
+declare dso_local void @"?bb@@YAXPEAHH@Z"(ptr, ptr)
 
 declare dso_local i32 @__CxxFrameHandler3(...)
 

diff  --git a/llvm/test/CodeGen/AArch64/wineh-try-catch.ll b/llvm/test/CodeGen/AArch64/wineh-try-catch.ll
index e60c7b97c4e7a..af9491aaf346d 100644
--- a/llvm/test/CodeGen/AArch64/wineh-try-catch.ll
+++ b/llvm/test/CodeGen/AArch64/wineh-try-catch.ll
@@ -102,7 +102,7 @@
 target datalayout = "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-unknown-windows-msvc19.11.0"
 
-%rtti.TypeDescriptor2 = type { i8**, i8*, [3 x i8] }
+%rtti.TypeDescriptor2 = type { ptr, ptr, [3 x i8] }
 %eh.CatchableType = type { i32, i32, i32, i32, i32, i32, i32 }
 %eh.CatchableTypeArray.1 = type { i32, [1 x i32] }
 %eh.ThrowInfo = type { i32, i32, i32, i32 }
@@ -115,81 +115,77 @@ $_CTA1H = comdat any
 
 $_TI1H = comdat any
 
-@"??_7type_info@@6B@" = external constant i8*
-@"??_R0H@8" = linkonce_odr global %rtti.TypeDescriptor2 { i8** @"??_7type_info@@6B@", i8* null, [3 x i8] c".H\00" }, comdat
+@"??_7type_info@@6B@" = external constant ptr
+@"??_R0H@8" = linkonce_odr global %rtti.TypeDescriptor2 { ptr @"??_7type_info@@6B@", ptr null, [3 x i8] c".H\00" }, comdat
 @__ImageBase = external dso_local constant i8
-@"_CT??_R0H@84" = linkonce_odr unnamed_addr constant %eh.CatchableType { i32 1, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%rtti.TypeDescriptor2* @"??_R0H@8" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32), i32 0, i32 -1, i32 0, i32 4, i32 0 }, section ".xdata", comdat
-@_CTA1H = linkonce_odr unnamed_addr constant %eh.CatchableTypeArray.1 { i32 1, [1 x i32] [i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%eh.CatchableType* @"_CT??_R0H@84" to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32)] }, section ".xdata", comdat
-@_TI1H = linkonce_odr unnamed_addr constant %eh.ThrowInfo { i32 0, i32 0, i32 0, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (%eh.CatchableTypeArray.1* @_CTA1H to i64), i64 ptrtoint (i8* @__ImageBase to i64)) to i32) }, section ".xdata", comdat
+@"_CT??_R0H@84" = linkonce_odr unnamed_addr constant %eh.CatchableType { i32 1, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"??_R0H@8" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32), i32 0, i32 -1, i32 0, i32 4, i32 0 }, section ".xdata", comdat
+@_CTA1H = linkonce_odr unnamed_addr constant %eh.CatchableTypeArray.1 { i32 1, [1 x i32] [i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @"_CT??_R0H@84" to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32)] }, section ".xdata", comdat
+@_TI1H = linkonce_odr unnamed_addr constant %eh.ThrowInfo { i32 0, i32 0, i32 0, i32 trunc (i64 sub nuw nsw (i64 ptrtoint (ptr @_CTA1H to i64), i64 ptrtoint (ptr @__ImageBase to i64)) to i32) }, section ".xdata", comdat
 
 ; Function Attrs: noinline optnone
-define dso_local i32 @"?func@@YAHXZ"() #0 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define dso_local i32 @"?func@@YAHXZ"() #0 personality ptr @__CxxFrameHandler3 {
 entry:
   %B = alloca [50 x i32], align 4
   %x = alloca i32, align 4
   %tmp = alloca i32, align 4
   %i = alloca i32, align 4
   %C = alloca [100 x i32], align 4
-  store i32 1, i32* %x, align 4
-  %arraydecay = getelementptr inbounds [50 x i32], [50 x i32]* %B, i32 0, i32 0
-  call void @"?init@@YAXPEAH@Z"(i32* %arraydecay)
+  store i32 1, ptr %x, align 4
+  call void @"?init@@YAXPEAH@Z"(ptr %B)
   %call = invoke i32 @"?func2@@YAHXZ"()
           to label %invoke.cont unwind label %catch.dispatch
 
 invoke.cont:                                      ; preds = %entry
-  store i32 %call, i32* %tmp, align 4
-  %0 = bitcast i32* %tmp to i8*
-  invoke void @_CxxThrowException(i8* %0, %eh.ThrowInfo* @_TI1H) #2
+  store i32 %call, ptr %tmp, align 4
+  invoke void @_CxxThrowException(ptr %tmp, ptr @_TI1H) #2
           to label %unreachable unwind label %catch.dispatch
 
 catch.dispatch:                                   ; preds = %invoke.cont, %entry
-  %1 = catchswitch within none [label %catch] unwind to caller
+  %0 = catchswitch within none [label %catch] unwind to caller
 
 catch:                                            ; preds = %catch.dispatch
-  %2 = catchpad within %1 [%rtti.TypeDescriptor2* @"??_R0H@8", i32 0, i32* %i]
-  %arraydecay1 = getelementptr inbounds [100 x i32], [100 x i32]* %C, i32 0, i32 0
-  call void @"?init@@YAXPEAH@Z"(i32* %arraydecay1) [ "funclet"(token %2) ]
-  %arraydecay2 = getelementptr inbounds [50 x i32], [50 x i32]* %B, i32 0, i32 0
-  call void @"?init2@@YAXPEAH@Z"(i32* %arraydecay2) [ "funclet"(token %2) ]
-  %3 = load i32, i32* %i, align 4
-  %idxprom = sext i32 %3 to i64
-  %arrayidx = getelementptr inbounds [50 x i32], [50 x i32]* %B, i64 0, i64 %idxprom
-  %4 = load i32, i32* %arrayidx, align 4
-  %5 = load i32, i32* %i, align 4
-  %idxprom3 = sext i32 %5 to i64
-  %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %C, i64 0, i64 %idxprom3
-  %6 = load i32, i32* %arrayidx4, align 4
-  %add = add nsw i32 %4, %6
-  %7 = load i32, i32* %i, align 4
-  %8 = load i32, i32* %i, align 4
-  %mul = mul nsw i32 %7, %8
+  %1 = catchpad within %0 [ptr @"??_R0H@8", i32 0, ptr %i]
+  call void @"?init@@YAXPEAH@Z"(ptr %C) [ "funclet"(token %1) ]
+  call void @"?init2@@YAXPEAH@Z"(ptr %B) [ "funclet"(token %1) ]
+  %2 = load i32, ptr %i, align 4
+  %idxprom = sext i32 %2 to i64
+  %arrayidx = getelementptr inbounds [50 x i32], ptr %B, i64 0, i64 %idxprom
+  %3 = load i32, ptr %arrayidx, align 4
+  %4 = load i32, ptr %i, align 4
+  %idxprom3 = sext i32 %4 to i64
+  %arrayidx4 = getelementptr inbounds [100 x i32], ptr %C, i64 0, i64 %idxprom3
+  %5 = load i32, ptr %arrayidx4, align 4
+  %add = add nsw i32 %3, %5
+  %6 = load i32, ptr %i, align 4
+  %7 = load i32, ptr %i, align 4
+  %mul = mul nsw i32 %6, %7
   %add5 = add nsw i32 %add, %mul
-  store i32 %add5, i32* %x, align 4
-  catchret from %2 to label %catchret.dest
+  store i32 %add5, ptr %x, align 4
+  catchret from %1 to label %catchret.dest
 
 catchret.dest:                                    ; preds = %catch
   br label %try.cont
 
 try.cont:                                         ; preds = %catchret.dest
-  %arrayidx6 = getelementptr inbounds [50 x i32], [50 x i32]* %B, i64 0, i64 2
-  %9 = load i32, i32* %arrayidx6, align 4
-  %10 = load i32, i32* %x, align 4
-  %add7 = add nsw i32 %9, %10
+  %arrayidx6 = getelementptr inbounds [50 x i32], ptr %B, i64 0, i64 2
+  %8 = load i32, ptr %arrayidx6, align 4
+  %9 = load i32, ptr %x, align 4
+  %add7 = add nsw i32 %8, %9
   ret i32 %add7
 
 unreachable:                                      ; preds = %invoke.cont
   unreachable
 }
 
-declare dso_local void @"?init@@YAXPEAH@Z"(i32*)
+declare dso_local void @"?init@@YAXPEAH@Z"(ptr)
 
 declare dso_local i32 @"?func2@@YAHXZ"()
 
 declare dso_local i32 @__CxxFrameHandler3(...)
 
-declare dllimport void @_CxxThrowException(i8*, %eh.ThrowInfo*)
+declare dllimport void @_CxxThrowException(ptr, ptr)
 
-declare dso_local void @"?init2@@YAXPEAH@Z"(i32*)
+declare dso_local void @"?init2@@YAXPEAH@Z"(ptr)
 
 attributes #0 = { noinline optnone }
 attributes #2 = { noreturn }

diff  --git a/llvm/test/CodeGen/AArch64/wineh-unwindhelp-via-fp.ll b/llvm/test/CodeGen/AArch64/wineh-unwindhelp-via-fp.ll
index 6ec78087020c4..8317babad693a 100644
--- a/llvm/test/CodeGen/AArch64/wineh-unwindhelp-via-fp.ll
+++ b/llvm/test/CodeGen/AArch64/wineh-unwindhelp-via-fp.ll
@@ -46,24 +46,24 @@ target triple = "aarch64-pc-windows-msvc19.25.28611"
 ; }
 
 %struct.A = type { [4 x i32], [16 x i8] }
-declare dso_local %struct.A* @"??0A@@QEAA@XZ"(%struct.A* returned %0)
-declare dso_local void @"??1A@@QEAA@XZ"(%struct.A* %0)
+declare dso_local ptr @"??0A@@QEAA@XZ"(ptr returned %0)
+declare dso_local void @"??1A@@QEAA@XZ"(ptr %0)
 declare dso_local i32 @__CxxFrameHandler3(...)
 declare dso_local void @"?func3@@YAXXZ"()
 
 ; Function Attrs: noinline optnone uwtable
-define dso_local void @"?func2@@YAXXZ"() #0 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+define dso_local void @"?func2@@YAXXZ"() #0 personality ptr @__CxxFrameHandler3 {
   %1 = alloca %struct.A, align 32
-  %2 = call %struct.A* @"??0A@@QEAA@XZ"(%struct.A* %1) #3
+  %2 = call ptr @"??0A@@QEAA@XZ"(ptr %1) #3
   invoke void @"?func3@@YAXXZ"()
           to label %3 unwind label %4
 
 3:                                                ; preds = %0
-  call void @"??1A@@QEAA@XZ"(%struct.A* %1) #3
+  call void @"??1A@@QEAA@XZ"(ptr %1) #3
   ret void
 
 4:                                                ; preds = %0
   %5 = cleanuppad within none []
-  call void @"??1A@@QEAA@XZ"(%struct.A* %1) #3 [ "funclet"(token %5) ]
+  call void @"??1A@@QEAA@XZ"(ptr %1) #3 [ "funclet"(token %5) ]
   cleanupret from %5 unwind to caller
 }

diff  --git a/llvm/test/CodeGen/AArch64/wrong_debug_loc_after_regalloc.ll b/llvm/test/CodeGen/AArch64/wrong_debug_loc_after_regalloc.ll
index 83d61d7178996..b5f09d46a75a5 100644
--- a/llvm/test/CodeGen/AArch64/wrong_debug_loc_after_regalloc.ll
+++ b/llvm/test/CodeGen/AArch64/wrong_debug_loc_after_regalloc.ll
@@ -53,30 +53,30 @@ target triple = "aarch64-unknown-linux"
 @array = dso_local local_unnamed_addr global [256 x i32] zeroinitializer, align 4, !dbg !0
 
 ; Function Attrs: norecurse
-define dso_local i32 @main(i32 %argc, i8** nocapture readnone %argv) local_unnamed_addr #0 !dbg !14 {
+define dso_local i32 @main(i32 %argc, ptr nocapture readnone %argv) local_unnamed_addr #0 !dbg !14 {
 entry:
   call void @llvm.dbg.value(metadata i32 %argc, metadata !21, metadata !DIExpression()), !dbg !36
-  call void @llvm.dbg.value(metadata i8** %argv, metadata !22, metadata !DIExpression()), !dbg !36
+  call void @llvm.dbg.value(metadata ptr %argv, metadata !22, metadata !DIExpression()), !dbg !36
   call void @llvm.dbg.value(metadata i32 56, metadata !23, metadata !DIExpression()), !dbg !36
-  %0 = load i32, i32* getelementptr inbounds ([256 x i32], [256 x i32]* @array, i64 0, i64 1), align 4, !dbg !37
+  %0 = load i32, ptr getelementptr inbounds ([256 x i32], ptr @array, i64 0, i64 1), align 4, !dbg !37
   call void @llvm.dbg.value(metadata i32 %0, metadata !24, metadata !DIExpression()), !dbg !36
-  %1 = load i32, i32* getelementptr inbounds ([256 x i32], [256 x i32]* @array, i64 0, i64 2), align 4, !dbg !42
+  %1 = load i32, ptr getelementptr inbounds ([256 x i32], ptr @array, i64 0, i64 2), align 4, !dbg !42
   call void @llvm.dbg.value(metadata i32 %1, metadata !25, metadata !DIExpression()), !dbg !36
-  %2 = load i32, i32* getelementptr inbounds ([256 x i32], [256 x i32]* @array, i64 0, i64 3), align 4, !dbg !43
+  %2 = load i32, ptr getelementptr inbounds ([256 x i32], ptr @array, i64 0, i64 3), align 4, !dbg !43
   call void @llvm.dbg.value(metadata i32 %2, metadata !26, metadata !DIExpression()), !dbg !36
-  %3 = load i32, i32* getelementptr inbounds ([256 x i32], [256 x i32]* @array, i64 0, i64 4), align 4, !dbg !44
+  %3 = load i32, ptr getelementptr inbounds ([256 x i32], ptr @array, i64 0, i64 4), align 4, !dbg !44
   call void @llvm.dbg.value(metadata i32 %3, metadata !27, metadata !DIExpression()), !dbg !36
-  %4 = load i32, i32* getelementptr inbounds ([256 x i32], [256 x i32]* @array, i64 0, i64 5), align 4, !dbg !45
+  %4 = load i32, ptr getelementptr inbounds ([256 x i32], ptr @array, i64 0, i64 5), align 4, !dbg !45
   call void @llvm.dbg.value(metadata i32 %4, metadata !28, metadata !DIExpression()), !dbg !36
-  %5 = load i32, i32* getelementptr inbounds ([256 x i32], [256 x i32]* @array, i64 0, i64 6), align 4, !dbg !46
+  %5 = load i32, ptr getelementptr inbounds ([256 x i32], ptr @array, i64 0, i64 6), align 4, !dbg !46
   call void @llvm.dbg.value(metadata i32 %5, metadata !29, metadata !DIExpression()), !dbg !36
-  %6 = load i32, i32* getelementptr inbounds ([256 x i32], [256 x i32]* @array, i64 0, i64 7), align 4, !dbg !47
+  %6 = load i32, ptr getelementptr inbounds ([256 x i32], ptr @array, i64 0, i64 7), align 4, !dbg !47
   call void @llvm.dbg.value(metadata i32 %6, metadata !30, metadata !DIExpression()), !dbg !36
-  %7 = load i32, i32* getelementptr inbounds ([256 x i32], [256 x i32]* @array, i64 0, i64 8), align 4, !dbg !48
+  %7 = load i32, ptr getelementptr inbounds ([256 x i32], ptr @array, i64 0, i64 8), align 4, !dbg !48
   call void @llvm.dbg.value(metadata i32 %7, metadata !31, metadata !DIExpression()), !dbg !36
-  %8 = load i32, i32* getelementptr inbounds ([256 x i32], [256 x i32]* @array, i64 0, i64 9), align 4, !dbg !49
+  %8 = load i32, ptr getelementptr inbounds ([256 x i32], ptr @array, i64 0, i64 9), align 4, !dbg !49
   call void @llvm.dbg.value(metadata i32 %8, metadata !32, metadata !DIExpression()), !dbg !36
-  %9 = load i32, i32* getelementptr inbounds ([256 x i32], [256 x i32]* @array, i64 0, i64 10), align 4, !dbg !50
+  %9 = load i32, ptr getelementptr inbounds ([256 x i32], ptr @array, i64 0, i64 10), align 4, !dbg !50
   call void @llvm.dbg.value(metadata i32 %9, metadata !33, metadata !DIExpression()), !dbg !36
   call void @llvm.dbg.value(metadata i32 0, metadata !34, metadata !DIExpression()), !dbg !51
   br label %for.body, !dbg !52
@@ -87,8 +87,8 @@ for.cond.cleanup:                                 ; preds = %for.body
 for.body:                                         ; preds = %for.body, %entry
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   call void @llvm.dbg.value(metadata i64 %indvars.iv, metadata !34, metadata !DIExpression()), !dbg !51
-  %arrayidx = getelementptr inbounds [256 x i32], [256 x i32]* @array, i64 0, i64 %indvars.iv, !dbg !54
-  store i32 56, i32* %arrayidx, align 4, !dbg !57
+  %arrayidx = getelementptr inbounds [256 x i32], ptr @array, i64 0, i64 %indvars.iv, !dbg !54
+  store i32 56, ptr %arrayidx, align 4, !dbg !57
   %10 = trunc i64 %indvars.iv to i32, !dbg !58
   tail call void (i32, ...) @_Z4funciz(i32 0, i32 %10, i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9), !dbg !58
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !59

diff  --git a/llvm/test/CodeGen/AArch64/xor.ll b/llvm/test/CodeGen/AArch64/xor.ll
index 494bdbda949ef..d92402cf43b33 100644
--- a/llvm/test/CodeGen/AArch64/xor.ll
+++ b/llvm/test/CodeGen/AArch64/xor.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
 
-define i32 @PR39657(i8* %p, i64 %x) {
+define i32 @PR39657(ptr %p, i64 %x) {
 ; CHECK-LABEL: PR39657:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mvn x8, x1
@@ -9,9 +9,8 @@ define i32 @PR39657(i8* %p, i64 %x) {
 ; CHECK-NEXT:    ret
   %sh = shl i64 %x, 2
   %mul = xor i64 %sh, -4
-  %add.ptr = getelementptr inbounds i8, i8* %p, i64 %mul
-  %bc = bitcast i8* %add.ptr to i32*
-  %load = load i32, i32* %bc, align 4
+  %add.ptr = getelementptr inbounds i8, ptr %p, i64 %mul
+  %load = load i32, ptr %add.ptr, align 4
   ret i32 %load
 }
 

diff  --git a/llvm/test/CodeGen/AArch64/zext-logic-shift-load.ll b/llvm/test/CodeGen/AArch64/zext-logic-shift-load.ll
index a75862cfc3b34..885302a8ea2c1 100644
--- a/llvm/test/CodeGen/AArch64/zext-logic-shift-load.ll
+++ b/llvm/test/CodeGen/AArch64/zext-logic-shift-load.ll
@@ -1,11 +1,11 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu < %s -o - | FileCheck %s
 
-define i32 @test1(i8* %p) {
+define i32 @test1(ptr %p) {
 ; CHECK:       ldrb
 ; CHECK-NEXT:  ubfx
 ; CHECK-NEXT:  ret
 
-  %1 = load i8, i8* %p
+  %1 = load i8, ptr %p
   %2 = lshr i8 %1, 1
   %3 = and i8 %2, 1
   %4 = zext i8 %3 to i32


        

