[llvm] 60442f0 - [CodeGen] Convert some tests to opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 5 04:23:15 PST 2023


Author: Nikita Popov
Date: 2023-01-05T13:21:20+01:00
New Revision: 60442f0d442723a487528bdd8b48b24657a025e8

URL: https://github.com/llvm/llvm-project/commit/60442f0d442723a487528bdd8b48b24657a025e8
DIFF: https://github.com/llvm/llvm-project/commit/60442f0d442723a487528bdd8b48b24657a025e8.diff

LOG: [CodeGen] Convert some tests to opaque pointers (NFC)

These are mostly MIR tests, which I did not handle during previous
conversions.
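
The change itself is mechanical: typed pointee types such as i8* or i32 addrspace(1)* become the
opaque ptr type, while the value type on loads, stores and getelementptrs stays explicit. A minimal
sketch of the pattern (illustrative only; the function name @example is made up and not taken from
any test in this commit):

    ; before: typed pointers (illustrative, not from this commit)
    define void @example(i32 addrspace(1)* %p) {
      %v = load i32, i32 addrspace(1)* %p
      ret void
    }

    ; after: opaque pointers
    define void @example(ptr addrspace(1) %p) {
      %v = load i32, ptr addrspace(1) %p
      ret void
    }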

Added: 
    

Modified: 
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-block-addr.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-memory-metadata.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/no-cse-nonlocal-convergent-instrs.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-block-addr.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir
    llvm/test/CodeGen/AMDGPU/bug-sdag-emitcopyfromreg.ll
    llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
    llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir
    llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
    llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir
    llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir
    llvm/test/CodeGen/AMDGPU/fp-atomic-to-s_denormmode.mir
    llvm/test/CodeGen/AMDGPU/insert-waitcnts-exp.mir
    llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
    llvm/test/CodeGen/AMDGPU/lds-dma-waitcnt.mir
    llvm/test/CodeGen/AMDGPU/load-store-opt-dlc.mir
    llvm/test/CodeGen/AMDGPU/load-store-opt-scc.mir
    llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
    llvm/test/CodeGen/AMDGPU/memory-legalizer-invalid-addrspace.mir
    llvm/test/CodeGen/AMDGPU/memory-legalizer-local.mir
    llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
    llvm/test/CodeGen/AMDGPU/memory-legalizer-region.mir
    llvm/test/CodeGen/AMDGPU/merge-load-store-agpr.mir
    llvm/test/CodeGen/AMDGPU/merge-out-of-order-ldst.mir
    llvm/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir
    llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
    llvm/test/CodeGen/AMDGPU/phi-elimination-end-cf.mir
    llvm/test/CodeGen/AMDGPU/regcoalesce-dbg.mir
    llvm/test/CodeGen/AMDGPU/scc-clobbered-sgpr-to-vmem-spill.ll
    llvm/test/CodeGen/AMDGPU/sched-barrier-post-RA.mir
    llvm/test/CodeGen/AMDGPU/sched-barrier-pre-RA.mir
    llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir
    llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir
    llvm/test/CodeGen/AMDGPU/schedule-ilp.mir
    llvm/test/CodeGen/AMDGPU/schedule-regpressure.mir
    llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir
    llvm/test/CodeGen/AMDGPU/smem-no-clause-coalesced.mir
    llvm/test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir
    llvm/test/CodeGen/AMDGPU/waitcnt-agpr.mir
    llvm/test/CodeGen/AMDGPU/waitcnt-back-edge-loop.mir
    llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.mir
    llvm/test/CodeGen/AMDGPU/waitcnt.mir
    llvm/test/CodeGen/ARM/ARMLoadStoreDBG.mir
    llvm/test/CodeGen/ARM/GlobalISel/select-pr35926.mir
    llvm/test/CodeGen/ARM/cmp2-peephole-thumb.mir
    llvm/test/CodeGen/ARM/cmse-clear-float-bigend.mir
    llvm/test/CodeGen/ARM/codesize-ifcvt.mir
    llvm/test/CodeGen/ARM/const-load-align-thumb.mir
    llvm/test/CodeGen/ARM/dbg-range-extension.mir
    llvm/test/CodeGen/ARM/fp16-litpool-arm.mir
    llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir
    llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir
    llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir
    llvm/test/CodeGen/ARM/ifcvt-diamond-unanalyzable-common.mir
    llvm/test/CodeGen/ARM/machine-sink-multidef.mir
    llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir
    llvm/test/CodeGen/ARM/noreturn-csr-skip.mir
    llvm/test/CodeGen/ARM/pei-swiftself.mir
    llvm/test/CodeGen/ARM/prera-ldst-aliasing.mir
    llvm/test/CodeGen/ARM/prera-ldst-insertpt.mir
    llvm/test/CodeGen/ARM/single-issue-r52.mir
    llvm/test/CodeGen/ARM/stack_frame_offset.mir
    llvm/test/CodeGen/ARM/store-prepostinc.mir
    llvm/test/CodeGen/ARM/v6-jumptable-clobber.mir
    llvm/test/CodeGen/ARM/vldm-liveness.mir
    llvm/test/CodeGen/ARM/vldmia-sched.mir
    llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir
    llvm/test/CodeGen/Hexagon/addrmode-immop.mir
    llvm/test/CodeGen/Hexagon/addrmode-no-rdef.mir
    llvm/test/CodeGen/Hexagon/bank-conflict-load.mir
    llvm/test/CodeGen/Hexagon/bank-conflict.mir
    llvm/test/CodeGen/Hexagon/cext-opt-negative-fi.mir
    llvm/test/CodeGen/Hexagon/cext-opt-stack-no-rr.mir
    llvm/test/CodeGen/Hexagon/cext-unnamed-global.mir
    llvm/test/CodeGen/Hexagon/early-if-conv-lifetime.mir
    llvm/test/CodeGen/Hexagon/early-if-predicator.mir
    llvm/test/CodeGen/Hexagon/hwloop-dbg-register.mir
    llvm/test/CodeGen/Hexagon/ifcvt-diamond-ret.mir
    llvm/test/CodeGen/Hexagon/pipeliner/swp-phi-start.mir
    llvm/test/CodeGen/Hexagon/postinc-baseoffset.mir
    llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/add_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/brindirect.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fabs_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fence.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/floating_point_vec_arithmetic_operations.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fsqrt_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/gloal_address.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned_r6.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_store_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/rem_and_div_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned_r6.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/sub_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/truncStore_and_aExtLoad.mir
    llvm/test/CodeGen/Mips/GlobalISel/instruction-select/zextLoad_and_sextLoad.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec_builtin.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/brindirect.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec_builtin.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/fence.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations_builtin.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec_builtin.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/global_address.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/load_4_unaligned.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec_builtin.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/phi.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec_builtin.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_4_unaligned.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec_builtin.mir
    llvm/test/CodeGen/Mips/GlobalISel/legalizer/zextLoad_and_sextLoad.mir
    llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/truncStore_and_aExtLoad.mir
    llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/zextLoad_and_sextLoad.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/TypeInfoforMF_skipCopies.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/add_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/brindirect.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fabs_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fence.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/floating_point_vec_arithmetic_operations.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fsqrt_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/global_address.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_4_unaligned.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_store_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/phi.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/rem_and_div_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/select.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store_4_unaligned.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/sub_vec.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/test_TypeInfoforMF.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/truncStore_and_aExtLoad.mir
    llvm/test/CodeGen/Mips/GlobalISel/regbankselect/zextLoad_and_sextLoad.mir
    llvm/test/CodeGen/Mips/compactbranches/compact-branch-implicit-def.mir
    llvm/test/CodeGen/Mips/delay-slot-filler-bundled-insts.mir
    llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-call.mir
    llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-tailcall.mir
    llvm/test/CodeGen/Mips/micromips-eva.mir
    llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwp-swp.mir
    llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-no-lwp-swp.mir
    llvm/test/CodeGen/Mips/mirparser/target-flags-pic-mxgot-tls.mir
    llvm/test/CodeGen/Mips/mirparser/target-flags-pic-o32.mir
    llvm/test/CodeGen/Mips/mirparser/target-flags-pic.mir
    llvm/test/CodeGen/Mips/mirparser/target-flags-static-tls.mir
    llvm/test/CodeGen/Mips/unaligned-memops-mapping.mir
    llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessNoProfileData.mir
    llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessProfileData.mir
    llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir
    llvm/test/CodeGen/PowerPC/aantidep-def-ec.mir
    llvm/test/CodeGen/PowerPC/addisdtprelha-nonr3.mir
    llvm/test/CodeGen/PowerPC/block-placement-1.mir
    llvm/test/CodeGen/PowerPC/ctrloop-do-not-duplicate-mi.mir
    llvm/test/CodeGen/PowerPC/livevars-crash1.mir
    llvm/test/CodeGen/PowerPC/livevars-crash2.mir
    llvm/test/CodeGen/PowerPC/no-rlwimi-trivial-commute.mir
    llvm/test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir
    llvm/test/CodeGen/PowerPC/peephole-phi-acc.mir
    llvm/test/CodeGen/PowerPC/phi-eliminate.mir
    llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir
    llvm/test/CodeGen/PowerPC/remove-redundant-li-skip-imp-kill.mir
    llvm/test/CodeGen/PowerPC/schedule-addi-load.mir
    llvm/test/CodeGen/PowerPC/sext_elimination.mir
    llvm/test/CodeGen/PowerPC/shrink-wrap.mir
    llvm/test/CodeGen/PowerPC/tls_get_addr_fence1.mir
    llvm/test/CodeGen/PowerPC/tls_get_addr_fence2.mir
    llvm/test/CodeGen/PowerPC/two-address-crash.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/cmplx_cong.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-mov.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/ctlz-non-zeros.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/dont-ignore-vctp.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/end-positive-offset.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-itercount.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-mov.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-random.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/massive.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dls.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiblock-massive.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/out-of-range-cbz.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/remove-elem-moves.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-after-call.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-after-write.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-non-loop.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-while.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/revertcallearly.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-def-no-mov.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-retaining.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/size-limit.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/spillingmove.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/switch.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-def.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-use.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-retaining.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-use-after.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-blocks.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-negative-offset.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/while.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/wls-search-killed.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/wls-search-pred.mir
    llvm/test/CodeGen/Thumb2/bti-const-island.mir
    llvm/test/CodeGen/Thumb2/constant-islands-cbz.mir
    llvm/test/CodeGen/Thumb2/frame-index-addrmode-t2i8s4.mir
    llvm/test/CodeGen/Thumb2/high-reg-spill.mir
    llvm/test/CodeGen/Thumb2/ifcvt-dead-predicate.mir
    llvm/test/CodeGen/Thumb2/m4-sched-ldr.mir
    llvm/test/CodeGen/Thumb2/mve-postinc-distribute.mir
    llvm/test/CodeGen/Thumb2/mve-tp-loop.mir
    llvm/test/CodeGen/Thumb2/mve-vpt-block-fold-vcmp.mir
    llvm/test/CodeGen/Thumb2/mve-wls-block-placement.mir
    llvm/test/CodeGen/Thumb2/phi_prevent_copy.mir
    llvm/test/CodeGen/Thumb2/pipeliner-inlineasm.mir
    llvm/test/CodeGen/Thumb2/postinc-distribute.mir
    llvm/test/CodeGen/Thumb2/store-prepostinc.mir
    llvm/test/CodeGen/Thumb2/swp-exitbranchdir.mir
    llvm/test/CodeGen/Thumb2/swp-fixedii-le.mir
    llvm/test/CodeGen/Thumb2/swp-fixedii.mir
    llvm/test/CodeGen/Thumb2/swp-regpressure.mir
    llvm/test/CodeGen/Thumb2/tbb-removeadd.mir
    llvm/test/CodeGen/WebAssembly/cfg-stackify-eh.mir
    llvm/test/CodeGen/WebAssembly/exception.mir
    llvm/test/CodeGen/WebAssembly/function-info.mir
    llvm/test/CodeGen/X86/AMX/amx-fastconfig.mir
    llvm/test/CodeGen/X86/GlobalISel/legalize-cmp.mir
    llvm/test/CodeGen/X86/GlobalISel/legalize-ptr-add.mir
    llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
    llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
    llvm/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
    llvm/test/CodeGen/X86/GlobalISel/select-GV-32.mir
    llvm/test/CodeGen/X86/GlobalISel/select-GV-64.mir
    llvm/test/CodeGen/X86/GlobalISel/select-constant.mir
    llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-unordered.mir
    llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
    llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
    llvm/test/CodeGen/X86/GlobalISel/select-memop-v128.mir
    llvm/test/CodeGen/X86/GlobalISel/select-memop-v256.mir
    llvm/test/CodeGen/X86/GlobalISel/select-memop-v512.mir
    llvm/test/CodeGen/X86/GlobalISel/select-ptr-add.mir
    llvm/test/CodeGen/X86/GlobalISel/x32-select-frameIndex.mir
    llvm/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir
    llvm/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir
    llvm/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir
    llvm/test/CodeGen/X86/GlobalISel/x86-select-frameIndex.mir
    llvm/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir
    llvm/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir
    llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir
    llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-inttoptr.mir
    llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-ptrtoint.mir
    llvm/test/CodeGen/X86/GlobalISel/x86_64-select-frameIndex.mir
    llvm/test/CodeGen/X86/GlobalISel/x86_64-select-inttoptr.mir
    llvm/test/CodeGen/X86/GlobalISel/x86_64-select-ptrtoint.mir
    llvm/test/CodeGen/X86/StackColoring-dbg-invariance.mir
    llvm/test/CodeGen/X86/adx-commute.mir
    llvm/test/CodeGen/X86/basic-block-sections-mir-parse.mir
    llvm/test/CodeGen/X86/block-placement.mir
    llvm/test/CodeGen/X86/callbr-asm-kill.mir
    llvm/test/CodeGen/X86/cf-opt-memops.mir
    llvm/test/CodeGen/X86/codegen-prepare-replacephi.mir
    llvm/test/CodeGen/X86/codegen-prepare-replacephi2.mir
    llvm/test/CodeGen/X86/copy-eflags-liveinlists.mir
    llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding2.mir
    llvm/test/CodeGen/X86/domain-reassignment.mir
    llvm/test/CodeGen/X86/expand-call-rvmarker.mir
    llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir
    llvm/test/CodeGen/X86/fixup-bw-inst.mir
    llvm/test/CodeGen/X86/heap-alloc-markers.mir
    llvm/test/CodeGen/X86/implicit-null-checks.mir
    llvm/test/CodeGen/X86/implicit-null-chk-reg-rewrite.mir
    llvm/test/CodeGen/X86/late-remat-update.mir
    llvm/test/CodeGen/X86/lea-opt-with-debug.mir
    llvm/test/CodeGen/X86/limit-split-cost.mir
    llvm/test/CodeGen/X86/machine-cp-mask-reg.mir
    llvm/test/CodeGen/X86/movtopush.mir
    llvm/test/CodeGen/X86/peephole-fold-testrr.mir
    llvm/test/CodeGen/X86/peephole-recurrence.mir
    llvm/test/CodeGen/X86/postra-ignore-dbg-instrs.mir
    llvm/test/CodeGen/X86/pr38952.mir
    llvm/test/CodeGen/X86/pr51903.mir
    llvm/test/CodeGen/X86/pre-coalesce.mir
    llvm/test/CodeGen/X86/regalloc-copy-hints.mir
    llvm/test/CodeGen/X86/shrink_wrap_dbg_value.mir
    llvm/test/CodeGen/X86/stack-folding-bmi2.mir
    llvm/test/CodeGen/X86/stack-folding-fp-nofpexcept.mir
    llvm/test/CodeGen/X86/statepoint-fixup-call.mir
    llvm/test/CodeGen/X86/statepoint-fixup-copy-prop-neg.mir
    llvm/test/CodeGen/X86/statepoint-fixup-copy-prop.mir
    llvm/test/CodeGen/X86/statepoint-fixup-invoke.mir
    llvm/test/CodeGen/X86/statepoint-fixup-shared-ehpad.mir
    llvm/test/CodeGen/X86/statepoint-fixup-undef-def.mir
    llvm/test/CodeGen/X86/statepoint-fixup-undef.mir
    llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir
    llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir
    llvm/test/CodeGen/X86/statepoint-vreg-folding.mir
    llvm/test/CodeGen/X86/statepoint-vreg-twoaddr.mir
    llvm/test/CodeGen/X86/statepoint-vreg.mir
    llvm/test/CodeGen/X86/tail-call-conditional.mir
    llvm/test/CodeGen/X86/taildup-callsiteinfo.mir
    llvm/test/CodeGen/X86/unfoldMemoryOperand.mir
    llvm/test/CodeGen/X86/win64-eh-empty-block-2.mir
    llvm/test/CodeGen/X86/x87-reg-usage.mir

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir
index 8ca6f24793a10..fdb0954ee4760 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir
@@ -4,7 +4,7 @@
 # RUN: llc -march=amdgcn -mcpu=gfx900 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=GCN,GFX9
 
 --- |
-  define amdgpu_kernel void @smrd_imm(i32 addrspace(4)* %const0) { ret void }
+  define amdgpu_kernel void @smrd_imm(ptr addrspace(4) %const0) { ret void }
   define amdgpu_kernel void @smrd_wide() { ret void }
   define amdgpu_kernel void @constant_address_positive() { ret void }
   define amdgpu_kernel void @smrd_sgpr() { ret void }

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-block-addr.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-block-addr.mir
index 2a75ad860ab64..9b5a0b2de4f81 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-block-addr.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-block-addr.mir
@@ -2,11 +2,11 @@
 # RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
 --- |
 
-  @addr = global i8* null
+  @addr = global ptr null
 
   define void @test_blockaddress() {
-    store i8* blockaddress(@test_blockaddress, %block), i8** @addr
-    indirectbr i8* blockaddress(@test_blockaddress, %block), [label %block]
+    store ptr blockaddress(@test_blockaddress, %block), ptr @addr
+    indirectbr ptr blockaddress(@test_blockaddress, %block), [label %block]
 
   block:
     ret void

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-memory-metadata.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-memory-metadata.mir
index 4864355fda49f..a862d4a9032e8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-memory-metadata.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-memory-metadata.mir
@@ -3,26 +3,26 @@
 
 --- |
 
-  define i32 @widen_load_range0_tbaa(i24 addrspace(1)* %ptr) {
-    %load = load i24, i24 addrspace(1)* %ptr, !range !0, !tbaa !1
+  define i32 @widen_load_range0_tbaa(ptr addrspace(1) %ptr) {
+    %load = load i24, ptr addrspace(1) %ptr, !range !0, !tbaa !1
     %zext = zext i24 %load to i32
     ret i32 %zext
   }
 
-  define i32 @widen_load_range1_tbaa(i24 addrspace(1)* %ptr) {
-    %load = load i24, i24 addrspace(1)* %ptr, !range !0, !tbaa !1
+  define i32 @widen_load_range1_tbaa(ptr addrspace(1) %ptr) {
+    %load = load i24, ptr addrspace(1) %ptr, !range !0, !tbaa !1
     %zext = zext i24 %load to i32
     ret i32 %zext
   }
 
-  define i32 @widen_load_tbaa0(i24 addrspace(1)* %ptr) {
-    %load = load i24, i24 addrspace(1)* %ptr, !tbaa !1
+  define i32 @widen_load_tbaa0(ptr addrspace(1) %ptr) {
+    %load = load i24, ptr addrspace(1) %ptr, !tbaa !1
     %zext = zext i24 %load to i32
     ret i32 %zext
   }
 
-  define i32 @widen_load_tbaa1(i24 addrspace(1)* %ptr) {
-    %load = load i24, i24 addrspace(1)* %ptr, !tbaa !1
+  define i32 @widen_load_tbaa1(ptr addrspace(1) %ptr) {
+    %load = load i24, ptr addrspace(1) %ptr, !tbaa !1
     %zext = zext i24 %load to i32
     ret i32 %zext
   }

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/no-cse-nonlocal-convergent-instrs.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/no-cse-nonlocal-convergent-instrs.mir
index 26d5e5fb26128..684b5ec3883b2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/no-cse-nonlocal-convergent-instrs.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/no-cse-nonlocal-convergent-instrs.mir
@@ -30,7 +30,7 @@
 # CHECK-NEXT: V_ADD_CO_U32_e64 [[SWIZZLE2]], {{%[0-9]+}}, 0, implicit $exec
 
 --- |
-  define amdgpu_kernel void @no_cse(i32 addrspace(1)*, i32, i1) {
+  define amdgpu_kernel void @no_cse(ptr addrspace(1), i32, i1) {
   entry:
     unreachable
   if.then:

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-block-addr.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-block-addr.mir
index 2e3347482c4f8..a50c7fe0748b8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-block-addr.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-block-addr.mir
@@ -3,11 +3,11 @@
 
 --- |
 
-  @addr = global i8* null
+  @addr = global ptr null
 
   define void @test_blockaddress() {
-    store i8* blockaddress(@test_blockaddress, %block), i8** @addr
-    indirectbr i8* blockaddress(@test_blockaddress, %block), [label %block]
+    store ptr blockaddress(@test_blockaddress, %block), ptr @addr
+    indirectbr ptr blockaddress(@test_blockaddress, %block), [label %block]
 
   block:                                            ; preds = %0
     ret void

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
index 7058451127e23..ad71b96fd9d95 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir
@@ -3,72 +3,72 @@
 # RUN: llc -amdgpu-global-isel-new-legality -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
 
 --- |
-  define amdgpu_kernel void @load_global_v8i32_non_uniform(<8 x i32> addrspace(1)* %in) {
+  define amdgpu_kernel void @load_global_v8i32_non_uniform(ptr addrspace(1) %in) {
     %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0
-    %global.not.uniform.v8i32 = getelementptr <8 x i32>, <8 x i32> addrspace(1)* %in, i32 %tmp0
-    %tmp2 = load <8 x i32>, <8 x i32> addrspace(1)* %global.not.uniform.v8i32
+    %global.not.uniform.v8i32 = getelementptr <8 x i32>, ptr addrspace(1) %in, i32 %tmp0
+    %tmp2 = load <8 x i32>, ptr addrspace(1) %global.not.uniform.v8i32
     ret void
   }
 
-  define amdgpu_kernel void @load_global_v4i64_non_uniform(<4 x i64> addrspace(1)* %in) {
+  define amdgpu_kernel void @load_global_v4i64_non_uniform(ptr addrspace(1) %in) {
     %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0
-    %global.not.uniform.v4i64 = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tmp0
-    %tmp2 = load <4 x i64>, <4 x i64> addrspace(1)* %global.not.uniform.v4i64
+    %global.not.uniform.v4i64 = getelementptr <4 x i64>, ptr addrspace(1) %in, i32 %tmp0
+    %tmp2 = load <4 x i64>, ptr addrspace(1) %global.not.uniform.v4i64
     ret void
   }
-  define amdgpu_kernel void @load_global_v16i32_non_uniform(<16 x i32> addrspace(1)* %in) {
+  define amdgpu_kernel void @load_global_v16i32_non_uniform(ptr addrspace(1) %in) {
     %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0
-    %global.not.uniform.v16i32 = getelementptr <16 x i32>, <16 x i32> addrspace(1)* %in, i32 %tmp0
-    %tmp2 = load <16 x i32>, <16 x i32> addrspace(1)* %global.not.uniform.v16i32
+    %global.not.uniform.v16i32 = getelementptr <16 x i32>, ptr addrspace(1) %in, i32 %tmp0
+    %tmp2 = load <16 x i32>, ptr addrspace(1) %global.not.uniform.v16i32
     ret void
   }
-  define amdgpu_kernel void @load_global_v8i64_non_uniform(<8 x i64> addrspace(1)* %in) {
+  define amdgpu_kernel void @load_global_v8i64_non_uniform(ptr addrspace(1) %in) {
     %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0
-    %global.not.uniform.v8i64 = getelementptr <8 x i64>, <8 x i64> addrspace(1)* %in, i32 %tmp0
-    %tmp2 = load <8 x i64>, <8 x i64> addrspace(1)* %global.not.uniform.v8i64
+    %global.not.uniform.v8i64 = getelementptr <8 x i64>, ptr addrspace(1) %in, i32 %tmp0
+    %tmp2 = load <8 x i64>, ptr addrspace(1) %global.not.uniform.v8i64
     ret void
   }
   define amdgpu_kernel void @load_global_v8i32_uniform() {ret void}
   define amdgpu_kernel void @load_global_v4i64_uniform() {ret void}
   define amdgpu_kernel void @load_global_v16i32_uniform() {ret void}
   define amdgpu_kernel void @load_global_v8i64_uniform() {ret void}
-  define amdgpu_kernel void @load_constant_v8i32_non_uniform(<8 x i32> addrspace(4)* %in) {
+  define amdgpu_kernel void @load_constant_v8i32_non_uniform(ptr addrspace(4) %in) {
     %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0
-    %constant.not.uniform.v8i32 = getelementptr <8 x i32>, <8 x i32> addrspace(4)* %in, i32 %tmp0
-    %tmp2 = load <8 x i32>, <8 x i32> addrspace(4)* %constant.not.uniform.v8i32
+    %constant.not.uniform.v8i32 = getelementptr <8 x i32>, ptr addrspace(4) %in, i32 %tmp0
+    %tmp2 = load <8 x i32>, ptr addrspace(4) %constant.not.uniform.v8i32
     ret void
   }
 
-  define amdgpu_kernel void @load_constant_i256_non_uniform(i256 addrspace(4)* %in) {
+  define amdgpu_kernel void @load_constant_i256_non_uniform(ptr addrspace(4) %in) {
     %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0
-    %constant.not.uniform = getelementptr i256, i256 addrspace(4)* %in, i32 %tmp0
-    %tmp2 = load i256, i256 addrspace(4)* %constant.not.uniform
+    %constant.not.uniform = getelementptr i256, ptr addrspace(4) %in, i32 %tmp0
+    %tmp2 = load i256, ptr addrspace(4) %constant.not.uniform
     ret void
   }
 
-  define amdgpu_kernel void @load_constant_v16i16_non_uniform(<16 x i16> addrspace(4)* %in) {
+  define amdgpu_kernel void @load_constant_v16i16_non_uniform(ptr addrspace(4) %in) {
     %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0
-    %constant.not.uniform = getelementptr <16 x i16>, <16 x i16> addrspace(4)* %in, i32 %tmp0
-    %tmp2 = load <16 x i16>, <16 x i16> addrspace(4)* %constant.not.uniform
+    %constant.not.uniform = getelementptr <16 x i16>, ptr addrspace(4) %in, i32 %tmp0
+    %tmp2 = load <16 x i16>, ptr addrspace(4) %constant.not.uniform
     ret void
   }
 
-  define amdgpu_kernel void @load_constant_v4i64_non_uniform(<4 x i64> addrspace(4)* %in) {
+  define amdgpu_kernel void @load_constant_v4i64_non_uniform(ptr addrspace(4) %in) {
     %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0
-    %constant.not.uniform.v4i64 = getelementptr <4 x i64>, <4 x i64> addrspace(4)* %in, i32 %tmp0
-    %tmp2 = load <4 x i64>, <4 x i64> addrspace(4)* %constant.not.uniform.v4i64
+    %constant.not.uniform.v4i64 = getelementptr <4 x i64>, ptr addrspace(4) %in, i32 %tmp0
+    %tmp2 = load <4 x i64>, ptr addrspace(4) %constant.not.uniform.v4i64
     ret void
   }
-  define amdgpu_kernel void @load_constant_v16i32_non_uniform(<16 x i32> addrspace(4)* %in) {
+  define amdgpu_kernel void @load_constant_v16i32_non_uniform(ptr addrspace(4) %in) {
     %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0
-    %constant.not.uniform.v16i32 = getelementptr <16 x i32>, <16 x i32> addrspace(4)* %in, i32 %tmp0
-    %tmp2 = load <16 x i32>, <16 x i32> addrspace(4)* %constant.not.uniform.v16i32
+    %constant.not.uniform.v16i32 = getelementptr <16 x i32>, ptr addrspace(4) %in, i32 %tmp0
+    %tmp2 = load <16 x i32>, ptr addrspace(4) %constant.not.uniform.v16i32
     ret void
   }
-  define amdgpu_kernel void @load_constant_v8i64_non_uniform(<8 x i64> addrspace(4)* %in) {
+  define amdgpu_kernel void @load_constant_v8i64_non_uniform(ptr addrspace(4) %in) {
     %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0
-    %constant.not.uniform.v8i64 = getelementptr <8 x i64>, <8 x i64> addrspace(4)* %in, i32 %tmp0
-    %tmp2 = load <8 x i64>, <8 x i64> addrspace(4)* %constant.not.uniform.v8i64
+    %constant.not.uniform.v8i64 = getelementptr <8 x i64>, ptr addrspace(4) %in, i32 %tmp0
+    %tmp2 = load <8 x i64>, ptr addrspace(4) %constant.not.uniform.v8i64
     ret void
   }
 

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir
index d111563294307..938ab1643dea0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir
@@ -3,13 +3,13 @@
 
 --- |
 
-  define amdgpu_ps i96 @split_smrd_load_range(i96 addrspace(4)* %ptr) {
-    %load = load i96, i96 addrspace(4)* %ptr, !range !0
+  define amdgpu_ps i96 @split_smrd_load_range(ptr addrspace(4) %ptr) {
+    %load = load i96, ptr addrspace(4) %ptr, !range !0
     ret i96 %load
   }
 
-  define amdgpu_ps <3 x i32> @split_smrd_load_tbaa(<3 x i32> addrspace(4)* %ptr) {
-    %load = load <3 x i32>, <3 x i32> addrspace(4)* %ptr, !tbaa !1
+  define amdgpu_ps <3 x i32> @split_smrd_load_tbaa(ptr addrspace(4) %ptr) {
+    %load = load <3 x i32>, ptr addrspace(4) %ptr, !tbaa !1
     ret <3 x i32> %load
   }
 

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir
index 3ef20a453f4ef..76ee2f9158b73 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir
@@ -2,49 +2,49 @@
 # RUN: llc -march=amdgcn -mcpu=hawaii -mattr=+flat-for-global -run-pass=regbankselect %s -verify-machineinstrs -o - | FileCheck %s
 
 --- |
-  define amdgpu_kernel void @load_constant(i32 addrspace(4)* %ptr0) {
+  define amdgpu_kernel void @load_constant(ptr addrspace(4) %ptr0) {
     ret void
   }
 
-  define amdgpu_kernel void @load_constant_volatile(i32 addrspace(4)* %ptr0) {
+  define amdgpu_kernel void @load_constant_volatile(ptr addrspace(4) %ptr0) {
     ret void
   }
 
-  define amdgpu_kernel void @load_global_uniform_invariant(i32 addrspace(1)* %ptr1) {
-    %tmp0 = load i32, i32 addrspace(1)* %ptr1
+  define amdgpu_kernel void @load_global_uniform_invariant(ptr addrspace(1) %ptr1) {
+    %tmp0 = load i32, ptr addrspace(1) %ptr1
     ret void
   }
 
-  define amdgpu_kernel void @load_global_uniform_noclobber(i32 addrspace(1)* %ptr1) {
-    %tmp0 = load i32, i32 addrspace(1)* %ptr1, !amdgpu.noclobber !0
+  define amdgpu_kernel void @load_global_uniform_noclobber(ptr addrspace(1) %ptr1) {
+    %tmp0 = load i32, ptr addrspace(1) %ptr1, !amdgpu.noclobber !0
     ret void
   }
 
-  define amdgpu_kernel void @load_global_uniform_variant(i32 addrspace(1)* %ptr1) {
-    %tmp0 = load i32, i32 addrspace(1)* %ptr1
+  define amdgpu_kernel void @load_global_uniform_variant(ptr addrspace(1) %ptr1) {
+    %tmp0 = load i32, ptr addrspace(1) %ptr1
     ret void
   }
 
-  define amdgpu_kernel void @load_global_uniform_volatile_invariant(i32 addrspace(1)* %ptr1) {
-    %tmp0 = load i32, i32 addrspace(1)* %ptr1
+  define amdgpu_kernel void @load_global_uniform_volatile_invariant(ptr addrspace(1) %ptr1) {
+    %tmp0 = load i32, ptr addrspace(1) %ptr1
     ret void
   }
 
-  define amdgpu_kernel void @load_global_uniform_atomic_invariant(i32 addrspace(1)* %ptr1) {
-    %tmp0 = load i32, i32 addrspace(1)* %ptr1
+  define amdgpu_kernel void @load_global_uniform_atomic_invariant(ptr addrspace(1) %ptr1) {
+    %tmp0 = load i32, ptr addrspace(1) %ptr1
     ret void
   }
 
-  define amdgpu_kernel void @load_global_non_uniform(i32 addrspace(1)* %ptr2) {
+  define amdgpu_kernel void @load_global_non_uniform(ptr addrspace(1) %ptr2) {
     %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0
-    %tmp1 = getelementptr i32, i32 addrspace(1)* %ptr2, i32 %tmp0
-    %tmp2 = load i32, i32 addrspace(1)* %tmp1
+    %tmp1 = getelementptr i32, ptr addrspace(1) %ptr2, i32 %tmp0
+    %tmp2 = load i32, ptr addrspace(1) %tmp1
     ret void
   }
 
   define void @non_power_of_2() { ret void }
 
-  define amdgpu_kernel void @load_constant_v4i16_from_8_align8(<3 x i16> addrspace(4)* %ptr0) {
+  define amdgpu_kernel void @load_constant_v4i16_from_8_align8(ptr addrspace(4) %ptr0) {
     ret void
   }
 

diff --git a/llvm/test/CodeGen/AMDGPU/bug-sdag-emitcopyfromreg.ll b/llvm/test/CodeGen/AMDGPU/bug-sdag-emitcopyfromreg.ll
index b1458270bba94..999a1b4fa0815 100644
--- a/llvm/test/CodeGen/AMDGPU/bug-sdag-emitcopyfromreg.ll
+++ b/llvm/test/CodeGen/AMDGPU/bug-sdag-emitcopyfromreg.ll
@@ -3,7 +3,7 @@
 ; RUN: llc -march=amdgcn -mcpu=gfx1010 < %s | FileCheck %s -check-prefix=ISA
 ; RUN: llc -march=amdgcn -mcpu=gfx1010 -stop-before=si-fix-sgpr-copies < %s | FileCheck %s -check-prefix=MIR
 
-define void @f(i32 %arg, float* %ptr) {
+define void @f(i32 %arg, ptr %ptr) {
 ; ISA-LABEL: f:
 ; ISA:       ; %bb.0: ; %bb
 ; ISA-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -59,7 +59,7 @@ define void @f(i32 %arg, float* %ptr) {
   ; MIR-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; MIR-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; MIR-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
-  ; MIR-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[S_MOV_B64_]], 0, 0 :: (invariant load (s64) from `<2 x i32> addrspace(4)* null`, align 4294967296, addrspace 4)
+  ; MIR-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[S_MOV_B64_]], 0, 0 :: (invariant load (s64) from `ptr addrspace(4) null`, align 4294967296, addrspace 4)
   ; MIR-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM]].sub1
   ; MIR-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM]].sub0
   ; MIR-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
@@ -118,7 +118,7 @@ define void @f(i32 %arg, float* %ptr) {
   ; MIR-NEXT:   FLAT_STORE_DWORD [[COPY8]], [[PHI2]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.ptr)
   ; MIR-NEXT:   SI_RETURN
 bb:
-  %i = load <2 x i32>, <2 x i32> addrspace(4)* null, align 4294967296
+  %i = load <2 x i32>, ptr addrspace(4) null, align 4294967296
   %i1 = extractelement <2 x i32> %i, i64 1
   %i2 = extractelement <2 x i32> %i, i64 0
   %i3 = lshr i32 %i1, 1
@@ -144,7 +144,7 @@ bb14:
   br i1 %i20, label %bb14, label %bb21
 
 bb21:
-  store float %i15, float* %ptr, align 4
+  store float %i15, ptr %ptr, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir b/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
index 276c8098c4bfc..3bebc640ac116 100644
--- a/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
+++ b/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
@@ -43,8 +43,8 @@ body:             |
 
     %3 = COPY $vgpr0
     %0 = COPY $sgpr0_sgpr1
-    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
-    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
     %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
@@ -105,8 +105,8 @@ body:             |
 
     %3 = COPY $vgpr0
     %0 = COPY $sgpr0_sgpr1
-    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
-    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
     %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
@@ -168,8 +168,8 @@ body:             |
 
     %3 = COPY $vgpr0
     %0 = COPY $sgpr0_sgpr1
-    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
-    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
     %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
@@ -233,8 +233,8 @@ body:             |
 
     %3 = COPY $vgpr0
     %0 = COPY $sgpr0_sgpr1
-    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
-    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
     %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
@@ -310,8 +310,8 @@ body:             |
 
     %3 = COPY $vgpr0
     %0 = COPY $sgpr0_sgpr1
-    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
-    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
     %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
@@ -375,8 +375,8 @@ body:             |
 
     %3 = COPY $vgpr0
     %0 = COPY $sgpr0_sgpr1
-    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
-    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
     %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440

diff --git a/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir b/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir
index 419eacb96da17..a077e67877bda 100644
--- a/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir
+++ b/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir
@@ -128,6 +128,6 @@ body:             |
     %28:vgpr_32 = IMAGE_LOAD_V1_V4 killed %25, killed %27, 2, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128) from constant-pool, addrspace 4)
     %29:vgpr_32 = nofpexcept V_ADD_F32_e32 0, killed %28, implicit $mode, implicit $exec
     $m0 = S_MOV_B32 -1
-    DS_WRITE_B32 undef %30:vgpr_32, killed %29, 0, 0, implicit $m0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`, addrspace 3)
+    DS_WRITE_B32 undef %30:vgpr_32, killed %29, 0, 0, implicit $m0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`, addrspace 3)
     S_ENDPGM 0
 ...

diff --git a/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir b/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
index 99c20364cd745..c347860570ba8 100644
--- a/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
+++ b/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
@@ -723,7 +723,7 @@ body:             |
 
     %2:vgpr_32 = COPY $vgpr0
     %0:sgpr_64 = COPY $sgpr0_sgpr1
-    %3:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
+    %3:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
     %15:vgpr_32 = V_ASHRREV_I32_e64 31, %2, implicit $exec
     %16:vreg_64 = REG_SEQUENCE %2, %subreg.sub0, %15, %subreg.sub1
     %17:vreg_64 = V_LSHLREV_B64_e64 2, killed %16, implicit $exec

diff --git a/llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir b/llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir
index d5aa9b6309173..b2af7dfe9002a 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir
+++ b/llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir
@@ -4,18 +4,18 @@
 # GCN:      FLAT_LOAD_DWORD
 # GCN-NEXT: FLAT_LOAD_DWORD
 --- |
-  define amdgpu_kernel void @flat_load_clustering(i32 addrspace(1)* nocapture %arg, i32 addrspace(4)* nocapture readonly %arg1) {
+  define amdgpu_kernel void @flat_load_clustering(ptr addrspace(1) nocapture %arg, ptr addrspace(4) nocapture readonly %arg1) {
   bb:
     %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
     %idxprom = sext i32 %tid to i64
-    %gep1 = getelementptr inbounds i32, i32 addrspace(4)* %arg1, i64 %idxprom
-    %load1 = load i32, i32 addrspace(4)* %gep1, align 4
-    %gep2 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %idxprom
-    %gep34 = getelementptr inbounds i32, i32 addrspace(4)* %gep1, i64 4
-    %load2 = load i32, i32 addrspace(4)* %gep34, align 4
-    %gep4 = getelementptr inbounds i32, i32 addrspace(1)* %gep2, i64 4
-    store i32 %load1, i32 addrspace(1)* %gep2, align 4
-    store i32 %load2, i32 addrspace(1)* %gep4, align 4
+    %gep1 = getelementptr inbounds i32, ptr addrspace(4) %arg1, i64 %idxprom
+    %load1 = load i32, ptr addrspace(4) %gep1, align 4
+    %gep2 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 %idxprom
+    %gep34 = getelementptr inbounds i32, ptr addrspace(4) %gep1, i64 4
+    %load2 = load i32, ptr addrspace(4) %gep34, align 4
+    %gep4 = getelementptr inbounds i32, ptr addrspace(1) %gep2, i64 4
+    store i32 %load1, ptr addrspace(1) %gep2, align 4
+    store i32 %load2, ptr addrspace(1) %gep4, align 4
     ret void
   }
 
@@ -54,8 +54,8 @@ body:             |
 
     %1 = COPY $sgpr4_sgpr5
     %0 = COPY $vgpr0
-    %3 = S_LOAD_DWORDX2_IMM %1, 0, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
-    %4 = S_LOAD_DWORDX2_IMM %1, 8, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
+    %3 = S_LOAD_DWORDX2_IMM %1, 0, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
+    %4 = S_LOAD_DWORDX2_IMM %1, 8, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
     %7 = V_LSHLREV_B32_e32 2, %0, implicit $exec
     %2 = V_MOV_B32_e32 0, implicit $exec
     undef %12.sub0 = V_ADD_CO_U32_e32 %4.sub0, %7, implicit-def $vcc, implicit $exec

diff --git a/llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir
index 2383e1a32064d..dd61d9cf4bb2d 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir
@@ -1,104 +1,104 @@
 # RUN: llc --mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs -run-pass si-fold-operands,si-shrink-instructions %s -o - | FileCheck %s
 --- |
   define amdgpu_kernel void @add_f32_1.0_one_f16_use() #0 {
-    %f16.val0 = load volatile half, half addrspace(1)* undef
-    %f16.val1 = load volatile half, half addrspace(1)* undef
-    %f32.val = load volatile float, float addrspace(1)* undef
+    %f16.val0 = load volatile half, ptr addrspace(1) undef
+    %f16.val1 = load volatile half, ptr addrspace(1) undef
+    %f32.val = load volatile float, ptr addrspace(1) undef
     %f16.add0 = fadd half %f16.val0, 0xH3C00
     %f32.add = fadd float %f32.val, 1.000000e+00
-    store volatile half %f16.add0, half addrspace(1)* undef
-    store volatile float %f32.add, float addrspace(1)* undef
+    store volatile half %f16.add0, ptr addrspace(1) undef
+    store volatile float %f32.add, ptr addrspace(1) undef
     ret void
   }
 
   define amdgpu_kernel void @add_f32_1.0_multi_f16_use() #0 {
-    %f16.val0 = load volatile half, half addrspace(1)* undef
-    %f16.val1 = load volatile half, half addrspace(1)* undef
-    %f32.val = load volatile float, float addrspace(1)* undef
+    %f16.val0 = load volatile half, ptr addrspace(1) undef
+    %f16.val1 = load volatile half, ptr addrspace(1) undef
+    %f32.val = load volatile float, ptr addrspace(1) undef
     %f16.add0 = fadd half %f16.val0, 0xH3C00
     %f32.add = fadd float %f32.val, 1.000000e+00
-    store volatile half %f16.add0, half addrspace(1)* undef
-    store volatile float %f32.add, float addrspace(1)* undef
+    store volatile half %f16.add0, ptr addrspace(1) undef
+    store volatile float %f32.add, ptr addrspace(1) undef
     ret void
   }
 
   define amdgpu_kernel void @add_f32_1.0_one_f32_use_one_f16_use () #0 {
-    %f16.val0 = load volatile half, half addrspace(1)* undef
-    %f16.val1 = load volatile half, half addrspace(1)* undef
-    %f32.val = load volatile float, float addrspace(1)* undef
+    %f16.val0 = load volatile half, ptr addrspace(1) undef
+    %f16.val1 = load volatile half, ptr addrspace(1) undef
+    %f32.val = load volatile float, ptr addrspace(1) undef
     %f16.add0 = fadd half %f16.val0, 0xH3C00
     %f32.add = fadd float %f32.val, 1.000000e+00
-    store volatile half %f16.add0, half addrspace(1)* undef
-    store volatile float %f32.add, float addrspace(1)* undef
+    store volatile half %f16.add0, ptr addrspace(1) undef
+    store volatile float %f32.add, ptr addrspace(1) undef
     ret void
   }
 
   define amdgpu_kernel void @add_f32_1.0_one_f32_use_multi_f16_use () #0 {
-    %f16.val0 = load volatile half, half addrspace(1)* undef
-    %f16.val1 = load volatile half, half addrspace(1)* undef
-    %f32.val = load volatile float, float addrspace(1)* undef
+    %f16.val0 = load volatile half, ptr addrspace(1) undef
+    %f16.val1 = load volatile half, ptr addrspace(1) undef
+    %f32.val = load volatile float, ptr addrspace(1) undef
     %f16.add0 = fadd half %f16.val0, 0xH3C00
     %f16.add1 = fadd half %f16.val1, 0xH3C00
     %f32.add = fadd float %f32.val, 1.000000e+00
-    store volatile half %f16.add0, half addrspace(1)* undef
-    store volatile half %f16.add1, half addrspace(1)* undef
-    store volatile float %f32.add, float addrspace(1)* undef
+    store volatile half %f16.add0, ptr addrspace(1) undef
+    store volatile half %f16.add1, ptr addrspace(1) undef
+    store volatile float %f32.add, ptr addrspace(1) undef
     ret void
   }
 
   define amdgpu_kernel void @add_i32_1_multi_f16_use() #0 {
-    %f16.val0 = load volatile half, half addrspace(1)* undef
-    %f16.val1 = load volatile half, half addrspace(1)* undef
+    %f16.val0 = load volatile half, ptr addrspace(1) undef
+    %f16.val1 = load volatile half, ptr addrspace(1) undef
     %f16.add0 = fadd half %f16.val0, 0xH0001
     %f16.add1 = fadd half %f16.val1, 0xH0001
-    store volatile half %f16.add0, half addrspace(1)* undef
-    store volatile half %f16.add1,half addrspace(1)* undef
+    store volatile half %f16.add0, ptr addrspace(1) undef
+    store volatile half %f16.add1,ptr addrspace(1) undef
     ret void
   }
 
   define amdgpu_kernel void @add_i32_m2_one_f32_use_multi_f16_use () #0 {
-    %f16.val0 = load volatile half, half addrspace(1)* undef
-    %f16.val1 = load volatile half, half addrspace(1)* undef
-    %f32.val = load volatile float, float addrspace(1)* undef
+    %f16.val0 = load volatile half, ptr addrspace(1) undef
+    %f16.val1 = load volatile half, ptr addrspace(1) undef
+    %f32.val = load volatile float, ptr addrspace(1) undef
     %f16.add0 = fadd half %f16.val0, 0xHFFFE
     %f16.add1 = fadd half %f16.val1, 0xHFFFE
     %f32.add = fadd float %f32.val, 0xffffffffc0000000
-    store volatile half %f16.add0, half addrspace(1)* undef
-    store volatile half %f16.add1, half addrspace(1)* undef
-    store volatile float %f32.add, float addrspace(1)* undef
+    store volatile half %f16.add0, ptr addrspace(1) undef
+    store volatile half %f16.add1, ptr addrspace(1) undef
+    store volatile float %f32.add, ptr addrspace(1) undef
     ret void
   }
 
   define amdgpu_kernel void @add_f16_1.0_multi_f32_use() #0 {
-    %f32.val0 = load volatile float, float addrspace(1)* undef
-    %f32.val1 = load volatile float, float addrspace(1)* undef
-    %f32.val = load volatile float, float addrspace(1)* undef
+    %f32.val0 = load volatile float, ptr addrspace(1) undef
+    %f32.val1 = load volatile float, ptr addrspace(1) undef
+    %f32.val = load volatile float, ptr addrspace(1) undef
     %f32.add0 = fadd float %f32.val0, 1.0
     %f32.add1 = fadd float %f32.val1, 1.0
-    store volatile float %f32.add0, float addrspace(1)* undef
-    store volatile float %f32.add1, float addrspace(1)* undef
+    store volatile float %f32.add0, ptr addrspace(1) undef
+    store volatile float %f32.add1, ptr addrspace(1) undef
     ret void
   }
 
   define amdgpu_kernel void @add_f16_1.0_other_high_bits_multi_f16_use() #0 {
-    %f16.val0 = load volatile half, half addrspace(1)* undef
-    %f16.val1 = load volatile half, half addrspace(1)* undef
-    %f32.val = load volatile half, half addrspace(1)* undef
+    %f16.val0 = load volatile half, ptr addrspace(1) undef
+    %f16.val1 = load volatile half, ptr addrspace(1) undef
+    %f32.val = load volatile half, ptr addrspace(1) undef
     %f16.add0 = fadd half %f16.val0, 0xH3C00
     %f32.add = fadd half %f32.val, 1.000000e+00
-    store volatile half %f16.add0, half addrspace(1)* undef
-    store volatile half %f32.add, half addrspace(1)* undef
+    store volatile half %f16.add0, ptr addrspace(1) undef
+    store volatile half %f32.add, ptr addrspace(1) undef
     ret void
   }
 
   define amdgpu_kernel void @add_f16_1.0_other_high_bits_use_f16_f32() #0 {
-    %f16.val0 = load volatile half, half addrspace(1)* undef
-    %f16.val1 = load volatile half, half addrspace(1)* undef
-    %f32.val = load volatile half, half addrspace(1)* undef
+    %f16.val0 = load volatile half, ptr addrspace(1) undef
+    %f16.val1 = load volatile half, ptr addrspace(1) undef
+    %f32.val = load volatile half, ptr addrspace(1) undef
     %f16.add0 = fadd half %f16.val0, 0xH3C00
     %f32.add = fadd half %f32.val, 1.000000e+00
-    store volatile half %f16.add0, half addrspace(1)* undef
-    store volatile half %f32.add, half addrspace(1)* undef
+    store volatile half %f16.add0, ptr addrspace(1) undef
+    store volatile half %f32.add, ptr addrspace(1) undef
     ret void
   }
 
@@ -158,10 +158,10 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`)
     %12 = V_MOV_B32_e32 1065353216, implicit $exec
     %13 = V_ADD_F16_e64 0, killed %11, 0, %12, 0, 0, implicit $mode, implicit $exec
-    BUFFER_STORE_SHORT_OFFSET killed %13, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %13, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`)
     S_ENDPGM 0
 
 ...
@@ -221,13 +221,13 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`)
+    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`)
     %13 = V_MOV_B32_e32 1065353216, implicit $exec
     %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit $mode, implicit $exec
     %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit $mode, implicit $exec
-    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`)
     S_ENDPGM 0
 
 ...
@@ -288,14 +288,14 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`)
-    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`)
+    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`)
+    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`)
     %14 = V_MOV_B32_e32 1065353216, implicit $exec
     %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $mode, implicit $exec
     %16 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $mode, implicit $exec
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`)
-    BUFFER_STORE_DWORD_OFFSET killed %16, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `float addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`)
+    BUFFER_STORE_DWORD_OFFSET killed %16, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`)
     S_ENDPGM 0
 
 ...
@@ -358,16 +358,16 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`)
-    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`)
+    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`)
+    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`)
     %14 = V_MOV_B32_e32 1065353216, implicit $exec
     %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $mode, implicit $exec
     %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit $mode, implicit $exec
     %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $mode, implicit $exec
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`)
-    BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `float addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`)
+    BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`)
     S_ENDPGM 0
 
 ...
@@ -424,13 +424,13 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`)
+    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`)
     %13 = V_MOV_B32_e32 1, implicit $exec
     %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit $mode, implicit $exec
     %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit $mode, implicit $exec
-    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`)
     S_ENDPGM 0
 
 ...
@@ -490,16 +490,16 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`)
-    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`)
+    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`)
+    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`)
     %14 = V_MOV_B32_e32 -2, implicit $exec
     %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $mode, implicit $exec
     %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit $mode, implicit $exec
     %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $mode, implicit $exec
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`)
-    BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `float addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`)
+    BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`)
     S_ENDPGM 0
 
 ...
@@ -559,13 +559,13 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`)
+    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`)
     %13 = V_MOV_B32_e32 15360, implicit $exec
     %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit $mode, implicit $exec
     %15 = V_ADD_F32_e64 0, %12, 0, %13, 0, 0, implicit $mode, implicit $exec
-    BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `float addrspace(1)* undef`)
-    BUFFER_STORE_DWORD_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `float addrspace(1)* undef`)
+    BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`)
+    BUFFER_STORE_DWORD_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`)
     S_ENDPGM 0
 
 ...
@@ -624,13 +624,13 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`)
+    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`)
     %13 = V_MOV_B32_e32 80886784, implicit $exec
     %14 = V_ADD_F16_e64 0, %11, 0, %13, 0, 0, implicit $mode, implicit $exec
     %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit $mode, implicit $exec
-    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`)
     S_ENDPGM 0
 
 ...
@@ -686,13 +686,13 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`)
+    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`)
     %13 = V_MOV_B32_e32 305413120, implicit $exec
     %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit $mode, implicit $exec
     %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit $mode, implicit $exec
-    BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `float addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`)
+    BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`)
     S_ENDPGM 0
 
 ...

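The rewrite in these hunks is mechanical. With opaque pointers, the value
type on a load or store carries the access type by itself, so the pointee
type drops out of the pointer operand while the address space is kept. A
minimal sketch of the pattern, with illustrative names rather than lines
from any one test:

    ; typed pointers (before)
    %v = load volatile half, half addrspace(1)* undef
    store volatile half %v, half addrspace(1)* undef

    ; opaque pointers (after)
    %v = load volatile half, ptr addrspace(1) undef
    store volatile half %v, ptr addrspace(1) undef
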
diff --git a/llvm/test/CodeGen/AMDGPU/fp-atomic-to-s_denormmode.mir b/llvm/test/CodeGen/AMDGPU/fp-atomic-to-s_denormmode.mir
index 1969f9bdcd869..5a33425623659 100644
--- a/llvm/test/CodeGen/AMDGPU/fp-atomic-to-s_denormmode.mir
+++ b/llvm/test/CodeGen/AMDGPU/fp-atomic-to-s_denormmode.mir
@@ -9,7 +9,7 @@
 name:            flat_atomic_fcmpswap_to_s_denorm_mode
 body:            |
   bb.0:
-    FLAT_ATOMIC_FCMPSWAP undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    FLAT_ATOMIC_FCMPSWAP undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -21,7 +21,7 @@ body:            |
 name:            flat_atomic_fcmpswap_x2_to_s_denorm_mode
 body:            |
   bb.0:
-    FLAT_ATOMIC_FCMPSWAP_X2 undef %0:vreg_64, undef %1:vreg_128, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    FLAT_ATOMIC_FCMPSWAP_X2 undef %0:vreg_64, undef %1:vreg_128, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -33,7 +33,7 @@ body:            |
 name:            flat_atomic_fmax_to_s_denorm_mode
 body:            |
   bb.0:
-    FLAT_ATOMIC_FMAX undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    FLAT_ATOMIC_FMAX undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -45,7 +45,7 @@ body:            |
 name:            flat_atomic_fmax_x2_to_s_denorm_mode
 body:            |
   bb.0:
-    FLAT_ATOMIC_FMAX_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    FLAT_ATOMIC_FMAX_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -57,7 +57,7 @@ body:            |
 name:            flat_atomic_fmin_to_s_denorm_mode
 body:            |
   bb.0:
-    FLAT_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    FLAT_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -69,7 +69,7 @@ body:            |
 name:            flat_atomic_fmin_x2_to_s_denorm_mode
 body:            |
   bb.0:
-    FLAT_ATOMIC_FMIN_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    FLAT_ATOMIC_FMIN_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -81,7 +81,7 @@ body:            |
 name:            flat_atomic_fcmpswap_x2_rtn_to_s_denorm_mode
 body:            |
   bb.0:
-    %2:vreg_64 = FLAT_ATOMIC_FCMPSWAP_X2_RTN undef %0:vreg_64, undef %1:vreg_128, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vreg_64 = FLAT_ATOMIC_FCMPSWAP_X2_RTN undef %0:vreg_64, undef %1:vreg_128, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -93,7 +93,7 @@ body:            |
 name:            flat_atomic_fmax_rtn_to_s_denorm_mode
 body:            |
   bb.0:
-    %2:vgpr_32 = FLAT_ATOMIC_FMAX_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vgpr_32 = FLAT_ATOMIC_FMAX_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -105,7 +105,7 @@ body:            |
 name:            flat_atomic_fmax_x2_rtn_to_s_denorm_mode
 body:            |
   bb.0:
-    %2:vreg_64 = FLAT_ATOMIC_FMAX_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vreg_64 = FLAT_ATOMIC_FMAX_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -117,7 +117,7 @@ body:            |
 name:            flat_atomic_fmin_rtn_to_s_denorm_mode
 body:            |
   bb.0:
-    %2:vgpr_32 = FLAT_ATOMIC_FMIN_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vgpr_32 = FLAT_ATOMIC_FMIN_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -129,7 +129,7 @@ body:            |
 name:            flat_atomic_fmin_x2_rtn_to_s_denorm_mode
 body:            |
   bb.0:
-    %2:vreg_64 = FLAT_ATOMIC_FMIN_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vreg_64 = FLAT_ATOMIC_FMIN_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -141,7 +141,7 @@ body:            |
 name:            flat_atomic_fcmpswap_rtn_to_s_denorm_mode
 body:            |
   bb.0:
-    %2:vgpr_32 = FLAT_ATOMIC_FCMPSWAP_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vgpr_32 = FLAT_ATOMIC_FCMPSWAP_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -153,7 +153,7 @@ body:            |
 name:            global_atomic_fcmpswap_to_s_denorm_mode
 body:            |
   bb.0:
-    GLOBAL_ATOMIC_FCMPSWAP undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    GLOBAL_ATOMIC_FCMPSWAP undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -165,7 +165,7 @@ body:            |
 name:            global_atomic_fcmpswap_x2_to_s_denorm_mode
 body:            |
   bb.0:
-    GLOBAL_ATOMIC_FCMPSWAP_X2 undef %0:vreg_64, undef %1:vreg_128, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    GLOBAL_ATOMIC_FCMPSWAP_X2 undef %0:vreg_64, undef %1:vreg_128, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -177,7 +177,7 @@ body:            |
 name:            global_atomic_fmax_to_s_denorm_mode
 body:            |
   bb.0:
-    GLOBAL_ATOMIC_FMAX undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    GLOBAL_ATOMIC_FMAX undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -189,7 +189,7 @@ body:            |
 name:            global_atomic_fmax_x2_to_s_denorm_mode
 body:            |
   bb.0:
-    GLOBAL_ATOMIC_FMAX_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    GLOBAL_ATOMIC_FMAX_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -201,7 +201,7 @@ body:            |
 name:            global_atomic_fmin_to_s_denorm_mode
 body:            |
   bb.0:
-    GLOBAL_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    GLOBAL_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -213,7 +213,7 @@ body:            |
 name:            global_atomic_fmin_x2_to_s_denorm_mode
 body:            |
   bb.0:
-    GLOBAL_ATOMIC_FMIN_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    GLOBAL_ATOMIC_FMIN_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -225,7 +225,7 @@ body:            |
 name:            global_atomic_fcmpswap_rtn_to_s_denorm_mode
 body:            |
   bb.0:
-    %2:vgpr_32 = GLOBAL_ATOMIC_FCMPSWAP_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vgpr_32 = GLOBAL_ATOMIC_FCMPSWAP_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -237,7 +237,7 @@ body:            |
 name:            global_atomic_fcmpswap_x2_rtn_to_s_denorm_mode
 body:            |
   bb.0:
-    %2:vreg_64 = GLOBAL_ATOMIC_FCMPSWAP_X2_RTN undef %0:vreg_64, undef %1:vreg_128, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vreg_64 = GLOBAL_ATOMIC_FCMPSWAP_X2_RTN undef %0:vreg_64, undef %1:vreg_128, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -249,7 +249,7 @@ body:            |
 name:            global_atomic_fmax_rtn_to_s_denorm_mode
 body:            |
   bb.0:
-    %2:vgpr_32 = GLOBAL_ATOMIC_FMAX_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vgpr_32 = GLOBAL_ATOMIC_FMAX_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -261,7 +261,7 @@ body:            |
 name:            global_atomic_fmax_x2_rtn_to_s_denorm_mode
 body:            |
   bb.0:
-    %2:vreg_64 = GLOBAL_ATOMIC_FMAX_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vreg_64 = GLOBAL_ATOMIC_FMAX_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -273,7 +273,7 @@ body:            |
 name:            global_atomic_fmin_rtn_to_s_denorm_mode
 body:            |
   bb.0:
-    %2:vgpr_32 = GLOBAL_ATOMIC_FMIN_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vgpr_32 = GLOBAL_ATOMIC_FMIN_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -285,7 +285,7 @@ body:            |
 name:            global_atomic_fmin_x2_rtn_to_s_denorm_mode
 body:            |
   bb.0:
-    %2:vreg_64 = GLOBAL_ATOMIC_FMIN_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vreg_64 = GLOBAL_ATOMIC_FMIN_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -297,7 +297,7 @@ body:            |
 name:            global_atomic_fcmpswap_saddr_to_s_denorm_mode
 body:            |
   bb.0:
-    GLOBAL_ATOMIC_FCMPSWAP_SADDR undef %0:vgpr_32, undef %1:vreg_64, undef %3:sgpr_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    GLOBAL_ATOMIC_FCMPSWAP_SADDR undef %0:vgpr_32, undef %1:vreg_64, undef %3:sgpr_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -309,7 +309,7 @@ body:            |
 name:            global_atomic_fcmpswap_x2_saddr_rtn_to_s_denorm_mode
 body:            |
   bb.0:
-    %2:vreg_64 = GLOBAL_ATOMIC_FCMPSWAP_X2_SADDR_RTN undef %0:vgpr_32, undef %1:vreg_128, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vreg_64 = GLOBAL_ATOMIC_FCMPSWAP_X2_SADDR_RTN undef %0:vgpr_32, undef %1:vreg_128, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -321,7 +321,7 @@ body:            |
 name:            global_atomic_fmax_saddr_rtn_to_s_denorm_mode
 body:            |
   bb.0:
-    %2:vgpr_32 = GLOBAL_ATOMIC_FMAX_SADDR_RTN undef %0:vgpr_32, undef %1:vgpr_32, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vgpr_32 = GLOBAL_ATOMIC_FMAX_SADDR_RTN undef %0:vgpr_32, undef %1:vgpr_32, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -333,7 +333,7 @@ body:            |
 name:            global_atomic_fmax_x2_saddr_rtn_to_s_denorm_mode
 body:            |
   bb.0:
-    %2:vreg_64 = GLOBAL_ATOMIC_FMAX_X2_SADDR_RTN undef %0:vgpr_32, undef %1:vreg_64, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vreg_64 = GLOBAL_ATOMIC_FMAX_X2_SADDR_RTN undef %0:vgpr_32, undef %1:vreg_64, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -345,7 +345,7 @@ body:            |
 name:            global_atomic_fmin_saddr_rtn_to_s_denorm_mode
 body:            |
   bb.0:
-    %2:vgpr_32 = GLOBAL_ATOMIC_FMIN_SADDR_RTN undef %0:vgpr_32, undef %1:vgpr_32, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vgpr_32 = GLOBAL_ATOMIC_FMIN_SADDR_RTN undef %0:vgpr_32, undef %1:vgpr_32, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -357,7 +357,7 @@ body:            |
 name:            global_atomic_fmin_x2_saddr_rtn_to_s_denorm_mode
 body:            |
   bb.0:
-    %2:vreg_64 = GLOBAL_ATOMIC_FMIN_X2_SADDR_RTN undef %0:vgpr_32, undef %1:vreg_64, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vreg_64 = GLOBAL_ATOMIC_FMIN_X2_SADDR_RTN undef %0:vgpr_32, undef %1:vreg_64, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
@@ -369,7 +369,7 @@ body:            |
 name:            flat_fp_atomic_to_s_denorm_mode_waitcnt
 body:            |
   bb.0:
-    FLAT_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    FLAT_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_WAITCNT 0
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
@@ -382,7 +382,7 @@ body:            |
 name:            flat_fp_atomic_to_s_denorm_mode_valu
 body:            |
   bb.0:
-    FLAT_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    FLAT_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     %2:vgpr_32 = V_ADD_F32_e32 undef %1:vgpr_32, undef %1:vgpr_32, implicit $mode, implicit $exec
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...

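In the MIR memory operands above, the IR pointer quoted between backticks
is printed with its IR type, so the same substitution shows up inside the
MMO text; the access size, ordering, and syncscope are untouched. For
instance, following the hunks above:

    before:  :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
    after:   :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
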
diff --git a/llvm/test/CodeGen/AMDGPU/insert-waitcnts-exp.mir b/llvm/test/CodeGen/AMDGPU/insert-waitcnts-exp.mir
index 221e6a87ec47c..11ddb7ae6bae4 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-waitcnts-exp.mir
+++ b/llvm/test/CodeGen/AMDGPU/insert-waitcnts-exp.mir
@@ -2,10 +2,10 @@
 --- |
   define amdgpu_ps <4 x float> @exp_done_waitcnt(<4 x i32> inreg, <4 x
   i32> inreg, i32 inreg %w, float %v) #0 {
-    %a = load volatile float, float addrspace(1)* undef
-    %b = load volatile float, float addrspace(1)* undef
-    %c = load volatile float, float addrspace(1)* undef
-    %d = load volatile float, float addrspace(1)* undef
+    %a = load volatile float, ptr addrspace(1) undef
+    %b = load volatile float, ptr addrspace(1) undef
+    %c = load volatile float, ptr addrspace(1) undef
+    %d = load volatile float, ptr addrspace(1) undef
     call void @llvm.amdgcn.exp.f32(i32 15, i32 1, float %a, float %b, float %c, float %d, i1 true, i1 false)
     ret <4 x float> <float 5.000000e-01, float 1.000000e+00, float 2.000000e+00, float 4.000000e+00>
   }
@@ -49,10 +49,10 @@ body:             |
   bb.0 (%ir-block.2):
     $sgpr3 = S_MOV_B32 61440
     $sgpr2 = S_MOV_B32 -1
-    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`)
-    $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`)
-    $vgpr2 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`)
-    $vgpr3 = BUFFER_LOAD_DWORD_OFFSET killed $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`)
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`)
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`)
+    $vgpr2 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`)
+    $vgpr3 = BUFFER_LOAD_DWORD_OFFSET killed $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`)
     EXP_DONE 0, killed $vgpr0, killed $vgpr1, killed $vgpr2, killed $vgpr3, -1, -1, 15, implicit $exec
     $vgpr0 = V_MOV_B32_e32 1056964608, implicit $exec
     $vgpr1 = V_MOV_B32_e32 1065353216, implicit $exec

diff --git a/llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir b/llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
index f636cfd0d337b..1b669a6f77fa8 100644
--- a/llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
+++ b/llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
@@ -1,21 +1,21 @@
 # RUN: llc -run-pass block-placement -march=amdgcn -verify-machineinstrs -o - %s | FileCheck %s
 --- |
 
-  define amdgpu_kernel void @invert_br_undef_vcc(float %cond, i32 addrspace(1)* %out) #0 {
+  define amdgpu_kernel void @invert_br_undef_vcc(float %cond, ptr addrspace(1) %out) #0 {
   entry:
     br i1 undef, label %if, label %else, !structurizecfg.uniform !0, !amdgpu.uniform !0
 
   else:                                             ; preds = %entry
-    store volatile i32 100, i32 addrspace(1)* undef
+    store volatile i32 100, ptr addrspace(1) undef
     br label %done, !structurizecfg.uniform !0
 
   if:                                               ; preds = %entry
-    store volatile i32 9, i32 addrspace(1)* undef
+    store volatile i32 9, ptr addrspace(1) undef
     br label %done, !structurizecfg.uniform !0
 
   done:                                             ; preds = %if, %else
     %value = phi i32 [ 0, %if ], [ 1, %else ]
-    store i32 %value, i32 addrspace(1)* %out
+    store i32 %value, ptr addrspace(1) %out
     ret void
   }
 
@@ -55,7 +55,7 @@ body:             |
   bb.0.entry:
     liveins: $sgpr0_sgpr1
 
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
     $sgpr7 = S_MOV_B32 61440
     $sgpr6 = S_MOV_B32 -1
     S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc
@@ -64,7 +64,7 @@ body:             |
     liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
 
     $vgpr0 = V_MOV_B32_e32 100, implicit $exec
-    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `i32 addrspace(1)* undef`)
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`)
     $vgpr0 = V_MOV_B32_e32 1, implicit $exec
     S_BRANCH %bb.3
 
@@ -72,7 +72,7 @@ body:             |
     liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
 
     $vgpr0 = V_MOV_B32_e32 9, implicit $exec
-    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `i32 addrspace(1)* undef`)
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`)
     $vgpr0 = V_MOV_B32_e32 0, implicit $exec
 
   bb.3.done:

diff --git a/llvm/test/CodeGen/AMDGPU/lds-dma-waitcnt.mir b/llvm/test/CodeGen/AMDGPU/lds-dma-waitcnt.mir
index 8c05791e04d06..1e4618b7eebe2 100644
--- a/llvm/test/CodeGen/AMDGPU/lds-dma-waitcnt.mir
+++ b/llvm/test/CodeGen/AMDGPU/lds-dma-waitcnt.mir
@@ -10,8 +10,8 @@ name: buffer_load_dword_lds_ds_read
 body:             |
   bb.0:
     $m0 = S_MOV_B32 0
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(1)* undef` + 4), (store (s32) into `i32 addrspace(3)* undef` + 4)
-    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) undef` + 4), (store (s32) into `ptr addrspace(3) undef` + 4)
+    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -27,9 +27,9 @@ name: buffer_load_dword_lds_vmcnt_1
 body:             |
   bb.0:
     $m0 = S_MOV_B32 0
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(1)* undef`), (store (s32) into `i32 addrspace(3)* undef`)
-    $vgpr10 = BUFFER_LOAD_DWORD_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(1)* undef`)
-    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) undef`), (store (s32) into `ptr addrspace(3) undef`)
+    $vgpr10 = BUFFER_LOAD_DWORD_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) undef`)
+    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -44,8 +44,8 @@ name: buffer_load_dword_lds_flat_read
 body:             |
   bb.0:
     $m0 = S_MOV_B32 0
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(1)* undef`), (store (s32) into `i32 addrspace(3)* undef`)
-    $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) undef`), (store (s32) into `ptr addrspace(3) undef`)
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
 
     S_ENDPGM 0
 
@@ -61,8 +61,8 @@ name: global_load_lds_dword_ds_read
 body:             |
   bb.0:
     $m0 = S_MOV_B32 0
-    GLOBAL_LOAD_LDS_DWORD $vgpr0_vgpr1, 4, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(1)* undef` + 4), (store (s32) into `i32 addrspace(3)* undef` + 4)
-    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`)
+    GLOBAL_LOAD_LDS_DWORD $vgpr0_vgpr1, 4, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) undef` + 4), (store (s32) into `ptr addrspace(3) undef` + 4)
+    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -77,8 +77,8 @@ name: scratch_load_lds_dword_ds_read
 body:             |
   bb.0:
     $m0 = S_MOV_B32 0
-    SCRATCH_LOAD_LDS_DWORD $vgpr0, 4, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(5)* undef` + 4), (store (s32) into `i32 addrspace(3)* undef` + 4)
-    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`)
+    SCRATCH_LOAD_LDS_DWORD $vgpr0, 4, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(5) undef` + 4), (store (s32) into `ptr addrspace(3) undef` + 4)
+    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -91,8 +91,8 @@ name: buffer_store_lds_dword_ds_read
 body:             |
   bb.0:
     $m0 = S_MOV_B32 0
-    BUFFER_STORE_LDS_DWORD $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(3)* undef` + 4), (store (s32) into `i32 addrspace(1)* undef` + 4)
-    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`)
+    BUFFER_STORE_LDS_DWORD $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(3) undef` + 4), (store (s32) into `ptr addrspace(1) undef` + 4)
+    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -110,10 +110,10 @@ name: series_of_buffer_load_dword_lds_ds_read
 body:             |
   bb.0:
     $m0 = S_MOV_B32 0
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(1)* undef`), (store (s32) into `i32 addrspace(3)* undef`)
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(1)* undef` + 4), (store (s32) into `i32 addrspace(3)* undef` + 4)
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 8, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(1)* undef` + 8), (store (s32) into `i32 addrspace(3)* undef` + 8)
-    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) undef`), (store (s32) into `ptr addrspace(3) undef`)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) undef` + 4), (store (s32) into `ptr addrspace(3) undef` + 4)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 8, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) undef` + 8), (store (s32) into `ptr addrspace(3) undef` + 8)
+    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...

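The LDS DMA instructions above are a slightly busier case: each carries
two memory operands, a global or scratch load plus an LDS store, and both
get the same rewrite. Sketch, mirroring the hunks above:

    before: (load (s32) from `i32 addrspace(1)* undef`), (store (s32) into `i32 addrspace(3)* undef`)
    after:  (load (s32) from `ptr addrspace(1) undef`), (store (s32) into `ptr addrspace(3) undef`)
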
diff --git a/llvm/test/CodeGen/AMDGPU/load-store-opt-dlc.mir b/llvm/test/CodeGen/AMDGPU/load-store-opt-dlc.mir
index 855acda9f9d88..bac8a47552f55 100644
--- a/llvm/test/CodeGen/AMDGPU/load-store-opt-dlc.mir
+++ b/llvm/test/CodeGen/AMDGPU/load-store-opt-dlc.mir
@@ -4,30 +4,30 @@
 # operations correctly with/without DLC bit.
 
 --- |
-  define amdgpu_kernel void @test1(i32 addrspace(1)* %out) {
-    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
-    store i32 123, i32 addrspace(1)* %out.gep.1
-    store i32 456, i32 addrspace(1)* %out
+  define amdgpu_kernel void @test1(ptr addrspace(1) %out) {
+    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
+    store i32 123, ptr addrspace(1) %out.gep.1
+    store i32 456, ptr addrspace(1) %out
     ret void
   }
 
-  define amdgpu_kernel void @test2(i32 addrspace(1)* %out) {
-    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
-    store i32 123, i32 addrspace(1)* %out.gep.1
-    store i32 456, i32 addrspace(1)* %out
+  define amdgpu_kernel void @test2(ptr addrspace(1) %out) {
+    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
+    store i32 123, ptr addrspace(1) %out.gep.1
+    store i32 456, ptr addrspace(1) %out
     ret void
   }
 
-  define amdgpu_kernel void @test3(i32 addrspace(1)* %out) {
-    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
-    store i32 123, i32 addrspace(1)* %out.gep.1
-    store i32 456, i32 addrspace(1)* %out
+  define amdgpu_kernel void @test3(ptr addrspace(1) %out) {
+    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
+    store i32 123, ptr addrspace(1) %out.gep.1
+    store i32 456, ptr addrspace(1) %out
     ret void
   }
-  define amdgpu_kernel void @test4(i32 addrspace(1)* %out) {
-    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
-    store i32 123, i32 addrspace(1)* %out.gep.1
-    store i32 456, i32 addrspace(1)* %out
+  define amdgpu_kernel void @test4(ptr addrspace(1) %out) {
+    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
+    store i32 123, ptr addrspace(1) %out.gep.1
+    store i32 456, ptr addrspace(1) %out
     ret void
   }
 ...
@@ -48,7 +48,7 @@ body: |
     $sgpr3 = S_MOV_B32 61440
 
     %0:sgpr_64 = COPY $sgpr0_sgpr1
-    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
+    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
     %2:sgpr_32 = COPY $sgpr2
     %3:sgpr_32 = COPY $sgpr3
     %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2
@@ -79,7 +79,7 @@ body: |
     $sgpr3 = S_MOV_B32 61440
 
     %0:sgpr_64 = COPY $sgpr0_sgpr1
-    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
+    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
     %2:sgpr_32 = COPY $sgpr2
     %3:sgpr_32 = COPY $sgpr3
     %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2
@@ -110,7 +110,7 @@ body: |
     $sgpr3 = S_MOV_B32 61440
 
     %0:sgpr_64 = COPY $sgpr0_sgpr1
-    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
+    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
     %2:sgpr_32 = COPY $sgpr2
     %3:sgpr_32 = COPY $sgpr3
     %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2
@@ -140,7 +140,7 @@ body: |
     $sgpr3 = S_MOV_B32 61440
 
     %0:sgpr_64 = COPY $sgpr0_sgpr1
-    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
+    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
     %2:sgpr_32 = COPY $sgpr2
     %3:sgpr_32 = COPY $sgpr3
     %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2

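getelementptr is unaffected beyond its pointer operand: its source element
type is already an explicit first operand and stays as-is. Sketch,
following the tests above:

    ; before
    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
    ; after
    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
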
diff --git a/llvm/test/CodeGen/AMDGPU/load-store-opt-scc.mir b/llvm/test/CodeGen/AMDGPU/load-store-opt-scc.mir
index 8fbe2814b4347..19f44dc6164f8 100644
--- a/llvm/test/CodeGen/AMDGPU/load-store-opt-scc.mir
+++ b/llvm/test/CodeGen/AMDGPU/load-store-opt-scc.mir
@@ -4,30 +4,30 @@
 # operations correctly with/without SCC bit.
 
 --- |
-  define amdgpu_kernel void @test1(i32 addrspace(1)* %out) {
-    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
-    store i32 123, i32 addrspace(1)* %out.gep.1
-    store i32 456, i32 addrspace(1)* %out
+  define amdgpu_kernel void @test1(ptr addrspace(1) %out) {
+    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
+    store i32 123, ptr addrspace(1) %out.gep.1
+    store i32 456, ptr addrspace(1) %out
     ret void
   }
 
-  define amdgpu_kernel void @test2(i32 addrspace(1)* %out) {
-    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
-    store i32 123, i32 addrspace(1)* %out.gep.1
-    store i32 456, i32 addrspace(1)* %out
+  define amdgpu_kernel void @test2(ptr addrspace(1) %out) {
+    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
+    store i32 123, ptr addrspace(1) %out.gep.1
+    store i32 456, ptr addrspace(1) %out
     ret void
   }
 
-  define amdgpu_kernel void @test3(i32 addrspace(1)* %out) {
-    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
-    store i32 123, i32 addrspace(1)* %out.gep.1
-    store i32 456, i32 addrspace(1)* %out
+  define amdgpu_kernel void @test3(ptr addrspace(1) %out) {
+    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
+    store i32 123, ptr addrspace(1) %out.gep.1
+    store i32 456, ptr addrspace(1) %out
     ret void
   }
-  define amdgpu_kernel void @test4(i32 addrspace(1)* %out) {
-    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
-    store i32 123, i32 addrspace(1)* %out.gep.1
-    store i32 456, i32 addrspace(1)* %out
+  define amdgpu_kernel void @test4(ptr addrspace(1) %out) {
+    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
+    store i32 123, ptr addrspace(1) %out.gep.1
+    store i32 456, ptr addrspace(1) %out
     ret void
   }
 ...
@@ -48,7 +48,7 @@ body: |
     $sgpr3 = S_MOV_B32 61440
 
     %0:sgpr_64 = COPY $sgpr0_sgpr1
-    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
+    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
     %2:sgpr_32 = COPY $sgpr2
     %3:sgpr_32 = COPY $sgpr3
     %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2
@@ -79,7 +79,7 @@ body: |
     $sgpr3 = S_MOV_B32 61440
 
     %0:sgpr_64 = COPY $sgpr0_sgpr1
-    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
+    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
     %2:sgpr_32 = COPY $sgpr2
     %3:sgpr_32 = COPY $sgpr3
     %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2
@@ -110,7 +110,7 @@ body: |
     $sgpr3 = S_MOV_B32 61440
 
     %0:sgpr_64 = COPY $sgpr0_sgpr1
-    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
+    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
     %2:sgpr_32 = COPY $sgpr2
     %3:sgpr_32 = COPY $sgpr3
     %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2
@@ -140,7 +140,7 @@ body: |
     $sgpr3 = S_MOV_B32 61440
 
     %0:sgpr_64 = COPY $sgpr0_sgpr1
-    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
+    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
     %2:sgpr_32 = COPY $sgpr2
     %3:sgpr_32 = COPY $sgpr3
     %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2

diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir b/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
index 311c19b4d49f0..1972d050df02e 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
@@ -4,14 +4,14 @@
   declare i32 @llvm.amdgcn.workitem.id.x() #0
 
   define amdgpu_kernel void @atomic_max_i32_noret(
-      i32 addrspace(1)* %out,
-      i32 addrspace(1)* addrspace(1)* %in,
-      i32 addrspace(1)* %x,
+      ptr addrspace(1) %out,
+      ptr addrspace(1) %in,
+      ptr addrspace(1) %x,
       i32 %y) #1 {
     %tid = call i32 @llvm.amdgcn.workitem.id.x()
     %idxprom = sext i32 %tid to i64
-    %tid.gep = getelementptr i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* %in, i64 %idxprom
-    %ptr = load volatile i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* %tid.gep
+    %tid.gep = getelementptr ptr addrspace(1), ptr addrspace(1) %in, i64 %idxprom
+    %ptr = load volatile ptr addrspace(1), ptr addrspace(1) %tid.gep
     %xor = xor i32 %tid, 1
     %cmp = icmp ne i32 %xor, 0
     %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %cmp)
@@ -20,8 +20,8 @@
     br i1 %2, label %atomic, label %exit
 
   atomic:                                           ; preds = %0
-    %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 100
-    %ret = atomicrmw max i32 addrspace(1)* %gep, i32 %y seq_cst
+    %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 100
+    %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y seq_cst
     br label %exit
 
   exit:                                             ; preds = %atomic, %0
@@ -75,7 +75,7 @@ body:             |
     successors: %bb.1.atomic(0x40000000), %bb.2.exit(0x40000000)
     liveins: $vgpr0, $sgpr0_sgpr1
 
-    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
     $vgpr1 = V_ASHRREV_I32_e32 31, $vgpr0, implicit $exec
     $vgpr1_vgpr2 = V_LSHL_B64_e64 $vgpr0_vgpr1, 3, implicit $exec
     $sgpr7 = S_MOV_B32 61440
@@ -92,7 +92,7 @@ body:             |
     successors: %bb.2.exit(0x80000000)
     liveins: $sgpr4_sgpr5_sgpr6_sgpr7:0x0000000C, $sgpr0_sgpr1, $sgpr2_sgpr3, $vgpr1_vgpr2_vgpr3_vgpr4:0x00000003
 
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 15, 0 :: (non-temporal dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 15, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) undef`)
     dead $vgpr0 = V_MOV_B32_e32 -1, implicit $exec
     dead $vgpr0 = V_MOV_B32_e32 61440, implicit $exec
     $sgpr4_sgpr5 = S_MOV_B64 0

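Likewise for atomics: atomicrmw types the operation from its value
operand, so only the pointer operand changes. Sketch, following the hunk
above:

    ; before
    %ret = atomicrmw max i32 addrspace(1)* %gep, i32 %y seq_cst
    ; after
    %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y seq_cst
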
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-invalid-addrspace.mir b/llvm/test/CodeGen/AMDGPU/memory-legalizer-invalid-addrspace.mir
index 8ba41362605ee..abf74d3efd144 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-invalid-addrspace.mir
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-invalid-addrspace.mir
@@ -11,10 +11,10 @@ body:             |
 
     $vgpr0 = V_MOV_B32_e32 $sgpr2, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr2_sgpr3
     $vgpr1 = V_MOV_B32_e32 killed $sgpr3, implicit $exec, implicit $sgpr2_sgpr3, implicit $exec
-    renamable $vgpr2 = FLAT_LOAD_DWORD killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load syncscope("one-as") seq_cst (s32) from `i32 addrspace(42)* undef`)
+    renamable $vgpr2 = FLAT_LOAD_DWORD killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load syncscope("one-as") seq_cst (s32) from `ptr addrspace(42) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -30,7 +30,7 @@ body:             |
     $vgpr2 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store syncscope("agent-one-as") seq_cst (s32) into `i32 addrspace(42)* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store syncscope("agent-one-as") seq_cst (s32) into `ptr addrspace(42) undef`)
     S_ENDPGM 0
 
 ...
@@ -47,7 +47,7 @@ body:             |
     $vgpr0 = V_MOV_B32_e32 killed $sgpr4, implicit $exec, implicit $exec
     $vgpr1 = V_MOV_B32_e32 killed $sgpr5, implicit $exec, implicit $exec
     $vgpr2 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_ATOMIC_CMPSWAP killed renamable $vgpr2_vgpr3, killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store syncscope("workgroup-one-as") seq_cst seq_cst (s32) on `i32 addrspace(42)* undef`)
+    FLAT_ATOMIC_CMPSWAP killed renamable $vgpr2_vgpr3, killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store syncscope("workgroup-one-as") seq_cst seq_cst (s32) on `ptr addrspace(42) undef`)
     S_ENDPGM 0
 
 ...
@@ -63,7 +63,7 @@ body:             |
     $vgpr0 = V_MOV_B32_e32 $sgpr2, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr2_sgpr3
     $vgpr1 = V_MOV_B32_e32 killed $sgpr3, implicit $exec, implicit $sgpr2_sgpr3, implicit $exec
     $vgpr2 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    FLAT_ATOMIC_SWAP killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store syncscope("wavefront-one-as") seq_cst (s32) on `i32 addrspace(42)* undef`)
+    FLAT_ATOMIC_SWAP killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store syncscope("wavefront-one-as") seq_cst (s32) on `ptr addrspace(42) undef`)
     S_ENDPGM 0
 
 ...

diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-local.mir b/llvm/test/CodeGen/AMDGPU/memory-legalizer-local.mir
index aed7814e9310f..af4feb9634cb2 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-local.mir
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-local.mir
@@ -13,14 +13,14 @@
 name:            load_singlethread_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") unordered (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") unordered (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -37,14 +37,14 @@ body:             |
 name:            load_singlethread_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") monotonic (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") monotonic (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -61,14 +61,14 @@ body:             |
 name:            load_singlethread_acquire
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") acquire (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") acquire (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -85,14 +85,14 @@ body:             |
 name:            load_singlethread_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") seq_cst (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") seq_cst (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -109,14 +109,14 @@ body:             |
 name:            load_wavefront_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") unordered (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") unordered (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -133,14 +133,14 @@ body:             |
 name:            load_wavefront_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") monotonic (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") monotonic (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -157,14 +157,14 @@ body:             |
 name:            load_wavefront_acquire
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") acquire (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") acquire (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -181,14 +181,14 @@ body:             |
 name:            load_wavefront_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") seq_cst (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") seq_cst (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -205,14 +205,14 @@ body:             |
 name:            load_workgroup_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") unordered (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") unordered (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -229,14 +229,14 @@ body:             |
 name:            load_workgroup_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") monotonic (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") monotonic (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -253,14 +253,14 @@ body:             |
 name:            load_workgroup_acquire
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") acquire (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") acquire (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -277,14 +277,14 @@ body:             |
 name:            load_workgroup_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") seq_cst (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") seq_cst (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -301,14 +301,14 @@ body:             |
 name:            load_agent_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") unordered (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") unordered (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -325,14 +325,14 @@ body:             |
 name:            load_agent_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") monotonic (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") monotonic (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -349,14 +349,14 @@ body:             |
 name:            load_agent_acquire
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") acquire (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") acquire (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -373,14 +373,14 @@ body:             |
 name:            load_agent_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") seq_cst (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") seq_cst (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -397,14 +397,14 @@ body:             |
 name:            load_system_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") unordered (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") unordered (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -421,14 +421,14 @@ body:             |
 name:            load_system_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") monotonic (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") monotonic (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -445,14 +445,14 @@ body:             |
 name:            load_system_acquire
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") acquire (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") acquire (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -469,14 +469,14 @@ body:             |
 name:            load_system_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") seq_cst (s32) from `i32 addrspace(3)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") seq_cst (s32) from `ptr addrspace(3) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -493,12 +493,12 @@ body:             |
 name:            store_singlethread_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") unordered (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") unordered (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -515,12 +515,12 @@ body:             |
 name:            store_singlethread_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") monotonic (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") monotonic (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -537,12 +537,12 @@ body:             |
 name:            store_singlethread_release
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") release (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") release (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -559,12 +559,12 @@ body:             |
 name:            store_singlethread_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") seq_cst (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") seq_cst (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -581,12 +581,12 @@ body:             |
 name:            store_wavefront_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") unordered (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") unordered (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -603,12 +603,12 @@ body:             |
 name:            store_wavefront_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") monotonic (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") monotonic (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -625,12 +625,12 @@ body:             |
 name:            store_wavefront_release
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") release (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") release (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -647,12 +647,12 @@ body:             |
 name:            store_wavefront_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") seq_cst (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") seq_cst (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -669,12 +669,12 @@ body:             |
 name:            store_workgroup_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") unordered (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") unordered (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -691,12 +691,12 @@ body:             |
 name:            store_workgroup_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") monotonic (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") monotonic (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -713,12 +713,12 @@ body:             |
 name:            store_workgroup_release
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") release (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") release (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -735,12 +735,12 @@ body:             |
 name:            store_workgroup_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") seq_cst (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") seq_cst (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -757,12 +757,12 @@ body:             |
 name:            store_agent_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") unordered (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") unordered (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -779,12 +779,12 @@ body:             |
 name:            store_agent_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") monotonic (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") monotonic (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -801,12 +801,12 @@ body:             |
 name:            store_agent_release
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") release (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") release (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -823,12 +823,12 @@ body:             |
 name:            store_agent_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") seq_cst (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") seq_cst (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -845,12 +845,12 @@ body:             |
 name:            store_system_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") unordered (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") unordered (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -867,12 +867,12 @@ body:             |
 name:            store_system_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") monotonic (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") monotonic (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -889,12 +889,12 @@ body:             |
 name:            store_system_release
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") release (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") release (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -911,12 +911,12 @@ body:             |
 name:            store_system_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") seq_cst (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") seq_cst (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -933,12 +933,12 @@ body:             |
 name:            atomicrmw_singlethread_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") unordered (s32) into `i32 addrspace(3)* undef`)
+    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") unordered (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -955,12 +955,12 @@ body:             |
 name:            atomicrmw_singlethread_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") monotonic (s32) into `i32 addrspace(3)* undef`)
+    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") monotonic (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -977,12 +977,12 @@ body:             |
 name:            atomicrmw_singlethread_acquire
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") acquire (s32) into `i32 addrspace(3)* undef`)
+    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") acquire (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -999,12 +999,12 @@ body:             |
 name:            atomicrmw_singlethread_release
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") release (s32) into `i32 addrspace(3)* undef`)
+    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") release (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -1021,12 +1021,12 @@ body:             |
 name:            atomicrmw_singlethread_acq_rel
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") acq_rel (s32) into `i32 addrspace(3)* undef`)
+    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") acq_rel (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...
@@ -1043,12 +1043,12 @@ body:             |
 name:            atomicrmw_singlethread_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") seq_cst (s32) into `i32 addrspace(3)* undef`)
+    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") seq_cst (s32) into `ptr addrspace(3) undef`)
     S_ENDPGM 0
 
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir b/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
index 721382cfba63e..3223a4bf2bb72 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
@@ -16,27 +16,27 @@ body:             |
     successors: %bb.1(0x30000000), %bb.2(0x50000000)
     liveins: $sgpr0_sgpr1, $sgpr3
 
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) undef`)
     $sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
-    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
     $sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
     $sgpr10 = S_MOV_B32 4294967295, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
     $sgpr11 = S_MOV_B32 15204352, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
     $vgpr0 = V_MOV_B32_e32 1, implicit $exec
-    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 4, 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(5)* undef`)
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 4, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) undef`)
     S_WAITCNT 127
     S_CMP_LG_U32 killed $sgpr2, 0, implicit-def $scc
     S_WAITCNT 3855
     $vgpr0 = V_MOV_B32_e32 2, implicit $exec
     $vgpr1 = V_MOV_B32_e32 32772, implicit $exec
-    BUFFER_STORE_DWORD_OFFEN killed $vgpr0, killed $vgpr1, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(5)* undef`)
+    BUFFER_STORE_DWORD_OFFEN killed $vgpr0, killed $vgpr1, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) undef`)
     S_CBRANCH_SCC0 %bb.1, implicit killed $scc
 
   bb.2:
     successors: %bb.3(0x80000000)
     liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) undef`)
     S_WAITCNT 3855
     $vgpr0 = V_MOV_B32_e32 32772, implicit $exec
     S_BRANCH %bb.3
@@ -45,7 +45,7 @@ body:             |
     successors: %bb.3(0x80000000)
     liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) undef`)
     S_WAITCNT 3855
     $vgpr0 = V_MOV_B32_e32 4, implicit $exec
 
@@ -55,11 +55,11 @@ body:             |
     S_WAITCNT 127
     $sgpr0 = S_LSHL_B32 killed $sgpr0, 2, implicit-def dead $scc
     $vgpr0 = V_ADD_CO_U32_e32 killed $sgpr0, killed $vgpr0, implicit-def dead $vcc, implicit $exec
-    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, implicit $exec :: (load syncscope("agent-one-as") unordered (s32) from `i32 addrspace(1)* undef`), (load syncscope("workgroup-one-as") seq_cst (s32) from `[8192 x i32] addrspace(5)* undef`)
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, implicit $exec :: (load syncscope("agent-one-as") unordered (s32) from `ptr addrspace(1) undef`), (load syncscope("workgroup-one-as") seq_cst (s32) from `ptr addrspace(5) undef`)
     $vgpr1 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit-def $vgpr1_vgpr2, implicit $sgpr4_sgpr5
     $vgpr2 = V_MOV_B32_e32 killed $sgpr5, implicit $exec, implicit $sgpr4_sgpr5, implicit $exec
     S_WAITCNT 3952
-    FLAT_STORE_DWORD killed $vgpr1_vgpr2, killed $vgpr0, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32 addrspace(1)* undef`)
+    FLAT_STORE_DWORD killed $vgpr1_vgpr2, killed $vgpr0, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr addrspace(1) undef`)
     S_ENDPGM 0
 
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-region.mir b/llvm/test/CodeGen/AMDGPU/memory-legalizer-region.mir
index f836cc0b44ed6..13133de8fd537 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-region.mir
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-region.mir
@@ -13,14 +13,14 @@
 name:            load_singlethread_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 1, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") unordered (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 1, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") unordered (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -37,14 +37,14 @@ body:             |
 name:            load_singlethread_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") monotonic (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") monotonic (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -61,14 +61,14 @@ body:             |
 name:            load_singlethread_acquire
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") acquire (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") acquire (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -85,14 +85,14 @@ body:             |
 name:            load_singlethread_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") seq_cst (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") seq_cst (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -109,14 +109,14 @@ body:             |
 name:            load_wavefront_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") unordered (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") unordered (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -133,14 +133,14 @@ body:             |
 name:            load_wavefront_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") monotonic (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") monotonic (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -157,14 +157,14 @@ body:             |
 name:            load_wavefront_acquire
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") acquire (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") acquire (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -181,14 +181,14 @@ body:             |
 name:            load_wavefront_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") seq_cst (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") seq_cst (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -205,14 +205,14 @@ body:             |
 name:            load_workgroup_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") unordered (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") unordered (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -229,14 +229,14 @@ body:             |
 name:            load_workgroup_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") monotonic (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") monotonic (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -253,14 +253,14 @@ body:             |
 name:            load_workgroup_acquire
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") acquire (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") acquire (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -277,14 +277,14 @@ body:             |
 name:            load_workgroup_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") seq_cst (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") seq_cst (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -301,14 +301,14 @@ body:             |
 name:            load_agent_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") unordered (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") unordered (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -325,14 +325,14 @@ body:             |
 name:            load_agent_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") monotonic (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") monotonic (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -349,14 +349,14 @@ body:             |
 name:            load_agent_acquire
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") acquire (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") acquire (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -373,14 +373,14 @@ body:             |
 name:            load_agent_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") seq_cst (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") seq_cst (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -397,14 +397,14 @@ body:             |
 name:            load_system_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") unordered (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") unordered (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -421,14 +421,14 @@ body:             |
 name:            load_system_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") monotonic (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") monotonic (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -445,14 +445,14 @@ body:             |
 name:            load_system_acquire
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") acquire (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") acquire (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -469,14 +469,14 @@ body:             |
 name:            load_system_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") seq_cst (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") seq_cst (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 
 ...
@@ -493,12 +493,12 @@ body:             |
 name:            store_singlethread_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") unordered (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") unordered (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -515,12 +515,12 @@ body:             |
 name:            store_singlethread_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") monotonic (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") monotonic (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -537,12 +537,12 @@ body:             |
 name:            store_singlethread_release
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") release (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") release (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -559,12 +559,12 @@ body:             |
 name:            store_singlethread_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") seq_cst (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") seq_cst (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -581,12 +581,12 @@ body:             |
 name:            store_wavefront_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") unordered (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") unordered (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -603,12 +603,12 @@ body:             |
 name:            store_wavefront_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") monotonic (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") monotonic (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -625,12 +625,12 @@ body:             |
 name:            store_wavefront_release
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") release (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") release (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -647,12 +647,12 @@ body:             |
 name:            store_wavefront_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") seq_cst (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") seq_cst (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -669,12 +669,12 @@ body:             |
 name:            store_workgroup_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") unordered (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") unordered (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -691,12 +691,12 @@ body:             |
 name:            store_workgroup_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") monotonic (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") monotonic (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -713,12 +713,12 @@ body:             |
 name:            store_workgroup_release
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") release (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") release (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -735,12 +735,12 @@ body:             |
 name:            store_workgroup_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") seq_cst (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") seq_cst (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -757,12 +757,12 @@ body:             |
 name:            store_agent_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") unordered (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") unordered (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -779,12 +779,12 @@ body:             |
 name:            store_agent_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") monotonic (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") monotonic (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -801,12 +801,12 @@ body:             |
 name:            store_agent_release
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") release (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") release (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -823,12 +823,12 @@ body:             |
 name:            store_agent_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") seq_cst (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") seq_cst (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -845,12 +845,12 @@ body:             |
 name:            store_system_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store unordered (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store unordered (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -867,12 +867,12 @@ body:             |
 name:            store_system_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store monotonic (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store monotonic (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -889,12 +889,12 @@ body:             |
 name:            store_system_release
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") release (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") release (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -911,12 +911,12 @@ body:             |
 name:            store_system_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") seq_cst (s32) into `i32 addrspace(2)* undef`)
+    DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") seq_cst (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -933,12 +933,12 @@ body:             |
 name:            atomicrmw_singlethread_unordered
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") unordered (s32) into `i32 addrspace(2)* undef`)
+    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") unordered (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -955,12 +955,12 @@ body:             |
 name:            atomicrmw_singlethread_monotonic
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") monotonic (s32) into `i32 addrspace(2)* undef`)
+    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") monotonic (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -977,12 +977,12 @@ body:             |
 name:            atomicrmw_singlethread_acquire
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") acquire (s32) into `i32 addrspace(2)* undef`)
+    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") acquire (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -999,12 +999,12 @@ body:             |
 name:            atomicrmw_singlethread_release
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") release (s32) into `i32 addrspace(2)* undef`)
+    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") release (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -1021,12 +1021,12 @@ body:             |
 name:            atomicrmw_singlethread_acq_rel
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") acq_rel (s32) into `i32 addrspace(2)* undef`)
+    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") acq_rel (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...
@@ -1043,12 +1043,12 @@ body:             |
 name:            atomicrmw_singlethread_seq_cst
 body:             |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") seq_cst (s32) into `i32 addrspace(2)* undef`)
+    $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") seq_cst (s32) into `ptr addrspace(2) undef`)
     S_ENDPGM 0
 
 ...

diff --git a/llvm/test/CodeGen/AMDGPU/merge-load-store-agpr.mir b/llvm/test/CodeGen/AMDGPU/merge-load-store-agpr.mir
index 7d0a273091fab..4feede7fc919f 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-load-store-agpr.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-load-store-agpr.mir
@@ -7,8 +7,8 @@ body:             |
   bb.0:
 
     %0:vgpr_32 = IMPLICIT_DEF
-    %1:vgpr_32 = DS_READ_B32_gfx9 %0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`)
-    %2:vgpr_32 = DS_READ_B32_gfx9 %0, 8, 0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`)
+    %1:vgpr_32 = DS_READ_B32_gfx9 %0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`)
+    %2:vgpr_32 = DS_READ_B32_gfx9 %0, 8, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`)
 ...
 
 # GCN-LABEL: name: ds_read_b32_a_a
@@ -18,8 +18,8 @@ body:             |
   bb.0:
 
     %0:vgpr_32 = IMPLICIT_DEF
-    %1:agpr_32 = DS_READ_B32_gfx9 %0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`)
-    %2:agpr_32 = DS_READ_B32_gfx9 %0, 8, 0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`)
+    %1:agpr_32 = DS_READ_B32_gfx9 %0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`)
+    %2:agpr_32 = DS_READ_B32_gfx9 %0, 8, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`)
 ...
 
 # GCN-LABEL: name: ds_read_b32_v_a
@@ -30,8 +30,8 @@ body:             |
   bb.0:
 
     %0:vgpr_32 = IMPLICIT_DEF
-    %1:vgpr_32 = DS_READ_B32_gfx9 %0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`)
-    %2:agpr_32 = DS_READ_B32_gfx9 %0, 8, 0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`)
+    %1:vgpr_32 = DS_READ_B32_gfx9 %0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`)
+    %2:agpr_32 = DS_READ_B32_gfx9 %0, 8, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`)
 ...
 
 # GCN-LABEL: name: ds_read_b32_a_v
@@ -42,8 +42,8 @@ body:             |
   bb.0:
 
     %0:vgpr_32 = IMPLICIT_DEF
-    %1:agpr_32 = DS_READ_B32_gfx9 %0, 8, 0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`)
-    %2:vgpr_32 = DS_READ_B32_gfx9 %0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`)
+    %1:agpr_32 = DS_READ_B32_gfx9 %0, 8, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`)
+    %2:vgpr_32 = DS_READ_B32_gfx9 %0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`)
 ...
 
 # GCN-LABEL: name: ds_write_b32_v_v
@@ -53,8 +53,8 @@ body:             |
   bb.0:
 
     %0:vgpr_32 = IMPLICIT_DEF
-    DS_WRITE_B32_gfx9 %0, undef %1:vgpr_32, 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`)
-    DS_WRITE_B32_gfx9 %0, undef %2:vgpr_32, 8, 0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32_gfx9 %0, undef %1:vgpr_32, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`)
+    DS_WRITE_B32_gfx9 %0, undef %2:vgpr_32, 8, 0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`)
 ...
 
 # GCN-LABEL: name: ds_write_b32_a_a
@@ -65,8 +65,8 @@ body:             |
   bb.0:
 
     %0:vgpr_32 = IMPLICIT_DEF
-    DS_WRITE_B32_gfx9 %0, undef %1:agpr_32, 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`)
-    DS_WRITE_B32_gfx9 %0, undef %2:agpr_32, 8, 0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32_gfx9 %0, undef %1:agpr_32, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`)
+    DS_WRITE_B32_gfx9 %0, undef %2:agpr_32, 8, 0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`)
 ...
 
 # GCN-LABEL: name: ds_write_b32_v_a
@@ -77,8 +77,8 @@ body:             |
   bb.0:
 
     %0:vgpr_32 = IMPLICIT_DEF
-    DS_WRITE_B32_gfx9 %0, undef %1:vgpr_32, 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`)
-    DS_WRITE_B32_gfx9 %0, undef %2:agpr_32, 8, 0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32_gfx9 %0, undef %1:vgpr_32, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`)
+    DS_WRITE_B32_gfx9 %0, undef %2:agpr_32, 8, 0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`)
 ...
 
 # GCN-LABEL: name: ds_write_b32_a_v
@@ -89,6 +89,6 @@ body:             |
   bb.0:
 
     %0:vgpr_32 = IMPLICIT_DEF
-    DS_WRITE_B32_gfx9 %0, undef %1:agpr_32, 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`)
-    DS_WRITE_B32_gfx9 %0, undef %2:vgpr_32, 8, 0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`)
+    DS_WRITE_B32_gfx9 %0, undef %1:agpr_32, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`)
+    DS_WRITE_B32_gfx9 %0, undef %2:vgpr_32, 8, 0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`)
 ...

diff --git a/llvm/test/CodeGen/AMDGPU/merge-out-of-order-ldst.mir b/llvm/test/CodeGen/AMDGPU/merge-out-of-order-ldst.mir
index 21149d33afa89..83e841e22c60f 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-out-of-order-ldst.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-out-of-order-ldst.mir
@@ -10,13 +10,13 @@ name:            out_of_order_merge
 body:             |
   bb.0:
     %4:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-    %5:vreg_64 = DS_READ_B64_gfx9 %4, 776, 0, implicit $exec :: (load (s64) from `double addrspace(3)* undef`, addrspace 3)
-    %6:vreg_64 = DS_READ_B64_gfx9 %4, 784, 0, implicit $exec :: (load (s64) from `double addrspace(3)* undef` + 8, addrspace 3)
-    %17:vreg_64 = DS_READ_B64_gfx9 %4, 840, 0, implicit $exec :: (load (s64) from `double addrspace(3)* undef`, addrspace 3)
-    DS_WRITE_B64_gfx9 %4, %17, 8, 0, implicit $exec :: (store (s64) into `double addrspace(3)* undef` + 8, addrspace 3)
-    DS_WRITE_B64_gfx9 %4, %6, 0, 0, implicit $exec :: (store (s64) into `double addrspace(3)* undef`, align 16, addrspace 3)
-    %24:vreg_64 = DS_READ_B64_gfx9 %4, 928, 0, implicit $exec :: (load (s64) from `double addrspace(3)* undef` + 8, addrspace 3)
-    DS_WRITE_B64_gfx9 undef %29:vgpr_32, %5, 0, 0, implicit $exec :: (store (s64) into `double addrspace(3)* undef`, addrspace 3)
+    %5:vreg_64 = DS_READ_B64_gfx9 %4, 776, 0, implicit $exec :: (load (s64) from `ptr addrspace(3) undef`, addrspace 3)
+    %6:vreg_64 = DS_READ_B64_gfx9 %4, 784, 0, implicit $exec :: (load (s64) from `ptr addrspace(3) undef` + 8, addrspace 3)
+    %17:vreg_64 = DS_READ_B64_gfx9 %4, 840, 0, implicit $exec :: (load (s64) from `ptr addrspace(3) undef`, addrspace 3)
+    DS_WRITE_B64_gfx9 %4, %17, 8, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) undef` + 8, addrspace 3)
+    DS_WRITE_B64_gfx9 %4, %6, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) undef`, align 16, addrspace 3)
+    %24:vreg_64 = DS_READ_B64_gfx9 %4, 928, 0, implicit $exec :: (load (s64) from `ptr addrspace(3) undef` + 8, addrspace 3)
+    DS_WRITE_B64_gfx9 undef %29:vgpr_32, %5, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) undef`, addrspace 3)
     S_ENDPGM 0
 
 ...

diff --git a/llvm/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir b/llvm/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir
index 8fa1ab9d4b97d..c8454531737f0 100644
--- a/llvm/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir
@@ -4,7 +4,7 @@
 # Check that constant is in SGPR registers
 
 --- |
-  define amdgpu_kernel void @const_to_sgpr(i32 addrspace(1)* nocapture %arg, i64 %id) {
+  define amdgpu_kernel void @const_to_sgpr(ptr addrspace(1) nocapture %arg, i64 %id) {
   bb:
     br i1 undef, label %bb1, label %bb2
 
@@ -15,7 +15,7 @@
     ret void
   }
 
-  define amdgpu_kernel void @const_to_sgpr_multiple_use(i32 addrspace(1)* nocapture %arg, i64 %id1, i64 %id2) {
+  define amdgpu_kernel void @const_to_sgpr_multiple_use(ptr addrspace(1) nocapture %arg, i64 %id1, i64 %id2) {
   bb:
     br i1 undef, label %bb1, label %bb2
 
@@ -26,7 +26,7 @@
     ret void
   }
 
-  define amdgpu_kernel void @const_to_sgpr_subreg(i32 addrspace(1)* nocapture %arg, i64 %id) {
+  define amdgpu_kernel void @const_to_sgpr_subreg(ptr addrspace(1) nocapture %arg, i64 %id) {
   bb:
     br i1 undef, label %bb1, label %bb2
 

diff --git a/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir b/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
index 5c708bd517a3b..039580f5dbe4a 100644
--- a/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
+++ b/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
@@ -6,7 +6,7 @@
     br i1 undef, label %if, label %end
 
   if:                                               ; preds = %main_body
-    %v.if = load volatile i32, i32 addrspace(1)* undef
+    %v.if = load volatile i32, ptr addrspace(1) undef
     br label %end
 
   end:                                              ; preds = %if, %main_body

diff --git a/llvm/test/CodeGen/AMDGPU/phi-elimination-end-cf.mir b/llvm/test/CodeGen/AMDGPU/phi-elimination-end-cf.mir
index c76effa3ef6c7..83c30507ce3ce 100644
--- a/llvm/test/CodeGen/AMDGPU/phi-elimination-end-cf.mir
+++ b/llvm/test/CodeGen/AMDGPU/phi-elimination-end-cf.mir
@@ -46,7 +46,7 @@ body:             |
     %15:sreg_32_xm0 = S_MOV_B32 61440
     %16:sreg_32_xm0 = S_MOV_B32 -1
     %17:sgpr_128 = REG_SEQUENCE undef %14:sreg_32_xm0, %subreg.sub0, undef %12:sreg_32_xm0, %subreg.sub1, %16, %subreg.sub2, %15, %subreg.sub3
-    BUFFER_STORE_DWORD_OFFSET %4, %17, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+    BUFFER_STORE_DWORD_OFFSET %4, %17, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`, addrspace 1)
     %19:vgpr_32 = COPY %4
     %20:sreg_64 = SI_IF %0, %bb.2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_BRANCH %bb.3

diff --git a/llvm/test/CodeGen/AMDGPU/regcoalesce-dbg.mir b/llvm/test/CodeGen/AMDGPU/regcoalesce-dbg.mir
index 4f54131bfe6a5..885907e4bee1e 100644
--- a/llvm/test/CodeGen/AMDGPU/regcoalesce-dbg.mir
+++ b/llvm/test/CodeGen/AMDGPU/regcoalesce-dbg.mir
@@ -8,7 +8,7 @@
 # CHECK: DBG_VALUE{{.*}} %13.sub2
 
 --- |
-  define amdgpu_kernel void @test(i32 addrspace(1)* %out) { ret void }
+  define amdgpu_kernel void @test(ptr addrspace(1) %out) { ret void }
 
   !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !4, producer: "llvm", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, retainedTypes: !4)
   !1 = !DILocalVariable(name: "a", scope: !2, file: !4, line: 126, type: !6)
@@ -56,8 +56,8 @@ body:             |
 
     %3 = COPY killed $vgpr0
     %0 = COPY killed $sgpr0_sgpr1
-    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
-    %5 = S_LOAD_DWORD_IMM killed %0, 13, 0 :: (non-temporal dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`)
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
+    %5 = S_LOAD_DWORD_IMM killed %0, 13, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) undef`)
     %18 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     undef %19.sub0 = COPY killed %3
     %19.sub1 = COPY killed %18

diff --git a/llvm/test/CodeGen/AMDGPU/scc-clobbered-sgpr-to-vmem-spill.ll b/llvm/test/CodeGen/AMDGPU/scc-clobbered-sgpr-to-vmem-spill.ll
index 959bc7f33426b..9d7797990287a 100644
--- a/llvm/test/CodeGen/AMDGPU/scc-clobbered-sgpr-to-vmem-spill.ll
+++ b/llvm/test/CodeGen/AMDGPU/scc-clobbered-sgpr-to-vmem-spill.ll
@@ -16,7 +16,7 @@
 ; CHECK-NEXT: - basic block: %bb.0
 ; CHECK-NEXT: - instruction: S_CBRANCH_SCC1 %bb.2, implicit killed $scc
 ; CHECK-NEXT: - operand 1:   implicit killed $scc
-define amdgpu_kernel void @kernel0(i32 addrspace(1)* %out, i32 %in) #1 {
+define amdgpu_kernel void @kernel0(ptr addrspace(1) %out, i32 %in) #1 {
   call void asm sideeffect "", "~{v[0:7]}" () #0
   call void asm sideeffect "", "~{v[8:15]}" () #0
   call void asm sideeffect "", "~{v[16:19]}"() #0

diff --git a/llvm/test/CodeGen/AMDGPU/sched-barrier-post-RA.mir b/llvm/test/CodeGen/AMDGPU/sched-barrier-post-RA.mir
index 56ab64e457f57..4f293d9e3f3c5 100644
--- a/llvm/test/CodeGen/AMDGPU/sched-barrier-post-RA.mir
+++ b/llvm/test/CodeGen/AMDGPU/sched-barrier-post-RA.mir
@@ -2,9 +2,9 @@
 # RUN: llc -march=amdgcn -mcpu=gfx908 -misched-cluster=false -run-pass=postmisched -verify-misched -o - %s | FileCheck %s
 
 --- |
-  define amdgpu_kernel void @no_sched_barrier(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_barrier_mask_0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_barrier_mask_1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
+  define amdgpu_kernel void @no_sched_barrier(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_barrier_mask_0(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_barrier_mask_1(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
 
   !0 = distinct !{!0}
   !1 = !{!1, !0}

diff --git a/llvm/test/CodeGen/AMDGPU/sched-barrier-pre-RA.mir b/llvm/test/CodeGen/AMDGPU/sched-barrier-pre-RA.mir
index fea30a37ab8c3..d846516183d37 100644
--- a/llvm/test/CodeGen/AMDGPU/sched-barrier-pre-RA.mir
+++ b/llvm/test/CodeGen/AMDGPU/sched-barrier-pre-RA.mir
@@ -2,21 +2,21 @@
 # RUN: llc -march=amdgcn -mcpu=gfx908 -misched-cluster=false -run-pass=machine-scheduler -verify-misched -o - %s | FileCheck %s
 
 --- |
-  define amdgpu_kernel void @no_sched_barrier(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_barrier_mask_0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_barrier_mask_1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_barrier_mask_2(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_barrier_mask_4(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_barrier_mask_8(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_barrier_mask_16(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_barrier_mask_32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_barrier_mask_64(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_barrier_mask_128(i32 addrspace(3)* noalias %out, i32 addrspace(3)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_barrier_mask_256(i32 addrspace(3)* noalias %out, i32 addrspace(3)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_barrier_mask_512(i32 addrspace(3)* noalias %out, i32 addrspace(3)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_barrier_masks_8_12(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_barrier_mask_4_bundle(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_barrier_mask_0_bundle(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
+  define amdgpu_kernel void @no_sched_barrier(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_barrier_mask_0(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_barrier_mask_1(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_barrier_mask_2(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_barrier_mask_4(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_barrier_mask_8(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_barrier_mask_16(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_barrier_mask_32(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_barrier_mask_64(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_barrier_mask_128(ptr addrspace(3) noalias %out, ptr addrspace(3) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_barrier_mask_256(ptr addrspace(3) noalias %out, ptr addrspace(3) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_barrier_mask_512(ptr addrspace(3) noalias %out, ptr addrspace(3) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_barrier_masks_8_12(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_barrier_mask_4_bundle(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_barrier_mask_0_bundle(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
 
   !0 = distinct !{!0}
   !1 = !{!1, !0}

diff --git a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir
index bf52d6f7a4ea4..67cc6ed52153a 100644
--- a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir
+++ b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir
@@ -3,10 +3,10 @@
 # RUN: llc -march=amdgcn -mcpu=gfx908 -amdgpu-igrouplp-exact-solver -run-pass=machine-scheduler -o - %s | FileCheck -check-prefix=EXACT %s
 
 --- |
-  define amdgpu_kernel void @sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_group_barrier_MFMA_VALU_and_SALU_alternating(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_group_barrier_2_separate_pipes(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_group_barrier_3_separate_pipes(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
+  define amdgpu_kernel void @sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_group_barrier_MFMA_VALU_and_SALU_alternating(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_group_barrier_2_separate_pipes(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_group_barrier_3_separate_pipes(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
 
   !0 = distinct !{!0}
   !1 = !{!1, !0}

diff --git a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir
index 6c10c8f09474c..dc3aae813c62f 100644
--- a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir
+++ b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir
@@ -3,10 +3,10 @@
 # RUN: llc -march=amdgcn -mcpu=gfx908 -misched-cluster=false -run-pass=machine-scheduler -amdgpu-igrouplp-exact-solver -verify-misched -o - %s | FileCheck -check-prefix=EXACT %s
 
 --- |
-  define amdgpu_kernel void @no_sched_group_barrier(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_group_barrier_1_VMEM_READ_1_VALU_5_MFMA_1_VMEM_READ_3_VALU_2_VMEM_WRITE(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_group_barrier_2_VMEM_1000_ALU_5_MFMA_2_VMEM_WRITE(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
-  define amdgpu_kernel void @sched_group_barrier_MFMA_VALU_and_SALU_alternating(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
+  define amdgpu_kernel void @no_sched_group_barrier(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_group_barrier_1_VMEM_READ_1_VALU_5_MFMA_1_VMEM_READ_3_VALU_2_VMEM_WRITE(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_group_barrier_2_VMEM_1000_ALU_5_MFMA_2_VMEM_WRITE(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
+  define amdgpu_kernel void @sched_group_barrier_MFMA_VALU_and_SALU_alternating(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
 
   !0 = distinct !{!0}
   !1 = !{!1, !0}

diff --git a/llvm/test/CodeGen/AMDGPU/schedule-ilp.mir b/llvm/test/CodeGen/AMDGPU/schedule-ilp.mir
index c2e6aae873cce..f1a8af42e6347 100644
--- a/llvm/test/CodeGen/AMDGPU/schedule-ilp.mir
+++ b/llvm/test/CodeGen/AMDGPU/schedule-ilp.mir
@@ -2,7 +2,7 @@
 # RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -verify-machineinstrs  -run-pass=machine-scheduler -verify-misched -o - %s | FileCheck %s
 
 --- |
-  define amdgpu_kernel void @schedule_ilp(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
+  define amdgpu_kernel void @schedule_ilp(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void }
 
   !0 = distinct !{!0}
   !1 = !{!1, !0}

diff --git a/llvm/test/CodeGen/AMDGPU/schedule-regpressure.mir b/llvm/test/CodeGen/AMDGPU/schedule-regpressure.mir
index 469a9eef4d6a5..b4755c3b51b41 100644
--- a/llvm/test/CodeGen/AMDGPU/schedule-regpressure.mir
+++ b/llvm/test/CodeGen/AMDGPU/schedule-regpressure.mir
@@ -62,7 +62,7 @@ body:             |
     liveins: $sgpr4_sgpr5
 
     %1 = COPY $sgpr4_sgpr5
-    %5 = S_LOAD_DWORD_IMM %1, 0, 0 :: (non-temporal dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`)
+    %5 = S_LOAD_DWORD_IMM %1, 0, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) undef`)
     $m0 = S_MOV_B32 -1
     %7 = COPY %5
     %6 = DS_READ_B32 %7, 0, 0, implicit $m0, implicit $exec

diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir b/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir
index 84e890d31c397..efbdbca9da6b7 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir
@@ -11,9 +11,9 @@
 
 --- |
 
-  define void @sgpr_spill_wrong_stack_id(float addrspace(1)* nocapture readnone %arg, float addrspace(1)* noalias %arg1) {
+  define void @sgpr_spill_wrong_stack_id(ptr addrspace(1) nocapture readnone %arg, ptr addrspace(1) noalias %arg1) {
   bb:
-    %tmp = load i32, i32 addrspace(1)* null, align 4
+    %tmp = load i32, ptr addrspace(1) null, align 4
     call void @func(i32 undef)
     call void @func(i32 %tmp)
     unreachable

diff --git a/llvm/test/CodeGen/AMDGPU/smem-no-clause-coalesced.mir b/llvm/test/CodeGen/AMDGPU/smem-no-clause-coalesced.mir
index 3c3ce442039e1..a0e20da9cf77a 100644
--- a/llvm/test/CodeGen/AMDGPU/smem-no-clause-coalesced.mir
+++ b/llvm/test/CodeGen/AMDGPU/smem-no-clause-coalesced.mir
@@ -36,7 +36,7 @@ body:             |
     %3.sub1:sgpr_128 = S_AND_B32 %2, 65535, implicit-def dead $scc
     %3.sub3:sgpr_128 = S_MOV_B32 151468
     %3.sub2:sgpr_128 = S_MOV_B32 -1
-    %7.sub0:sreg_64_xexec = S_LOAD_DWORD_IMM %7, 48, 0 :: (load (s32) from `i8 addrspace(4)* undef`, addrspace 4)
+    %7.sub0:sreg_64_xexec = S_LOAD_DWORD_IMM %7, 48, 0 :: (load (s32) from `ptr addrspace(4) undef`, addrspace 4)
     %8:sreg_64_xexec = S_BUFFER_LOAD_DWORDX2_IMM %3, 640, 0 :: (dereferenceable invariant load (s64))
     undef %9.sub0:vreg_128 = V_LSHL_ADD_U32_e64 %6, 4, %4, implicit $exec
     %9.sub1:vreg_128 = V_LSHL_ADD_U32_e64 %5, 4, %0, implicit $exec

diff --git a/llvm/test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir b/llvm/test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir
index 103a8c025fe23..f9853017b9d3f 100644
--- a/llvm/test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir
+++ b/llvm/test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir
@@ -44,7 +44,7 @@ body:             |
     liveins: $vgpr0, $sgpr4_sgpr5, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr13
 
     $vgpr1_vgpr2 = COPY killed $sgpr4_sgpr5, implicit $exec
-    $vgpr1 = GLOBAL_LOAD_UBYTE killed $vgpr1_vgpr2, 0, 0, implicit $exec :: (non-temporal dereferenceable invariant load (s8) from `i1 addrspace(4)* undef`)
+    $vgpr1 = GLOBAL_LOAD_UBYTE killed $vgpr1_vgpr2, 0, 0, implicit $exec :: (non-temporal dereferenceable invariant load (s8) from `ptr addrspace(4) undef`)
     $vcc = V_CMP_NE_U32_e64 0, $vgpr0, implicit $exec
     $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 1, killed $vgpr1, implicit $exec
     $vgpr1 = V_CNDMASK_B32_e64 0, 0, 0, -1, killed $sgpr0_sgpr1, implicit $exec
@@ -109,7 +109,7 @@ body:             |
     liveins: $vgpr0, $sgpr4_sgpr5, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr13
 
     $vgpr1_vgpr2 = COPY killed $sgpr4_sgpr5, implicit $exec
-    $vgpr1 = GLOBAL_LOAD_UBYTE killed $vgpr1_vgpr2, 0, 0, implicit $exec :: (non-temporal dereferenceable invariant load (s8) from `i1 addrspace(4)* undef`)
+    $vgpr1 = GLOBAL_LOAD_UBYTE killed $vgpr1_vgpr2, 0, 0, implicit $exec :: (non-temporal dereferenceable invariant load (s8) from `ptr addrspace(4) undef`)
     $vcc = V_CMP_NE_U32_e64 0, $vgpr0, implicit $exec
     $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 1, killed $vgpr1, implicit $exec
     $vgpr1 = V_CNDMASK_B32_e64 0, 0, 0, -1, killed $sgpr0_sgpr1, implicit $exec

diff --git a/llvm/test/CodeGen/AMDGPU/waitcnt-agpr.mir b/llvm/test/CodeGen/AMDGPU/waitcnt-agpr.mir
index 9841a8cd0b107..fe0afd28a3466 100644
--- a/llvm/test/CodeGen/AMDGPU/waitcnt-agpr.mir
+++ b/llvm/test/CodeGen/AMDGPU/waitcnt-agpr.mir
@@ -2,10 +2,10 @@
 # RUN: llc -march=amdgcn -mcpu=gfx90a -run-pass si-insert-waitcnts -verify-machineinstrs %s -o - | FileCheck -check-prefix=GCN %s
 
 --- |
-  define amdgpu_kernel void @flat_zero_waitcnt(i32 addrspace(1)* %global4,
-                                 <4 x i32> addrspace(1)* %global16,
-                                 i32* %flat4,
-                                 <4 x i32>* %flat16) {
+  define amdgpu_kernel void @flat_zero_waitcnt(ptr addrspace(1) %global4,
+                                 ptr addrspace(1) %global16,
+                                 ptr %flat4,
+                                 ptr %flat16) {
     ret void
   }
 

diff --git a/llvm/test/CodeGen/AMDGPU/waitcnt-back-edge-loop.mir b/llvm/test/CodeGen/AMDGPU/waitcnt-back-edge-loop.mir
index 9619808755140..cfad6f4a6c432 100644
--- a/llvm/test/CodeGen/AMDGPU/waitcnt-back-edge-loop.mir
+++ b/llvm/test/CodeGen/AMDGPU/waitcnt-back-edge-loop.mir
@@ -13,8 +13,8 @@ body:             |
 
     $vgpr1 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr1_vgpr2
     $vgpr2 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr1_vgpr2
-    $vgpr4 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32) from `float addrspace(1)* null`, addrspace 1)
-    $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32) from `float addrspace(1)* null`, addrspace 1)
+    $vgpr4 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32) from `ptr addrspace(1) null`, addrspace 1)
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32) from `ptr addrspace(1) null`, addrspace 1)
     $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 3, killed $sgpr4, implicit $exec
     $vgpr3 = V_CNDMASK_B32_e64 0, -1082130432, 0, 1065353216, killed $sgpr0_sgpr1, implicit $exec
     $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec, implicit $exec
@@ -23,7 +23,7 @@ body:             |
   bb.3:
     successors: %bb.1
 
-    $vgpr5 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32) from `float addrspace(1)* null`, addrspace 1)
+    $vgpr5 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32) from `ptr addrspace(1) null`, addrspace 1)
 
   bb.1:
     successors: %bb.5, %bb.2
@@ -43,7 +43,7 @@ body:             |
   bb.4:
     successors: %bb.3, %bb.1
 
-    $vgpr5 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32) from `float addrspace(1)* null`, addrspace 1)
+    $vgpr5 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32) from `ptr addrspace(1) null`, addrspace 1)
     $vgpr4 = V_CVT_I32_F32_e32 $vgpr5, implicit $mode, implicit $exec
     V_CMP_EQ_U32_e32 2, killed $vgpr4, implicit-def $vcc, implicit $exec
     $vcc = S_AND_B64 $exec, killed $vcc, implicit-def dead $scc

diff --git a/llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.mir b/llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.mir
index d20d7306674af..01ebe4422b2ff 100644
--- a/llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.mir
+++ b/llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.mir
@@ -12,7 +12,7 @@ machineFunctionInfo:
 body: |
   bb.0:
     liveins: $sgpr0_sgpr1
-    $sgpr4 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`)
+    $sgpr4 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`)
     S_WAITCNT_VSCNT undef $sgpr_null, 0
     $vgpr0 = GLOBAL_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 0, 1, implicit $exec :: (load store syncscope("agent") seq_cst (s32), addrspace 1)
     S_CMP_LG_U32 killed $sgpr4, 0, implicit-def $scc

diff --git a/llvm/test/CodeGen/AMDGPU/waitcnt.mir b/llvm/test/CodeGen/AMDGPU/waitcnt.mir
index b7da7115d0bd6..5cc43fdc6662f 100644
--- a/llvm/test/CodeGen/AMDGPU/waitcnt.mir
+++ b/llvm/test/CodeGen/AMDGPU/waitcnt.mir
@@ -2,10 +2,10 @@
 # RUN: llc -march=amdgcn -mcpu=gfx900 -run-pass si-insert-waitcnts  %s -o - | FileCheck -check-prefixes=CHECK,GFX89 %s
 
 --- |
-  define amdgpu_kernel void @flat_zero_waitcnt(i32 addrspace(1)* %global4,
-                                 <4 x i32> addrspace(1)* %global16,
-                                 i32* %flat4,
-                                 <4 x i32>* %flat16) {
+  define amdgpu_kernel void @flat_zero_waitcnt(ptr addrspace(1) %global4,
+                                 ptr addrspace(1) %global16,
+                                 ptr %flat4,
+                                 ptr %flat16) {
     ret void
   }
 
@@ -350,12 +350,12 @@ body: |
 name: waitcnt_backedge
 body: |
   bb.0:
-    renamable $sgpr4_sgpr5_sgpr6_sgpr7 = S_LOAD_DWORDX4_IMM renamable $sgpr2_sgpr3, 32, 0 :: (load (s128) from `i32 addrspace(4)* undef`, addrspace 4)
+    renamable $sgpr4_sgpr5_sgpr6_sgpr7 = S_LOAD_DWORDX4_IMM renamable $sgpr2_sgpr3, 32, 0 :: (load (s128) from `ptr addrspace(4) undef`, addrspace 4)
 
   bb.4:
     renamable $sgpr10_sgpr11 = S_CSELECT_B64 -1, 0, implicit killed $scc
     renamable $vgpr1 = BUFFER_LOAD_DWORD_OFFEN killed renamable $vgpr5, renamable $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 1, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4)
-    renamable $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM killed renamable $sgpr0_sgpr1, 0, 0 :: (load (s64) from `i32 addrspace(4)* undef`, align 4, addrspace 4)
+    renamable $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM killed renamable $sgpr0_sgpr1, 0, 0 :: (load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     S_CBRANCH_SCC0 %bb.9, implicit killed $scc
 
   bb.9:

diff --git a/llvm/test/CodeGen/ARM/ARMLoadStoreDBG.mir b/llvm/test/CodeGen/ARM/ARMLoadStoreDBG.mir
index d3c58ad6c3cc5..812ac23f151fa 100644
--- a/llvm/test/CodeGen/ARM/ARMLoadStoreDBG.mir
+++ b/llvm/test/CodeGen/ARM/ARMLoadStoreDBG.mir
@@ -8,17 +8,17 @@
   %struct.s = type opaque
 
   ; Function Attrs: nounwind
-  define arm_aapcscc i32 @f(%struct.s* %s, i32 %u, i8* %b, i32 %n) #0 !dbg !4 {
+  define arm_aapcscc i32 @f(ptr %s, i32 %u, ptr %b, i32 %n) #0 !dbg !4 {
   entry:
-    tail call void @llvm.dbg.value(metadata %struct.s* %s, i64 0, metadata !18, metadata !27), !dbg !28
+    tail call void @llvm.dbg.value(metadata ptr %s, i64 0, metadata !18, metadata !27), !dbg !28
     tail call void @llvm.dbg.value(metadata i32 %u, i64 0, metadata !19, metadata !27), !dbg !28
-    tail call void @llvm.dbg.value(metadata i8* %b, i64 0, metadata !20, metadata !27), !dbg !28
+    tail call void @llvm.dbg.value(metadata ptr %b, i64 0, metadata !20, metadata !27), !dbg !28
     tail call void @llvm.dbg.value(metadata i32 %n, i64 0, metadata !21, metadata !27), !dbg !28
     %cmp = icmp ult i32 %n, 4, !dbg !29
     br i1 %cmp, label %return, label %if.end, !dbg !31
 
   if.end:                                           ; preds = %entry
-    tail call arm_aapcscc void @g(%struct.s* %s, i8* %b, i32 %n) #3, !dbg !32
+    tail call arm_aapcscc void @g(ptr %s, ptr %b, i32 %n) #3, !dbg !32
     br label %return, !dbg !33
 
   return:                                           ; preds = %if.end, %entry
@@ -26,7 +26,7 @@
     ret i32 %retval.0, !dbg !34
   }
 
-  declare arm_aapcscc void @g(%struct.s*, i8*, i32) #1
+  declare arm_aapcscc void @g(ptr, ptr, i32) #1
 
   ; Function Attrs: nounwind readnone
   declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #2

diff --git a/llvm/test/CodeGen/ARM/GlobalISel/select-pr35926.mir b/llvm/test/CodeGen/ARM/GlobalISel/select-pr35926.mir
index 6bd35b05851e4..a6fc4dad49fd2 100644
--- a/llvm/test/CodeGen/ARM/GlobalISel/select-pr35926.mir
+++ b/llvm/test/CodeGen/ARM/GlobalISel/select-pr35926.mir
@@ -11,7 +11,7 @@
   }
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
 
   attributes #0 = { nounwind readnone speculatable "target-features"="+vfp4" }
   attributes #1 = { "target-features"="+vfp4" }

diff --git a/llvm/test/CodeGen/ARM/cmp2-peephole-thumb.mir b/llvm/test/CodeGen/ARM/cmp2-peephole-thumb.mir
index 96a02344ee79f..0b445261e8786 100644
--- a/llvm/test/CodeGen/ARM/cmp2-peephole-thumb.mir
+++ b/llvm/test/CodeGen/ARM/cmp2-peephole-thumb.mir
@@ -15,21 +15,21 @@
     %retval = alloca i32, align 4
     %mul = alloca i32, align 4
     %mul1 = mul nsw i32 %a, %b
-    store i32 %mul1, i32* %mul, align 4
-    %0 = load i32, i32* %mul, align 4
+    store i32 %mul1, ptr %mul, align 4
+    %0 = load i32, ptr %mul, align 4
     %cmp = icmp sle i32 %0, 0
     br i1 %cmp, label %if.then, label %if.end
 
   if.then:                                          ; preds = %entry
-    store i32 42, i32* %retval, align 4
+    store i32 42, ptr %retval, align 4
     br label %return
 
   if.end:                                           ; preds = %entry
-    store i32 1, i32* %retval, align 4
+    store i32 1, ptr %retval, align 4
     br label %return
 
   return:                                           ; preds = %if.end, %if.then
-    %1 = load i32, i32* %retval, align 4
+    %1 = load i32, ptr %retval, align 4
     ret i32 %1
   }
 

diff --git a/llvm/test/CodeGen/ARM/cmse-clear-float-bigend.mir b/llvm/test/CodeGen/ARM/cmse-clear-float-bigend.mir
index a5774e49f5e3e..47f4e1a0a90f6 100644
--- a/llvm/test/CodeGen/ARM/cmse-clear-float-bigend.mir
+++ b/llvm/test/CodeGen/ARM/cmse-clear-float-bigend.mir
@@ -4,17 +4,17 @@
   target triple = "thumbebv8m.main-arm-none-eabi"
 
   ; Function Attrs: cmse_nonsecure_entry nounwind
-  define hidden arm_aapcs_vfpcc void @secure_foo(void (double, double, double, double, double, double, double, double)* %fptr) local_unnamed_addr #0 {
+  define hidden arm_aapcs_vfpcc void @secure_foo(ptr %fptr) local_unnamed_addr #0 {
   entry:
-    %0 = ptrtoint void (double, double, double, double, double, double, double, double)* %fptr to i32
+    %0 = ptrtoint ptr %fptr to i32
     %and = and i32 %0, -2
-    %1 = inttoptr i32 %and to void (double, double, double, double, double, double, double, double)*
+    %1 = inttoptr i32 %and to ptr
     call arm_aapcs_vfpcc void %1(double 0.000000e+00, double 1.000000e+00, double 2.000000e+00, double 3.000000e+00, double 4.000000e+00, double 5.000000e+00, double 6.000000e+00, double 7.000000e+00) #2
     ret void
   }
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
 
   attributes #0 = { "cmse_nonsecure_entry" nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+8msecext,+armv8-m.main,-d32,-fp64,+fp-armv8,+hwdiv,+thumb-mode,-crypto,-fullfp16,-neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { nounwind }

diff --git a/llvm/test/CodeGen/ARM/codesize-ifcvt.mir b/llvm/test/CodeGen/ARM/codesize-ifcvt.mir
index 32c4254996447..3acbcf127d8a3 100644
--- a/llvm/test/CodeGen/ARM/codesize-ifcvt.mir
+++ b/llvm/test/CodeGen/ARM/codesize-ifcvt.mir
@@ -14,7 +14,7 @@
     br label %b5
 
   b3:                                               ; preds = %b1
-    %v1 = load i32, i32* undef, align 4
+    %v1 = load i32, ptr undef, align 4
     %v2 = and i32 %v1, 256
     br label %b5
 
@@ -48,7 +48,7 @@
     br label %b5
 
   b3:                                               ; preds = %b1
-    %v1 = load i32, i32* undef, align 4
+    %v1 = load i32, ptr undef, align 4
     %v2 = and i32 %v1, 256
     br label %b5
 
@@ -82,7 +82,7 @@
     br label %b5
 
   b3:                                               ; preds = %b1
-    %v1 = load i32, i32* undef, align 4
+    %v1 = load i32, ptr undef, align 4
     %v2 = and i32 %v1, 256
     br label %b5
 
@@ -106,7 +106,7 @@
   declare i32 @extfunc()
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
 
   attributes #0 = { optsize }
   attributes #1 = { minsize }
@@ -179,7 +179,7 @@ body:             |
   ; CHECK-V7:   t2B %bb.4, 14 /* CC::al */, $noreg
   ; CHECK-V7: bb.3.b3:
   ; CHECK-V7:   successors: %bb.4(0x80000000)
-  ; CHECK-V7:   renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `i32* undef`)
+  ; CHECK-V7:   renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `ptr undef`)
   ; CHECK-V7:   renamable $r0 = t2ANDri killed renamable $r0, 256, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-V7: bb.4.b5:
   ; CHECK-V7:   successors: %bb.5(0x50000000)
@@ -213,7 +213,7 @@ body:             |
   ; CHECK-V8:   t2B %bb.4, 14 /* CC::al */, $noreg
   ; CHECK-V8: bb.3.b3:
   ; CHECK-V8:   successors: %bb.4(0x80000000)
-  ; CHECK-V8:   renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `i32* undef`)
+  ; CHECK-V8:   renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `ptr undef`)
   ; CHECK-V8:   renamable $r0 = t2ANDri killed renamable $r0, 256, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-V8: bb.4.b5:
   ; CHECK-V8:   successors: %bb.5(0x50000000)
@@ -253,7 +253,7 @@ body:             |
   bb.3.b3:
     successors: %bb.4(0x80000000)
 
-    renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14, $noreg :: (load (s32) from `i32* undef`)
+    renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14, $noreg :: (load (s32) from `ptr undef`)
     renamable $r0 = t2ANDri killed renamable $r0, 256, 14, $noreg, $noreg
 
   bb.4.b5:
@@ -341,7 +341,7 @@ body:             |
   ; CHECK-V7:   t2B %bb.4, 14 /* CC::al */, $noreg
   ; CHECK-V7: bb.3.b3:
   ; CHECK-V7:   successors: %bb.4(0x80000000)
-  ; CHECK-V7:   renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `i32* undef`)
+  ; CHECK-V7:   renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `ptr undef`)
   ; CHECK-V7:   renamable $r0 = t2ANDri killed renamable $r0, 256, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-V7: bb.4.b5:
   ; CHECK-V7:   successors: %bb.5(0x30000000), %bb.6(0x50000000)
@@ -378,7 +378,7 @@ body:             |
   ; CHECK-V8:   t2B %bb.4, 14 /* CC::al */, $noreg
   ; CHECK-V8: bb.3.b3:
   ; CHECK-V8:   successors: %bb.4(0x80000000)
-  ; CHECK-V8:   renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `i32* undef`)
+  ; CHECK-V8:   renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `ptr undef`)
   ; CHECK-V8:   renamable $r0 = t2ANDri killed renamable $r0, 256, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-V8: bb.4.b5:
   ; CHECK-V8:   successors: %bb.5(0x30000000), %bb.6(0x50000000)
@@ -421,7 +421,7 @@ body:             |
   bb.3.b3:
     successors: %bb.4(0x80000000)
 
-    renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14, $noreg :: (load (s32) from `i32* undef`)
+    renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14, $noreg :: (load (s32) from `ptr undef`)
     renamable $r0 = t2ANDri killed renamable $r0, 256, 14, $noreg, $noreg
 
   bb.4.b5:
@@ -509,7 +509,7 @@ body:             |
   ; CHECK-V7:   t2B %bb.4, 14 /* CC::al */, $noreg
   ; CHECK-V7: bb.3.b3:
   ; CHECK-V7:   successors: %bb.4(0x80000000)
-  ; CHECK-V7:   renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `i32* undef`)
+  ; CHECK-V7:   renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `ptr undef`)
   ; CHECK-V7:   renamable $r0 = t2ANDri killed renamable $r0, 256, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-V7: bb.4.b5:
   ; CHECK-V7:   successors: %bb.5(0x30000000), %bb.6(0x50000000)
@@ -546,7 +546,7 @@ body:             |
   ; CHECK-V8:   t2B %bb.4, 14 /* CC::al */, $noreg
   ; CHECK-V8: bb.3.b3:
   ; CHECK-V8:   successors: %bb.4(0x80000000)
-  ; CHECK-V8:   renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `i32* undef`)
+  ; CHECK-V8:   renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `ptr undef`)
   ; CHECK-V8:   renamable $r0 = t2ANDri killed renamable $r0, 256, 14 /* CC::al */, $noreg, $noreg
   ; CHECK-V8: bb.4.b5:
   ; CHECK-V8:   successors: %bb.5(0x30000000), %bb.6(0x50000000)
@@ -589,7 +589,7 @@ body:             |
   bb.3.b3:
     successors: %bb.4(0x80000000)
 
-    renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14, $noreg :: (load (s32) from `i32* undef`)
+    renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14, $noreg :: (load (s32) from `ptr undef`)
     renamable $r0 = t2ANDri killed renamable $r0, 256, 14, $noreg, $noreg
 
   bb.4.b5:

diff --git a/llvm/test/CodeGen/ARM/const-load-align-thumb.mir b/llvm/test/CodeGen/ARM/const-load-align-thumb.mir
index 7b2697d0f2195..3bab48959cb01 100644
--- a/llvm/test/CodeGen/ARM/const-load-align-thumb.mir
+++ b/llvm/test/CodeGen/ARM/const-load-align-thumb.mir
@@ -6,8 +6,8 @@
   define hidden i32 @main() {
   entry:
     %P5 = alloca half, align 2
-    store half 0xH3FE0, half* %P5, align 2
-    %0 = load half, half* %P5, align 2
+    store half 0xH3FE0, ptr %P5, align 2
+    %0 = load half, ptr %P5, align 2
     call void @z_bar(half %0)
     ret i32 0
   }

diff --git a/llvm/test/CodeGen/ARM/dbg-range-extension.mir b/llvm/test/CodeGen/ARM/dbg-range-extension.mir
index 75eb466f7cb41..04bfc1d8f09a8 100644
--- a/llvm/test/CodeGen/ARM/dbg-range-extension.mir
+++ b/llvm/test/CodeGen/ARM/dbg-range-extension.mir
@@ -104,7 +104,7 @@
   declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #2
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #3
+  declare void @llvm.stackprotector(ptr, ptr) #3
   
   attributes #0 = { minsize nounwind optsize }
   attributes #1 = { minsize optsize }

diff --git a/llvm/test/CodeGen/ARM/fp16-litpool-arm.mir b/llvm/test/CodeGen/ARM/fp16-litpool-arm.mir
index d1703009e219f..8e671c903adda 100644
--- a/llvm/test/CodeGen/ARM/fp16-litpool-arm.mir
+++ b/llvm/test/CodeGen/ARM/fp16-litpool-arm.mir
@@ -10,17 +10,17 @@
 
   declare i32 @llvm.arm.space(i32, i32) #0
 
-  define dso_local i32 @ARM(i64* %LL, i32 %A.coerce) local_unnamed_addr #1 {
+  define dso_local i32 @ARM(ptr %LL, i32 %A.coerce) local_unnamed_addr #1 {
   entry:
     %S = alloca half, align 2
     %tmp.0.extract.trunc = trunc i32 %A.coerce to i16
     %0 = bitcast i16 %tmp.0.extract.trunc to half
-    store volatile half 0xH3C00, half* %S, align 2
-    store volatile i64 4242424242424242, i64* %LL, align 8
+    store volatile half 0xH3C00, ptr %S, align 2
+    store volatile i64 4242424242424242, ptr %LL, align 8
     %1 = call i32 @llvm.arm.space(i32 8920, i32 undef)
-    %S.0.S.0.570 = load volatile half, half* %S, align 2
+    %S.0.S.0.570 = load volatile half, ptr %S, align 2
     %add298 = fadd half %S.0.S.0.570, 0xH2E66
-    store volatile half %add298, half* %S, align 2
+    store volatile half %add298, ptr %S, align 2
     %2 = call i32 @llvm.arm.space(i32 1350, i32 undef)
     %3 = bitcast half %add298 to i16
     %tmp343.0.insert.ext = zext i16 %3 to i32

diff --git a/llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir b/llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir
index ca89912fafa0f..03ddd80ed0ead 100644
--- a/llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir
+++ b/llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir
@@ -16,14 +16,14 @@
     %S = alloca half, align 2
     %tmp.0.extract.trunc = trunc i32 %A.coerce to i16
     %0 = bitcast i16 %tmp.0.extract.trunc to half
-    store volatile float 4.200000e+01, float* %F, align 4
-    store volatile half 0xH3C00, half* %S, align 2
-    %S.0.S.0.142 = load volatile half, half* %S, align 2
+    store volatile float 4.200000e+01, ptr %F, align 4
+    store volatile half 0xH3C00, ptr %S, align 2
+    %S.0.S.0.142 = load volatile half, ptr %S, align 2
     %1 = call i32 @llvm.arm.space(i32 1230, i32 undef)
     %add42 = fadd half %S.0.S.0.142, 0xH2E66
-    store volatile half %add42, half* %S, align 2
+    store volatile half %add42, ptr %S, align 2
     %2 = call i32 @llvm.arm.space(i32 1330, i32 undef)
-    %S.0.S.0.119 = load volatile half, half* %S, align 2
+    %S.0.S.0.119 = load volatile half, ptr %S, align 2
     %3 = bitcast half %add42 to i16
     %tmp87.0.insert.ext = zext i16 %3 to i32
     ret i32 %tmp87.0.insert.ext

diff --git a/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir b/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir
index 065cc3b814a14..bd343ebef26ad 100644
--- a/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir
+++ b/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir
@@ -14,8 +14,8 @@
   define dso_local i32 @CP() #1 {
   entry:
     %res = alloca half, align 2
-    store half 0xH706B, half* %res, align 2
-    %0 = load half, half* %res, align 2
+    store half 0xH706B, ptr %res, align 2
+    %0 = load half, ptr %res, align 2
     %tobool = fcmp une half %0, 0xH0000
     br i1 %tobool, label %LA, label %END
 
@@ -29,7 +29,7 @@
   }
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
 
   attributes #0 = { nounwind "target-features"="+v8.2a,+fullfp16" }
   attributes #1 = { "target-features"="+v8.2a,+fullfp16" }

diff --git a/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir b/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir
index 38348e5b67e1e..1f8e6b0ad4216 100644
--- a/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir
+++ b/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir
@@ -15,8 +15,8 @@
   define dso_local i32 @CP() #1 {
   entry:
     %res = alloca half, align 2
-    store half 0xH706B, half* %res, align 2
-    %0 = load half, half* %res, align 2
+    store half 0xH706B, ptr %res, align 2
+    %0 = load half, ptr %res, align 2
     %tobool = fcmp une half %0, 0xH0000
     br i1 %tobool, label %LA, label %END
 
@@ -30,7 +30,7 @@
   }
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
 
   attributes #0 = { nounwind "target-features"="+v8.2a,+fullfp16" }
   attributes #1 = { "target-features"="+v8.2a,+fullfp16" }

diff --git a/llvm/test/CodeGen/ARM/ifcvt-diamond-unanalyzable-common.mir b/llvm/test/CodeGen/ARM/ifcvt-diamond-unanalyzable-common.mir
index 98a41cee69513..ab788f7011a12 100644
--- a/llvm/test/CodeGen/ARM/ifcvt-diamond-unanalyzable-common.mir
+++ b/llvm/test/CodeGen/ARM/ifcvt-diamond-unanalyzable-common.mir
@@ -6,11 +6,11 @@
 --- |
   target triple = "thumbv7-unknown-linux-gnueabi"
 
-  define dso_local i8* @fn1() {
+  define dso_local ptr @fn1() {
   entry:
     br label %l_yes
   l_yes:
-    ret i8* blockaddress(@fn1, %l_yes)
+    ret ptr blockaddress(@fn1, %l_yes)
   }
 
   declare dso_local i32 @fn2(...)

diff --git a/llvm/test/CodeGen/ARM/machine-sink-multidef.mir b/llvm/test/CodeGen/ARM/machine-sink-multidef.mir
index 5952538af95d1..a91cedeed841e 100644
--- a/llvm/test/CodeGen/ARM/machine-sink-multidef.mir
+++ b/llvm/test/CodeGen/ARM/machine-sink-multidef.mir
@@ -9,12 +9,12 @@
 
   @e = external constant [2 x %struct.anon], align 4
 
-  define arm_aapcscc void @g(i32 * noalias %a, i32 *%b, i32 %x) {
+  define arm_aapcscc void @g(ptr noalias %a, ptr %b, i32 %x) {
   entry:
-    %c = getelementptr inbounds [2 x %struct.anon], [2 x %struct.anon]* @e, i32 0, i32 %x, i32 0
-    %l1 = load i32, i32* %c, align 4
-    %d = getelementptr inbounds [2 x %struct.anon], [2 x %struct.anon]* @e, i32 0, i32 %x, i32 1
-    %l2 = load i32, i32* %d, align 4
+    %c = getelementptr inbounds [2 x %struct.anon], ptr @e, i32 0, i32 %x, i32 0
+    %l1 = load i32, ptr %c, align 4
+    %d = getelementptr inbounds [2 x %struct.anon], ptr @e, i32 0, i32 %x, i32 1
+    %l2 = load i32, ptr %d, align 4
     br i1 undef, label %land.lhs.true, label %if.end
 
   land.lhs.true:                                    ; preds = %entry

diff --git a/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir b/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir
index 06b34acd4e05d..be5340bd51abb 100644
--- a/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir
+++ b/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir
@@ -17,11 +17,11 @@
 
   define i64 @foo(i16 signext %a, i16 signext %b) {
   entry:
-    %0 = load i32, i32* @g1, align 4
-    %1 = load i32, i32* @g2, align 4
+    %0 = load i32, ptr @g1, align 4
+    %1 = load i32, ptr @g2, align 4
     %2 = add nuw nsw i32 %0, %0
     %3 = sdiv i32 %2, %1
-    store i32 %3, i32* @g1, align 4
+    store i32 %3, ptr @g1, align 4
     %d = mul nsw i16 %a, %a
     %e = mul nsw i16 %b, %b
     %f = add nuw nsw i16 %e, %d

diff --git a/llvm/test/CodeGen/ARM/noreturn-csr-skip.mir b/llvm/test/CodeGen/ARM/noreturn-csr-skip.mir
index 2cf0bc80ae73c..07f12c0bc8cd8 100644
--- a/llvm/test/CodeGen/ARM/noreturn-csr-skip.mir
+++ b/llvm/test/CodeGen/ARM/noreturn-csr-skip.mir
@@ -10,7 +10,7 @@
   define void @noret() noreturn nounwind {
   start:
     %p = alloca i32
-    store i32 42, i32* %p
+    store i32 42, ptr %p
     unreachable
   }
 ...

diff --git a/llvm/test/CodeGen/ARM/pei-swiftself.mir b/llvm/test/CodeGen/ARM/pei-swiftself.mir
index cd75589328618..f7b702f58e853 100644
--- a/llvm/test/CodeGen/ARM/pei-swiftself.mir
+++ b/llvm/test/CodeGen/ARM/pei-swiftself.mir
@@ -1,6 +1,6 @@
 # RUN: llc -o - %s -mtriple=arm-- -run-pass prologepilog | FileCheck %s
 --- |
-  define swiftcc i8* @need_emergency_slot(i8 *swiftself %v) {
+  define swiftcc ptr @need_emergency_slot(ptr swiftself %v) {
     ; Just a dummy to add a swiftself bit. The real code is in the MI below.
     unreachable
   }

diff --git a/llvm/test/CodeGen/ARM/prera-ldst-aliasing.mir b/llvm/test/CodeGen/ARM/prera-ldst-aliasing.mir
index 0d1ea4891614c..689aa3d70f03b 100644
--- a/llvm/test/CodeGen/ARM/prera-ldst-aliasing.mir
+++ b/llvm/test/CodeGen/ARM/prera-ldst-aliasing.mir
@@ -2,14 +2,14 @@
 --- |
   target triple = "thumbv7---eabi"
 
-  define void @ldrd_strd_aa(i32* noalias nocapture %x, i32* noalias nocapture readonly %y) {
+  define void @ldrd_strd_aa(ptr noalias nocapture %x, ptr noalias nocapture readonly %y) {
   entry:
-    %0 = load i32, i32* %y, align 4
-    store i32 %0, i32* %x, align 4
-    %arrayidx2 = getelementptr inbounds i32, i32* %y, i32 1
-    %1 = load i32, i32* %arrayidx2, align 4
-    %arrayidx3 = getelementptr inbounds i32, i32* %x, i32 1
-    store i32 %1, i32* %arrayidx3, align 4
+    %0 = load i32, ptr %y, align 4
+    store i32 %0, ptr %x, align 4
+    %arrayidx2 = getelementptr inbounds i32, ptr %y, i32 1
+    %1 = load i32, ptr %arrayidx2, align 4
+    %arrayidx3 = getelementptr inbounds i32, ptr %x, i32 1
+    store i32 %1, ptr %arrayidx3, align 4
     ret void
   }
 ...

diff --git a/llvm/test/CodeGen/ARM/prera-ldst-insertpt.mir b/llvm/test/CodeGen/ARM/prera-ldst-insertpt.mir
index 42a6fda35adb2..7d939458ee79b 100644
--- a/llvm/test/CodeGen/ARM/prera-ldst-insertpt.mir
+++ b/llvm/test/CodeGen/ARM/prera-ldst-insertpt.mir
@@ -4,12 +4,12 @@
 --- |
   target triple = "thumbv7---eabi"
 
-  define void @a(i32* nocapture %x, i32 %y, i32 %z) {
+  define void @a(ptr nocapture %x, i32 %y, i32 %z) {
   entry:
     ret void
   }
 
-  define void @b(i32* nocapture %x, i32 %y, i32 %z) {
+  define void @b(ptr nocapture %x, i32 %y, i32 %z) {
   entry:
     ret void
   }

diff --git a/llvm/test/CodeGen/ARM/single-issue-r52.mir b/llvm/test/CodeGen/ARM/single-issue-r52.mir
index af393be3a27af..05b167316ca4e 100644
--- a/llvm/test/CodeGen/ARM/single-issue-r52.mir
+++ b/llvm/test/CodeGen/ARM/single-issue-r52.mir
@@ -9,14 +9,14 @@
 
   %struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
   ; Function Attrs: nounwind
-  define <8 x i8> @foo(i8* %A) {
-    %tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8.p0i8(i8* %A, i32 8)
+  define <8 x i8> @foo(ptr %A) {
+    %tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8.p0(ptr %A, i32 8)
     %tmp2 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 0
     %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 1
     %tmp4 = add <8 x i8> %tmp2, %tmp3
     ret <8 x i8> %tmp4
   }
-  declare %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8.p0i8(i8*, i32)
+  declare %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8.p0(ptr, i32)
 
 # CHECK: ********** MI Scheduling **********
 # CHECK: ScheduleDAGMILive::schedule starting

diff --git a/llvm/test/CodeGen/ARM/stack_frame_offset.mir b/llvm/test/CodeGen/ARM/stack_frame_offset.mir
index f2cdb8007e552..e387e079aa20d 100644
--- a/llvm/test/CodeGen/ARM/stack_frame_offset.mir
+++ b/llvm/test/CodeGen/ARM/stack_frame_offset.mir
@@ -8,26 +8,26 @@
   define i32 @testpos() {
   entry:
     %a = alloca i32, align 4
-    call void @other(i32* %a)
-    %b = load i32, i32* %a, align 4
+    call void @other(ptr %a)
+    %b = load i32, ptr %a, align 4
     ret i32 %b
   }
   define i32 @testneg4() {
   entry:
     %a = alloca i32, align 4
-    call void @other(i32* %a)
-    %b = load i32, i32* %a, align 4
+    call void @other(ptr %a)
+    %b = load i32, ptr %a, align 4
     ret i32 %b
   }
   define i32 @testneg8() {
   entry:
     %a = alloca i32, align 4
-    call void @other(i32* %a)
-    %b = load i32, i32* %a, align 4
+    call void @other(ptr %a)
+    %b = load i32, ptr %a, align 4
     ret i32 %b
   }
 
-  declare void @other(i32*)
+  declare void @other(ptr)
 
 ...
 ---

diff --git a/llvm/test/CodeGen/ARM/store-prepostinc.mir b/llvm/test/CodeGen/ARM/store-prepostinc.mir
index b974bc29838ca..5d76f9b24168a 100644
--- a/llvm/test/CodeGen/ARM/store-prepostinc.mir
+++ b/llvm/test/CodeGen/ARM/store-prepostinc.mir
@@ -5,27 +5,27 @@
   target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv7a-none-unknown-eabi"
 
-  define i8* @STR_pre4(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_pre8(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_pre255(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_pre256(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_pre1024(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_pre4095(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_pre4096(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_prem1024(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_prem4095(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_prem4096(i8* %p, i32 %v) { unreachable }
-
-  define i8* @STR_post4(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_post8(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_post255(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_post256(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_post1024(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_post4095(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_post4096(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_postm1024(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_postm4095(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_postm4096(i8* %p, i32 %v) { unreachable }
+  define ptr @STR_pre4(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_pre8(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_pre255(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_pre256(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_pre1024(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_pre4095(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_pre4096(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_prem1024(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_prem4095(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_prem4096(ptr %p, i32 %v) { unreachable }
+
+  define ptr @STR_post4(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_post8(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_post255(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_post256(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_post1024(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_post4095(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_post4096(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_postm1024(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_postm4095(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_postm4096(ptr %p, i32 %v) { unreachable }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/ARM/v6-jumptable-clobber.mir b/llvm/test/CodeGen/ARM/v6-jumptable-clobber.mir
index ec6f9ef8e9819..023f7a9c58c94 100644
--- a/llvm/test/CodeGen/ARM/v6-jumptable-clobber.mir
+++ b/llvm/test/CodeGen/ARM/v6-jumptable-clobber.mir
@@ -20,8 +20,8 @@
   target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv6m-none--eabi"
   
-  define void @foo(i8 %in, i32* %addr) {
-    store i32 12345678, i32* %addr
+  define void @foo(i8 %in, ptr %addr) {
+    store i32 12345678, ptr %addr
     %1 = call i32 @llvm.arm.space(i32 980, i32 undef)
     %2 = zext i8 %in to i32
     switch i32 %2, label %default [
@@ -99,8 +99,8 @@
     unreachable
   }
 
-  define void @bar(i8 %in, i32* %addr) {
-      store i32 12345678, i32* %addr
+  define void @bar(i8 %in, ptr %addr) {
+      store i32 12345678, ptr %addr
     %1 = zext i8 %in to i32
     switch i32 %1, label %default [
       i32 0, label %d1
@@ -181,7 +181,7 @@
   declare i32 @llvm.arm.space(i32, i32) #0
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #0
+  declare void @llvm.stackprotector(ptr, ptr) #0
   
   attributes #0 = { nounwind }
 

diff --git a/llvm/test/CodeGen/ARM/vldm-liveness.mir b/llvm/test/CodeGen/ARM/vldm-liveness.mir
index 14123ac3759d9..db88735719a9a 100644
--- a/llvm/test/CodeGen/ARM/vldm-liveness.mir
+++ b/llvm/test/CodeGen/ARM/vldm-liveness.mir
@@ -14,7 +14,7 @@
 # liveness flags are added.
 --- |
   target triple = "thumbv7-apple-ios"
-  define arm_aapcs_vfpcc <4 x float> @foo(float* %ptr) {
+  define arm_aapcs_vfpcc <4 x float> @foo(ptr %ptr) {
     ret <4 x float> undef
   }
 ...

diff --git a/llvm/test/CodeGen/ARM/vldmia-sched.mir b/llvm/test/CodeGen/ARM/vldmia-sched.mir
index 1b2d9ddbff564..9a38d28d6b60c 100644
--- a/llvm/test/CodeGen/ARM/vldmia-sched.mir
+++ b/llvm/test/CodeGen/ARM/vldmia-sched.mir
@@ -24,8 +24,8 @@ body:             |
       $r0 = t2MOVTi16 internal $r0, target-flags(arm-hi16) @a, 14, $noreg
     }
     $r1 = t2ADDri $r0, 8, 14, $noreg, $noreg
-    VLDMDIA killed $r1, 14, $noreg, def $d23, def $d24, def $d25, def $d26, def $d27, def $d28, def $d29, def $d30, def $d31 :: (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 2, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 4, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 6, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 8, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 10, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 12, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 14, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 16, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 18, i32 0) to <2 x float>*)`, align 4)
+    VLDMDIA killed $r1, 14, $noreg, def $d23, def $d24, def $d25, def $d26, def $d27, def $d28, def $d29, def $d30, def $d31 :: (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 2, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 4, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 6, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 8, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 10, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 12, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 14, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 16, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 18, i32 0)`, align 4)
     $r0, dead $cpsr = tADDi8 killed $r0, 80, 14, $noreg
-    VLDMDIA killed $r0, 14, $noreg, def $d0, def $d1, def $d2, def $d3, def $d4, def $d5, def $d6 :: (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 20, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 22, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 24, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 26, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 28, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 30, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 32, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 33, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 34, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 35, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 36, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 37, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 38, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 39, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 40, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 41, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 42, i32 0) to <2 x float>*)`, align 4)
+    VLDMDIA killed $r0, 14, $noreg, def $d0, def $d1, def $d2, def $d3, def $d4, def $d5, def $d6 :: (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 20, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 22, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 24, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 26, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 28, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 30, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 32, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 33, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 34, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 35, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 36, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 37, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 38, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 39, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 40, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 41, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 42, i32 0)`, align 4)
 
 ...

diff --git a/llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir b/llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir
index 63cd2a6c00308..8c49a53167411 100644
--- a/llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir
+++ b/llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir
@@ -2,7 +2,7 @@
 --- |
   target triple = "thumbv8m.main-arm-none-eabi"
 
-  define hidden void @foo(void ()* nocapture %baz) local_unnamed_addr #0 {
+  define hidden void @foo(ptr nocapture %baz) local_unnamed_addr #0 {
   entry:
     %call = call i32 @bar() #0
     %tobool = icmp eq i32 %call, 0

diff --git a/llvm/test/CodeGen/Hexagon/addrmode-immop.mir b/llvm/test/CodeGen/Hexagon/addrmode-immop.mir
index f1928f8159dc4..3069cbe5969d6 100644
--- a/llvm/test/CodeGen/Hexagon/addrmode-immop.mir
+++ b/llvm/test/CodeGen/Hexagon/addrmode-immop.mir
@@ -7,21 +7,21 @@
 --- |
   target triple = "hexagon-unknown-unknown-elf"
 
-  %s.0 = type { i32 (...)**, i32, i32, %s.1 }
+  %s.0 = type { ptr, i32, i32, %s.1 }
   %s.1 = type { i32, i32 }
 
-  @g0 = external dso_local unnamed_addr constant { [3 x i8*], [3 x i8*] }, align 4
+  @g0 = external dso_local unnamed_addr constant { [3 x ptr], [3 x ptr] }, align 4
 
   ; Function Attrs: norecurse
   define void @f0() #0 {
   b0:
-    %v0 = load i32 (%s.0*)*, i32 (%s.0*)** bitcast (i8* getelementptr (i8, i8* bitcast (i8** getelementptr inbounds ({ [3 x i8*], [3 x i8*] }, { [3 x i8*], [3 x i8*] }* @g0, i32 0, inrange i32 0, i32 3) to i8*), i32 sub (i32 ptrtoint (i32 (%s.0*)* @f1 to i32), i32 1)) to i32 (%s.0*)**), align 4
-    %v1 = call i32 %v0(%s.0* nonnull undef)
+    %v0 = load ptr, ptr getelementptr (i8, ptr getelementptr inbounds ({ [3 x ptr], [3 x ptr] }, ptr @g0, i32 0, inrange i32 0, i32 3), i32 sub (i32 ptrtoint (ptr @f1 to i32), i32 1)), align 4
+    %v1 = call i32 %v0(ptr nonnull undef)
     unreachable
   }
 
   ; Function Attrs: norecurse nounwind
-  declare dso_local i32 @f1(%s.0*) #1 align 2
+  declare dso_local i32 @f1(ptr) #1 align 2
 
   attributes #0 = { norecurse "target-cpu"="hexagonv60" }
   attributes #1 = { norecurse nounwind "target-cpu"="hexagonv60" }
@@ -33,7 +33,7 @@ tracksRegLiveness: true
 body: |
   bb.0.b0:
     $r2 = A2_tfrsi @g0 + 12
-    $r2 = L2_loadri_io killed $r2, @f1 - 1 :: (load (s32) from `i32 (%s.0*)** bitcast (i8* getelementptr (i8, i8* bitcast (i8** getelementptr inbounds ({ [3 x i8*], [3 x i8*] }, { [3 x i8*], [3 x i8*] }* @g0, i32 0, inrange i32 0, i32 3) to i8*), i32 sub (i32 ptrtoint (i32 (%s.0*)* @f1 to i32), i32 1)) to i32 (%s.0*)**)`)
+    $r2 = L2_loadri_io killed $r2, @f1 - 1 :: (load (s32) from `ptr getelementptr (i8, ptr getelementptr inbounds ({ [3 x ptr], [3 x ptr] }, ptr @g0, i32 0, inrange i32 0, i32 3), i32 sub (i32 ptrtoint (ptr @f1 to i32), i32 1))`)
     ADJCALLSTACKDOWN 0, 0, implicit-def $r29, implicit-def dead $r30, implicit $r31, implicit $r30, implicit $r29
     PS_callr_nr killed $r2, hexagoncsr, implicit undef $r0, implicit-def $r29, implicit-def dead $r0
     ADJCALLSTACKUP 0, 0, implicit-def dead $r29, implicit-def dead $r30, implicit-def dead $r31, implicit $r29

diff --git a/llvm/test/CodeGen/Hexagon/addrmode-no-rdef.mir b/llvm/test/CodeGen/Hexagon/addrmode-no-rdef.mir
index 8e43189f82feb..ab035d9b92227 100644
--- a/llvm/test/CodeGen/Hexagon/addrmode-no-rdef.mir
+++ b/llvm/test/CodeGen/Hexagon/addrmode-no-rdef.mir
@@ -14,14 +14,13 @@ define dso_local i32 @f0(i1 zeroext %a0) local_unnamed_addr #0 {
 b0:
   %v0 = tail call i32 @llvm.read_register.i32(metadata !0)
   %v1 = add nsw i32 %v0, 4096
-  %v2 = inttoptr i32 %v1 to %s.0*
-  %v3 = getelementptr inbounds %s.0, %s.0* %v2, i32 -1
-  %v4 = tail call i32 bitcast (i32 (...)* @f1 to i32 (%s.0*)*)(%s.0* noundef nonnull %v3) #2
+  %v2 = inttoptr i32 %v1 to ptr
+  %v3 = getelementptr inbounds %s.0, ptr %v2, i32 -1
+  %v4 = tail call i32 @f1(ptr noundef nonnull %v3) #2
   br i1 %a0, label %b2, label %b1
 
 b1:                                               ; preds = %b0
-  %v5 = getelementptr inbounds %s.0, %s.0* %v3, i32 0, i32 0, i32 0, i32 0
-  %v6 = load i32, i32* %v5, align 4
+  %v6 = load i32, ptr %v3, align 4
   br label %b2
 
 b2:                                               ; preds = %b1, %b0

diff --git a/llvm/test/CodeGen/Hexagon/bank-conflict-load.mir b/llvm/test/CodeGen/Hexagon/bank-conflict-load.mir
index 8a924ffc3ff41..5d84e66f83490 100644
--- a/llvm/test/CodeGen/Hexagon/bank-conflict-load.mir
+++ b/llvm/test/CodeGen/Hexagon/bank-conflict-load.mir
@@ -8,7 +8,7 @@
 # CHECK: L2_loadri_io killed $r0, 12
 
 --- |
-  define void @foo(i32* %a, i32* %b) {
+  define void @foo(ptr %a, ptr %b) {
     ret void
   }
 ...

diff --git a/llvm/test/CodeGen/Hexagon/bank-conflict.mir b/llvm/test/CodeGen/Hexagon/bank-conflict.mir
index 07c0edd8ba41d..f59fc5ce8f7de 100644
--- a/llvm/test/CodeGen/Hexagon/bank-conflict.mir
+++ b/llvm/test/CodeGen/Hexagon/bank-conflict.mir
@@ -31,19 +31,18 @@
 
   define void @f0(i32 %a0) {
   b0:
-    %v0 = bitcast [10 x %s.0]* inttoptr (i32 -121502345 to [10 x %s.0]*) to [10 x %s.0]*
+    %v0 = bitcast ptr inttoptr (i32 -121502345 to ptr) to ptr
     br label %b1
 
   b1:                                               ; preds = %b5, %b0
     %v1 = phi i32 [ 0, %b0 ], [ %v28, %b5 ]
     %v2 = phi i32 [ 0, %b0 ], [ %v27, %b5 ]
-    %v3 = load i32, i32* @g2, align 4
-    %v4 = load i32, i32* @g3, align 8
+    %v3 = load i32, ptr @g2, align 4
+    %v4 = load i32, ptr @g3, align 8
     %v5 = and i32 %v4, %v3
-    %v6 = getelementptr [10 x %s.0], [10 x %s.0]* %v0, i32 0, i32 %v2
-    %v7 = bitcast %s.0* %v6 to %s.0*
-    %v8 = getelementptr %s.0, %s.0* %v7, i32 0, i32 12
-    %v9 = getelementptr %s.0, %s.0* %v7, i32 0, i32 13
+    %v6 = getelementptr [10 x %s.0], ptr %v0, i32 0, i32 %v2
+    %v8 = getelementptr %s.0, ptr %v6, i32 0, i32 12
+    %v9 = getelementptr %s.0, ptr %v6, i32 0, i32 13
     br label %b2
 
   b2:                                               ; preds = %b4, %b1
@@ -51,20 +50,20 @@
     %v11 = phi i32 [ %v13, %b4 ], [ %v5, %b1 ]
     %v12 = tail call i32 @llvm.hexagon.S2.cl0(i32 %v11)
     %v13 = tail call i32 @llvm.hexagon.S2.setbit.r(i32 %v11, i32 %v12)
-    %v14 = getelementptr [24 x i32], [24 x i32]* %v8, i32 0, i32 %v12
-    %v15 = load i32, i32* %v14, align 4
+    %v14 = getelementptr [24 x i32], ptr %v8, i32 0, i32 %v12
+    %v15 = load i32, ptr %v14, align 4
     %v16 = tail call i64 @llvm.hexagon.M2.vmpy2s.s0(i32 %v15, i32 %v15)
-    %v17 = getelementptr [24 x i32], [24 x i32]* %v9, i32 0, i32 %v12
-    %v18 = load i32, i32* %v17, align 4
+    %v17 = getelementptr [24 x i32], ptr %v9, i32 0, i32 %v12
+    %v18 = load i32, ptr %v17, align 4
     %v19 = tail call i64 @llvm.hexagon.M2.vmac2s.s0(i64 %v16, i32 %v18, i32 %v18)
-    %v20 = load i8, i8* @g4, align 1
+    %v20 = load i8, ptr @g4, align 1
     %v21 = and i8 %v20, 1
     %v22 = icmp eq i8 %v21, 0
     br i1 %v22, label %b3, label %b4
 
   b3:                                               ; preds = %b2
     %v23 = tail call i64 @llvm.hexagon.A2.vaddws(i64 %v10, i64 %v19)
-    store i64 %v23, i64* @g0, align 8
+    store i64 %v23, ptr @g0, align 8
     br label %b4
 
   b4:                                               ; preds = %b3, %b2
@@ -80,7 +79,7 @@
     br i1 %v29, label %b6, label %b1
 
   b6:                                               ; preds = %b5
-    store i64 %v19, i64* @g1, align 8
+    store i64 %v19, ptr @g1, align 8
     ret void
   }
 

diff --git a/llvm/test/CodeGen/Hexagon/cext-opt-negative-fi.mir b/llvm/test/CodeGen/Hexagon/cext-opt-negative-fi.mir
index 63277e05cd1e7..5586748a7870c 100644
--- a/llvm/test/CodeGen/Hexagon/cext-opt-negative-fi.mir
+++ b/llvm/test/CodeGen/Hexagon/cext-opt-negative-fi.mir
@@ -22,16 +22,16 @@
   %s.9 = type { i8, i8 }
 
   ; Function Attrs: nounwind optsize
-  define dso_local void @f0(%s.0* byval(%s.0) nocapture readonly align 8 %a0) local_unnamed_addr #0 {
+  define dso_local void @f0(ptr byval(%s.0) nocapture readonly align 8 %a0) local_unnamed_addr #0 {
   b0:
-    %v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 10
-    %v1 = load i8, i8* %v0, align 8
-    %v2 = tail call i8* @f1(i8 signext %v1) #0
+    %v0 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 10
+    %v1 = load i8, ptr %v0, align 8
+    %v2 = tail call ptr @f1(i8 signext %v1) #0
     unreachable
   }
 
   ; Function Attrs: nounwind optsize
-  declare dso_local i8* @f1(i8 signext) local_unnamed_addr #0
+  declare dso_local ptr @f1(i8 signext) local_unnamed_addr #0
 
   attributes #0 = { nounwind optsize "target-cpu"="hexagonv65" }
 

diff --git a/llvm/test/CodeGen/Hexagon/cext-opt-stack-no-rr.mir b/llvm/test/CodeGen/Hexagon/cext-opt-stack-no-rr.mir
index 74346d0ddc210..150cc073983ef 100644
--- a/llvm/test/CodeGen/Hexagon/cext-opt-stack-no-rr.mir
+++ b/llvm/test/CodeGen/Hexagon/cext-opt-stack-no-rr.mir
@@ -14,7 +14,7 @@ body: |
     successors: %bb.1, %bb.2
 
     %0:intregs = IMPLICIT_DEF
-    %1:intregs = L2_loadrub_io killed %0:intregs, 0 :: (load (s8) from `i8* undef`, align 2)
+    %1:intregs = L2_loadrub_io killed %0:intregs, 0 :: (load (s8) from `ptr undef`, align 2)
     %2:predregs = C2_cmpeqi %1:intregs, 5
     %3:intregs = A2_tfrsi 0
     S2_pstorerbt_io %2:predregs, %stack.0, 267, killed %3:intregs :: (store (s8) into %stack.0)

diff --git a/llvm/test/CodeGen/Hexagon/cext-unnamed-global.mir b/llvm/test/CodeGen/Hexagon/cext-unnamed-global.mir
index 22cb2e42f4c59..0673ba7c218f5 100644
--- a/llvm/test/CodeGen/Hexagon/cext-unnamed-global.mir
+++ b/llvm/test/CodeGen/Hexagon/cext-unnamed-global.mir
@@ -11,11 +11,11 @@
 
   define void @f0() #0 {
   b0:
-    tail call fastcc void @f1(float* inttoptr (i64 add (i64 ptrtoint ([0 x i8]* @0 to i64), i64 128) to float*), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @1, i32 0, i32 0))
+    tail call fastcc void @f1(ptr inttoptr (i64 add (i64 ptrtoint (ptr @0 to i64), i64 128) to ptr), ptr @1)
     ret void
   }
 
-  declare fastcc void @f1(float* nocapture readonly, i64* nocapture readonly) #1
+  declare fastcc void @f1(ptr nocapture readonly, ptr nocapture readonly) #1
 
   attributes #0 = { alwaysinline nounwind "target-cpu"="hexagonv60" }
   attributes #1 = { noinline norecurse nounwind "target-cpu"="hexagonv60" }

diff --git a/llvm/test/CodeGen/Hexagon/early-if-conv-lifetime.mir b/llvm/test/CodeGen/Hexagon/early-if-conv-lifetime.mir
index 0dedca4d14ff0..8fe30e3c50cf4 100644
--- a/llvm/test/CodeGen/Hexagon/early-if-conv-lifetime.mir
+++ b/llvm/test/CodeGen/Hexagon/early-if-conv-lifetime.mir
@@ -14,21 +14,20 @@
   %s.2 = type { %s.3 }
   %s.3 = type { %s.4 }
   %s.4 = type { %s.5 }
-  %s.5 = type { i32, i32, i8* }
+  %s.5 = type { i32, i32, ptr }
 
-  declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
+  declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
 
   define hidden fastcc void @f0() {
   b0:
     %v0 = alloca %s.0, align 4
-    %v1 = load i8, i8* undef, align 1
+    %v1 = load i8, ptr undef, align 1
     %v2 = add i8 %v1, -102
     %v3 = icmp ult i8 %v2, 1
     br i1 %v3, label %b1, label %b2
 
   b1:                                               ; preds = %b0
-    %v4 = bitcast %s.0* %v0 to i8*
-    call void @llvm.lifetime.end.p0i8(i64 12, i8* nonnull %v4)
+    call void @llvm.lifetime.end.p0(i64 12, ptr nonnull %v0)
     br label %b2
 
   b2:                                               ; preds = %b1, %b0
@@ -58,7 +57,7 @@ body:             |
     successors: %bb.1.b1(0x40000000), %bb.2.b2(0x40000000)
 
     %1 = IMPLICIT_DEF
-    %0 = L2_loadrb_io killed %1, 0 :: (load (s8) from `i8* undef`)
+    %0 = L2_loadrb_io killed %1, 0 :: (load (s8) from `ptr undef`)
     %2 = C2_cmpeqi killed %0, 102
     %3 = COPY killed %2
     J2_jumpf killed %3, %bb.2.b2, implicit-def dead $pc

diff --git a/llvm/test/CodeGen/Hexagon/early-if-predicator.mir b/llvm/test/CodeGen/Hexagon/early-if-predicator.mir
index 51fb2ab86a2d0..1cb2f1e933731 100644
--- a/llvm/test/CodeGen/Hexagon/early-if-predicator.mir
+++ b/llvm/test/CodeGen/Hexagon/early-if-predicator.mir
@@ -6,12 +6,12 @@
 --- |
   target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 
-  define void @if-cvt(i32* %p, i1 %c) {
+  define void @if-cvt(ptr %p, i1 %c) {
   entry:
     br i1 %c, label %if, label %endif
 
   if:                                               ; preds = %entry
-    store i32 1, i32* %p, align 4
+    store i32 1, ptr %p, align 4
     br label %endif
   
   endif:                                            ; preds = %if, %entry

diff --git a/llvm/test/CodeGen/Hexagon/hwloop-dbg-register.mir b/llvm/test/CodeGen/Hexagon/hwloop-dbg-register.mir
index 2f264580b7c70..db3c7bf86ff2f 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-dbg-register.mir
+++ b/llvm/test/CodeGen/Hexagon/hwloop-dbg-register.mir
@@ -9,7 +9,7 @@
 
   %s.0 = type { i32 }
 
-  @g0 = external dso_local local_unnamed_addr global %s.0*, align 4, !dbg !0
+  @g0 = external dso_local local_unnamed_addr global ptr, align 4, !dbg !0
 
   define dso_local void @f0() local_unnamed_addr #0 !dbg !13 {
   b0:
@@ -21,9 +21,9 @@
     %v2 = add nsw i32 %v1, -8
     %v3 = add nsw i32 %v2, %v0
     call void @llvm.dbg.value(metadata i32 %v3, metadata !19, metadata !DIExpression()), !dbg !24
-    %v4 = load %s.0*, %s.0** @g0, align 4
-    %v5 = getelementptr inbounds %s.0, %s.0* %v4, i32 %v3, i32 0
-    store i32 undef, i32* %v5, align 4
+    %v4 = load ptr, ptr @g0, align 4
+    %v5 = getelementptr inbounds %s.0, ptr %v4, i32 %v3, i32 0
+    store i32 undef, ptr %v5, align 4
     %v6 = icmp eq i32 %v2, 0
     br i1 %v6, label %b2, label %b1
 

diff --git a/llvm/test/CodeGen/Hexagon/ifcvt-diamond-ret.mir b/llvm/test/CodeGen/Hexagon/ifcvt-diamond-ret.mir
index 797efc384ba7f..630b2860e1b0f 100644
--- a/llvm/test/CodeGen/Hexagon/ifcvt-diamond-ret.mir
+++ b/llvm/test/CodeGen/Hexagon/ifcvt-diamond-ret.mir
@@ -16,10 +16,10 @@ body: |
     J2_jumpf killed renamable $p0, %bb.2, implicit-def dead $pc
 
   bb.1:
-    S4_storeiri_io undef renamable $r0, 0, 32768 :: (store (s32) into `i32* undef`)
+    S4_storeiri_io undef renamable $r0, 0, 32768 :: (store (s32) into `ptr undef`)
     PS_jmpret $r31, implicit-def dead $pc
 
   bb.2:
-    S4_storeiri_io undef renamable $r0, 0, 32768 :: (store (s32) into `i32* undef`)
+    S4_storeiri_io undef renamable $r0, 0, 32768 :: (store (s32) into `ptr undef`)
     PS_jmpret $r31, implicit-def dead $pc
 ...

diff --git a/llvm/test/CodeGen/Hexagon/pipeliner/swp-phi-start.mir b/llvm/test/CodeGen/Hexagon/pipeliner/swp-phi-start.mir
index 9743709c65377..413d13d642db2 100644
--- a/llvm/test/CodeGen/Hexagon/pipeliner/swp-phi-start.mir
+++ b/llvm/test/CodeGen/Hexagon/pipeliner/swp-phi-start.mir
@@ -18,7 +18,7 @@
   target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 
   ; Function Attrs: nounwind
-  define void @f0(i32 %a0, i16* nocapture %a1) #0 {
+  define void @f0(i32 %a0, ptr nocapture %a1) #0 {
   b0:
     br i1 undef, label %b1, label %b2.preheader
 
@@ -26,20 +26,20 @@
     br i1 undef, label %b3, label %b2.preheader
 
   b2.preheader:                                     ; preds = %b0, %b1
-    %cgep = getelementptr i16, i16* %a1, i32 undef
+    %cgep = getelementptr i16, ptr %a1, i32 undef
     br label %b2
 
   b2:                                               ; preds = %b2.preheader, %b2
-    %lsr.iv = phi i16* [ %cgep, %b2.preheader ], [ %cgep3, %b2 ]
+    %lsr.iv = phi ptr [ %cgep, %b2.preheader ], [ %cgep3, %b2 ]
     %v1 = phi i32 [ %v7, %b2 ], [ undef, %b2.preheader ]
     %v2 = phi i32 [ %v1, %b2 ], [ %a0, %b2.preheader ]
     %v3 = add nsw i32 %v2, -2
-    %cgep2 = getelementptr inbounds i16, i16* %a1, i32 %v3
-    %v5 = load i16, i16* %cgep2, align 2, !tbaa !0
-    store i16 %v5, i16* %lsr.iv, align 2, !tbaa !0
+    %cgep2 = getelementptr inbounds i16, ptr %a1, i32 %v3
+    %v5 = load i16, ptr %cgep2, align 2, !tbaa !0
+    store i16 %v5, ptr %lsr.iv, align 2, !tbaa !0
     %v7 = add nsw i32 %v1, -1
     %v8 = icmp sgt i32 %v7, 0
-    %cgep3 = getelementptr i16, i16* %lsr.iv, i32 -1
+    %cgep3 = getelementptr i16, ptr %lsr.iv, i32 -1
     br i1 %v8, label %b2, label %b3
 
   b3:                                               ; preds = %b2, %b1

diff --git a/llvm/test/CodeGen/Hexagon/postinc-baseoffset.mir b/llvm/test/CodeGen/Hexagon/postinc-baseoffset.mir
index 962df0c055247..3bf9b3ef7c79e 100644
--- a/llvm/test/CodeGen/Hexagon/postinc-baseoffset.mir
+++ b/llvm/test/CodeGen/Hexagon/postinc-baseoffset.mir
@@ -9,7 +9,7 @@
 # CHECK: r1 = memw(r0++#8)
 
 --- |
-  define void @fred(i32* %a) { ret void }
+  define void @fred(ptr %a) { ret void }
 ...
 ---
 name: fred

diff --git a/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir b/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir
index 093d3ad945aa5..67f4dd72ea0b2 100644
--- a/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir
+++ b/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir
@@ -42,7 +42,7 @@
     %shl50 = shl i64 %add45, %4
     %and52 = and i64 %shl37, %or12
     %and54 = and i64 %shl50, %or26
-    store i64 %and54, i64* undef, align 8
+    store i64 %and54, ptr undef, align 8
     %cmp56 = icmp eq i64 %and52, 0
     br i1 %cmp56, label %for.end, label %if.end82
 
@@ -186,7 +186,7 @@ body:             |
     %21 = COPY %13
     %21 = S2_lsr_i_p_and %21, %29, 9
     %22 = S2_asl_i_p_and %22, %7, 42
-    S2_storerd_io undef %23, 0, %22 :: (store (s64) into `i64* undef`)
+    S2_storerd_io undef %23, 0, %22 :: (store (s64) into `ptr undef`)
     %25 = C2_cmpeqp %21, %51
     J2_jumpt %25, %bb.3.for.end, implicit-def dead $pc
     J2_jump %bb.2.if.end82, implicit-def dead $pc

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/add_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/add_vec.mir
index e578cf9746fe4..72ddaa48c9f74 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/add_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/add_vec.mir
@@ -2,10 +2,10 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @add_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @add_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @add_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @add_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @add_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @add_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @add_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @add_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/brindirect.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/brindirect.mir
index 5990dc08b02ee..bca4283ccfc7c 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/brindirect.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/brindirect.mir
@@ -2,9 +2,9 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define i32 @indirectbr(i8* %addr) {
+  define i32 @indirectbr(ptr %addr) {
   entry:
-    indirectbr i8* %addr, [label %L1, label %L2]
+    indirectbr ptr %addr, [label %L1, label %L2]
 
   L1:
     ret i32 0

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fabs_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fabs_vec.mir
index 308c18f8f8711..2e7dd7a2432ed 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fabs_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fabs_vec.mir
@@ -2,8 +2,8 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @fabs_v4f32(<4 x float>* %a, <4 x float>* %c) { entry: ret void }
-  define void @fabs_v2f64(<2 x double>* %a, <2 x double>* %c) { entry: ret void }
+  define void @fabs_v4f32(ptr %a, ptr %c) { entry: ret void }
+  define void @fabs_v2f64(ptr %a, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fence.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fence.mir
index f569ec81c2c68..c3903fafc3a55 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fence.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fence.mir
@@ -2,7 +2,7 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @atomic_load_i32(i32* %ptr) { ret void }
+  define void @atomic_load_i32(ptr %ptr) { ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/floating_point_vec_arithmetic_operations.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/floating_point_vec_arithmetic_operations.mir
index 1fcba134704a5..c19b123b30a34 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/floating_point_vec_arithmetic_operations.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/floating_point_vec_arithmetic_operations.mir
@@ -2,17 +2,17 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @fadd_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
-  define void @fadd_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+  define void @fadd_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @fadd_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
-  define void @fsub_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
-  define void @fsub_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+  define void @fsub_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @fsub_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
-  define void @fmul_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
-  define void @fmul_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+  define void @fmul_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @fmul_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
-  define void @fdiv_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
-  define void @fdiv_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+  define void @fdiv_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @fdiv_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fsqrt_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fsqrt_vec.mir
index 2fdae65b0a5c5..4458b39212c37 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fsqrt_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fsqrt_vec.mir
@@ -2,8 +2,8 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @sqrt_v4f32(<4 x float>* %a, <4 x float>* %c) { entry: ret void }
-  define void @sqrt_v2f64(<2 x double>* %a, <2 x double>* %c) { entry: ret void }
+  define void @sqrt_v4f32(ptr %a, ptr %c) { entry: ret void }
+  define void @sqrt_v2f64(ptr %a, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/gloal_address.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/gloal_address.mir
index f4d065a20549d..94eb203d96a02 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/gloal_address.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/gloal_address.mir
@@ -5,7 +5,7 @@
   @.str = private unnamed_addr constant [11 x i8] c"hello %d \0A\00"
 
   define void @main() {entry: ret void}
-  declare i32 @printf(i8*, ...)
+  declare i32 @printf(ptr, ...)
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load.mir
index 0540d41e10b42..e5af2cc5acde1 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load.mir
@@ -3,9 +3,9 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -mattr=+fp64,+mips32r2 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32FP64
 --- |
 
-  define void @load_i32(i32* %ptr) {entry: ret void}
-  define void @load_float(float* %ptr) {entry: ret void}
-  define void @load_double(double* %ptr) {entry: ret void}
+  define void @load_i32(ptr %ptr) {entry: ret void}
+  define void @load_float(ptr %ptr) {entry: ret void}
+  define void @load_double(ptr %ptr) {entry: ret void}
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned.mir
index 78808f6cdd75b..e053a0934b844 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned.mir
@@ -8,19 +8,19 @@
 
   define float @load_float_align1() {
   entry:
-    %0 = load float, float* @float_align1, align 1
+    %0 = load float, ptr @float_align1, align 1
     ret float %0
   }
 
   define float @load_float_align4() {
   entry:
-    %0 = load float, float* @float_align4, align 4
+    %0 = load float, ptr @float_align4, align 4
     ret float %0
   }
 
   define i32 @load_i32_align8() {
   entry:
-    %0 = load i32, i32* @i32_align8, align 8
+    %0 = load i32, ptr @i32_align8, align 8
     ret i32 %0
   }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned_r6.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned_r6.mir
index 3b4a40a476715..26d397410ff9e 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned_r6.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned_r6.mir
@@ -8,19 +8,19 @@
 
   define float @load_float_align1() {
   entry:
-    %0 = load float, float* @float_align1, align 1
+    %0 = load float, ptr @float_align1, align 1
     ret float %0
   }
 
   define float @load_float_align8() {
   entry:
-    %0 = load float, float* @float_align8, align 8
+    %0 = load float, ptr @float_align8, align 8
     ret float %0
   }
 
   define i32 @load_i32_align2() {
   entry:
-    %0 = load i32, i32* @i32_align2, align 2
+    %0 = load i32, ptr @i32_align2, align 2
     ret i32 %0
   }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_store_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_store_vec.mir
index 18a884b21cfd6..eafe5af13a90c 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_store_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_store_vec.mir
@@ -2,12 +2,12 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=msa,+fp64 -mattr=nan2008 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @load_store_v16i8(<16 x i8>* %a, <16 x i8>* %b) { entry: ret void }
-  define void @load_store_v8i16(<8 x i16>* %a, <8 x i16>* %b) { entry: ret void }
-  define void @load_store_v4i32(<4 x i32>* %a, <4 x i32>* %b) { entry: ret void }
-  define void @load_store_v2i64(<2 x i64>* %a, <2 x i64>* %b) { entry: ret void }
-  define void @load_store_v4f32(<4 x float>* %a, <4 x float>* %b) { entry: ret void }
-  define void @load_store_v2f64(<2 x double>* %a, <2 x double>* %b) { entry: ret void }
+  define void @load_store_v16i8(ptr %a, ptr %b) { entry: ret void }
+  define void @load_store_v8i16(ptr %a, ptr %b) { entry: ret void }
+  define void @load_store_v4i32(ptr %a, ptr %b) { entry: ret void }
+  define void @load_store_v2i64(ptr %a, ptr %b) { entry: ret void }
+  define void @load_store_v4f32(ptr %a, ptr %b) { entry: ret void }
+  define void @load_store_v2f64(ptr %a, ptr %b) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul.mir
index 7a6a684f3db1e..6044f73b1c805 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul.mir
@@ -3,7 +3,7 @@
 --- |
 
   define void @mul_i32(i32 %x, i32 %y) {entry: ret void}
-  define void @umul_with_overflow(i32 %lhs, i32 %rhs, i32* %pmul, i1* %pcarry_flag) { ret void }
+  define void @umul_with_overflow(i32 %lhs, i32 %rhs, ptr %pmul, ptr %pcarry_flag) { ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul_vec.mir
index 14ad6761ebd10..e3538928a094d 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul_vec.mir
@@ -2,10 +2,10 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @mul_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @mul_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @mul_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @mul_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @mul_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @mul_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @mul_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @mul_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir
index 2d5dea55e1c7b..0ae5c1871a24a 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir
@@ -2,9 +2,9 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @ptr_arg_in_regs(i32* %p) {entry: ret void}
-  define void @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i32* %p) {entry: ret void}
-  define void @ret_ptr(i8* %p) {entry: ret void}
+  define void @ptr_arg_in_regs(ptr %p) {entry: ret void}
+  define void @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, ptr %p) {entry: ret void}
+  define void @ret_ptr(ptr %p) {entry: ret void}
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/rem_and_div_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/rem_and_div_vec.mir
index c15fcbe363f81..05c40e515712c 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/rem_and_div_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/rem_and_div_vec.mir
@@ -2,25 +2,25 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @sdiv_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @sdiv_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @sdiv_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @sdiv_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
-
-  define void @srem_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @srem_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @srem_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @srem_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
-
-  define void @udiv_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @udiv_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @udiv_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @udiv_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
-
-  define void @urem_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @urem_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @urem_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @urem_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @sdiv_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sdiv_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sdiv_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sdiv_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
+
+  define void @srem_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @srem_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @srem_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @srem_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
+
+  define void @udiv_v16u8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @udiv_v8u16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @udiv_v4u32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @udiv_v2u64(ptr %a, ptr %b, ptr %c) { entry: ret void }
+
+  define void @urem_v16u8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @urem_v8u16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @urem_v4u32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @urem_v2u64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store.mir
index 075ee35c0db73..89a06032f404e 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store.mir
@@ -3,9 +3,9 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -mattr=+fp64,+mips32r2 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32FP64
 --- |
 
-  define void @store_i32(i32* %ptr) { entry: ret void }
-  define void @store_float(float* %ptr) { entry: ret void }
-  define void @store_double(double* %ptr) { entry: ret void }
+  define void @store_i32(ptr %ptr) { entry: ret void }
+  define void @store_float(ptr %ptr) { entry: ret void }
+  define void @store_double(ptr %ptr) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned.mir
index 0d296683a7541..8bbfa34de1dfd 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned.mir
@@ -8,19 +8,19 @@
 
   define void @store_float_align1(float %a) {
   entry:
-    store float %a, float* @float_align1, align 1
+    store float %a, ptr @float_align1, align 1
     ret void
   }
 
   define void @store_float_align4(float %a) {
   entry:
-    store float %a, float* @float_align4, align 4
+    store float %a, ptr @float_align4, align 4
     ret void
   }
 
   define void @store_i32_align8(i32 signext %a) {
   entry:
-    store i32 %a, i32* @i32_align8, align 8
+    store i32 %a, ptr @i32_align8, align 8
     ret void
   }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned_r6.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned_r6.mir
index c776f2aa3d3d0..7bcd1ebb9253c 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned_r6.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned_r6.mir
@@ -8,19 +8,19 @@
 
   define void @store_float_align1(float %a) #0 {
   entry:
-    store float %a, float* @float_align1, align 1
+    store float %a, ptr @float_align1, align 1
     ret void
   }
 
   define void @store_float_align8(float %a) #0 {
   entry:
-    store float %a, float* @float_align8, align 8
+    store float %a, ptr @float_align8, align 8
     ret void
   }
 
   define void @store_i32_align2(i32 signext %a) #0 {
   entry:
-    store i32 %a, i32* @i32_align2, align 2
+    store i32 %a, ptr @i32_align2, align 2
     ret void
   }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/sub_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/sub_vec.mir
index 974e089c671ee..229b1104e3f6a 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/sub_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/sub_vec.mir
@@ -2,10 +2,10 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @sub_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @sub_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @sub_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @sub_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @sub_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sub_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sub_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sub_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/truncStore_and_aExtLoad.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/truncStore_and_aExtLoad.mir
index 655d472d2f783..eed965fe3f06a 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/truncStore_and_aExtLoad.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/truncStore_and_aExtLoad.mir
@@ -2,9 +2,9 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @load_store_i8(i8* %px, i8* %py) {entry: ret void}
-  define void @load_store_i16(i16* %px, i16* %py) {entry: ret void}
-  define void @load_store_i32(i32* %px, i32* %py) {entry: ret void}
+  define void @load_store_i8(ptr %px, ptr %py) {entry: ret void}
+  define void @load_store_i16(ptr %px, ptr %py) {entry: ret void}
+  define void @load_store_i32(ptr %px, ptr %py) {entry: ret void}
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/zextLoad_and_sextLoad.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/zextLoad_and_sextLoad.mir
index 25f316057b111..1efa32ddd72b7 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/zextLoad_and_sextLoad.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/zextLoad_and_sextLoad.mir
@@ -2,10 +2,10 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @load1_s8_to_zextLoad1_s32(i8* %px) {entry: ret void}
-  define void @load2_s16_to_zextLoad2_s32(i16* %px) {entry: ret void}
-  define void @load1_s8_to_sextLoad1_s32(i8* %px) {entry: ret void}
-  define void @load2_s16_to_sextLoad2_s32(i16* %px) {entry: ret void}
+  define void @load1_s8_to_zextLoad1_s32(ptr %px) {entry: ret void}
+  define void @load2_s16_to_zextLoad2_s32(ptr %px) {entry: ret void}
+  define void @load1_s8_to_sextLoad1_s32(ptr %px) {entry: ret void}
+  define void @load2_s16_to_sextLoad2_s32(ptr %px) {entry: ret void}
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
index cf6b47ae81f31..592e7a459204a 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
@@ -11,7 +11,7 @@
   define void @add_i16_aext() {entry: ret void}
   define void @add_i64() {entry: ret void}
   define void @add_i128() {entry: ret void}
-  define void @uadd_with_overflow(i32 %lhs, i32 %rhs, i32* %padd, i1* %pcarry_flag) { ret void }
+  define void @uadd_with_overflow(i32 %lhs, i32 %rhs, ptr %padd, ptr %pcarry_flag) { ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec.mir
index 3b30544693a62..a7005566d6a4a 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec.mir
@@ -2,10 +2,10 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @add_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @add_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @add_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @add_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @add_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @add_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @add_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @add_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec_builtin.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec_builtin.mir
index ad865007f3d0c..f4ba16e09487f 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec_builtin.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec_builtin.mir
@@ -3,28 +3,28 @@
 --- |
 
   declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>)
-  define void @add_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @add_v16i8_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>)
-  define void @add_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @add_v8i16_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>)
-  define void @add_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @add_v4i32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>)
-  define void @add_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @add_v2i64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <16 x i8> @llvm.mips.addvi.b(<16 x i8>, i32 immarg)
-  define void @add_v16i8_builtin_imm(<16 x i8>* %a, <16 x i8>* %c) { entry: ret void }
+  define void @add_v16i8_builtin_imm(ptr %a, ptr %c) { entry: ret void }
 
   declare <8 x i16> @llvm.mips.addvi.h(<8 x i16>, i32 immarg)
-  define void @add_v8i16_builtin_imm(<8 x i16>* %a, <8 x i16>* %c) { entry: ret void }
+  define void @add_v8i16_builtin_imm(ptr %a, ptr %c) { entry: ret void }
 
   declare <4 x i32> @llvm.mips.addvi.w(<4 x i32>, i32 immarg)
-  define void @add_v4i32_builtin_imm(<4 x i32>* %a, <4 x i32>* %c) { entry: ret void }
+  define void @add_v4i32_builtin_imm(ptr %a, ptr %c) { entry: ret void }
 
   declare <2 x i64> @llvm.mips.addvi.d(<2 x i64>, i32 immarg)
-  define void @add_v2i64_builtin_imm(<2 x i64>* %a, <2 x i64>* %c) { entry: ret void }
+  define void @add_v2i64_builtin_imm(ptr %a, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/brindirect.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/brindirect.mir
index 705ace07a4b98..6431fb0da8e94 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/brindirect.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/brindirect.mir
@@ -2,9 +2,9 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define i32 @indirectbr(i8* %addr) {
+  define i32 @indirectbr(ptr %addr) {
   entry:
-    indirectbr i8* %addr, [label %L1, label %L2]
+    indirectbr ptr %addr, [label %L1, label %L2]
 
   L1:
     ret i32 0

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir
index 00f630bfd1283..43b2803a36e2a 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir
@@ -2,17 +2,17 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  declare i32 @puts(i8*)
-  declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i1)
+  declare i32 @puts(ptr)
+  declare void @llvm.memset.p0.i32(ptr, i8, i32, i1)
 
   define void @Print_c_N_times(i8 %c, i32 %N) {
   entry:
     %add = add i32 %N, 1
     %vla = alloca i8, i32 %add, align 1
-    call void @llvm.memset.p0i8.i32(i8* align 1 %vla, i8 %c, i32 %N, i1 false)
-    %arrayidx = getelementptr inbounds i8, i8* %vla, i32 %N
-    store i8 0, i8* %arrayidx, align 1
-    %call = call i32 @puts(i8* %vla)
+    call void @llvm.memset.p0.i32(ptr align 1 %vla, i8 %c, i32 %N, i1 false)
+    %arrayidx = getelementptr inbounds i8, ptr %vla, i32 %N
+    store i8 0, ptr %arrayidx, align 1
+    %call = call i32 @puts(ptr %vla)
     ret void
   }
 

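Two details are visible in the dyn_stackalloc conversion above. First, intrinsic names are remangled: with opaque pointers the pointee type no longer appears in the mangled suffix, so the address-space-0 pointer parameter is encoded as just p0. Second, getelementptr still carries its source element type as an explicit operand. A minimal sketch, following the declarations in the test above (parameter attributes omitted):

  ; typed-pointer mangling
  declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i1)
  ; opaque-pointer mangling
  declare void @llvm.memset.p0.i32(ptr, i8, i32, i1)

  ; GEP still names the element type it indexes over
  %arrayidx = getelementptr inbounds i8, ptr %vla, i32 %N
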
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec.mir
index 1190eb0c30a12..a6a7d8e873c56 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec.mir
@@ -2,8 +2,8 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @fabs_v4f32(<4 x float>* %a, <4 x float>* %c) { entry: ret void }
-  define void @fabs_v2f64(<2 x double>* %a, <2 x double>* %c) { entry: ret void }
+  define void @fabs_v4f32(ptr %a, ptr %c) { entry: ret void }
+  define void @fabs_v2f64(ptr %a, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec_builtin.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec_builtin.mir
index afab80f00afdd..f8cc8298fb70b 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec_builtin.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec_builtin.mir
@@ -3,10 +3,10 @@
 --- |
 
   declare <4 x float> @llvm.mips.fmax.a.w(<4 x float>, <4 x float>)
-  define void @fabs_v4f32_builtin(<4 x float>* %a, <4 x float>* %c) { entry: ret void }
+  define void @fabs_v4f32_builtin(ptr %a, ptr %c) { entry: ret void }
 
   declare <2 x double> @llvm.mips.fmax.a.d(<2 x double>, <2 x double>)
-  define void @fabs_v2f64_builtin(<2 x double>* %a, <2 x double>* %c) { entry: ret void }
+  define void @fabs_v2f64_builtin(ptr %a, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fence.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fence.mir
index 43b9bdfbfe9f1..d2074542cb5bd 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fence.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fence.mir
@@ -2,7 +2,7 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @atomic_load_i32(i32* %ptr) { ret void }
+  define void @atomic_load_i32(ptr %ptr) { ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations.mir
index 4060c17690218..ddc5f34245232 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations.mir
@@ -2,17 +2,17 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @fadd_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
-  define void @fadd_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+  define void @fadd_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @fadd_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
-  define void @fsub_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
-  define void @fsub_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+  define void @fsub_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @fsub_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
-  define void @fmul_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
-  define void @fmul_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+  define void @fmul_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @fmul_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
-  define void @fdiv_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
-  define void @fdiv_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+  define void @fdiv_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @fdiv_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations_builtin.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations_builtin.mir
index b1fdeeac53628..252584abcc2ea 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations_builtin.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations_builtin.mir
@@ -3,28 +3,28 @@
 --- |
 
   declare <4 x float> @llvm.mips.fadd.w(<4 x float>, <4 x float>)
-  define void @fadd_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
+  define void @fadd_v4f32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <2 x double> @llvm.mips.fadd.d(<2 x double>, <2 x double>)
-  define void @fadd_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+  define void @fadd_v2f64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <4 x float> @llvm.mips.fsub.w(<4 x float>, <4 x float>)
-  define void @fsub_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
+  define void @fsub_v4f32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <2 x double> @llvm.mips.fsub.d(<2 x double>, <2 x double>)
-  define void @fsub_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+  define void @fsub_v2f64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <4 x float> @llvm.mips.fmul.w(<4 x float>, <4 x float>)
-  define void @fmul_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
+  define void @fmul_v4f32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <2 x double> @llvm.mips.fmul.d(<2 x double>, <2 x double>)
-  define void @fmul_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+  define void @fmul_v2f64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <4 x float> @llvm.mips.fdiv.w(<4 x float>, <4 x float>)
-  define void @fdiv_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
+  define void @fdiv_v4f32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <2 x double> @llvm.mips.fdiv.d(<2 x double>, <2 x double>)
-  define void @fdiv_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+  define void @fdiv_v2f64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec.mir
index 599c8c0c8da9d..cae3ab4fc875b 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec.mir
@@ -2,8 +2,8 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @sqrt_v4f32(<4 x float>* %a, <4 x float>* %c) { entry: ret void }
-  define void @sqrt_v2f64(<2 x double>* %a, <2 x double>* %c) { entry: ret void }
+  define void @sqrt_v4f32(ptr %a, ptr %c) { entry: ret void }
+  define void @sqrt_v2f64(ptr %a, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec_builtin.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec_builtin.mir
index 36dfdbbdeaf03..eeb1a9484ba96 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec_builtin.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec_builtin.mir
@@ -3,10 +3,10 @@
 --- |
 
   declare <4 x float> @llvm.mips.fsqrt.w(<4 x float>)
-  define void @fsqrt_v4f32_builtin(<4 x float>* %a, <4 x float>* %c) { entry: ret void }
+  define void @fsqrt_v4f32_builtin(ptr %a, ptr %c) { entry: ret void }
 
   declare <2 x double> @llvm.mips.fsqrt.d(<2 x double>)
-  define void @fsqrt_v2f64_builtin(<2 x double>* %a, <2 x double>* %c) { entry: ret void }
+  define void @fsqrt_v2f64_builtin(ptr %a, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/global_address.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/global_address.mir
index 2056eda04fa91..b1129f52b923a 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/global_address.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/global_address.mir
@@ -5,7 +5,7 @@
   @.str = private unnamed_addr constant [11 x i8] c"hello %d \0A\00"
 
   define void @main() {entry: ret void}
-  declare i32 @printf(i8*, ...)
+  declare i32 @printf(ptr, ...)
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/load_4_unaligned.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/load_4_unaligned.mir
index dd76044a41617..8de7b2da18591 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/load_4_unaligned.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/load_4_unaligned.mir
@@ -14,49 +14,49 @@
 
   define float @load_float_align1() {
   entry:
-    %0 = load float, float* @float_align1, align 1
+    %0 = load float, ptr @float_align1, align 1
     ret float %0
   }
 
   define float @load_float_align2() {
   entry:
-    %0 = load float, float* @float_align2, align 2
+    %0 = load float, ptr @float_align2, align 2
     ret float %0
   }
 
   define float @load_float_align4() {
   entry:
-    %0 = load float, float* @float_align4, align 4
+    %0 = load float, ptr @float_align4, align 4
     ret float %0
   }
 
   define float @load_float_align8() {
   entry:
-    %0 = load float, float* @float_align8, align 8
+    %0 = load float, ptr @float_align8, align 8
     ret float %0
   }
 
   define i32 @load_i32_align1() {
   entry:
-    %0 = load i32, i32* @i32_align1, align 1
+    %0 = load i32, ptr @i32_align1, align 1
     ret i32 %0
   }
 
   define i32 @load_i32_align2() {
   entry:
-    %0 = load i32, i32* @i32_align2, align 2
+    %0 = load i32, ptr @i32_align2, align 2
     ret i32 %0
   }
 
   define i32 @load_i32_align4() {
   entry:
-    %0 = load i32, i32* @i32_align4, align 4
+    %0 = load i32, ptr @i32_align4, align 4
     ret i32 %0
   }
 
   define i32 @load_i32_align8() {
   entry:
-    %0 = load i32, i32* @i32_align8, align 8
+    %0 = load i32, ptr @i32_align8, align 8
     ret i32 %0
   }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir
index cc2a43d33b289..e96d826638541 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir
@@ -12,7 +12,7 @@
   define void @mul_i64() {entry: ret void}
   define void @mul_i128() {entry: ret void}
   define void @umulh_i64() {entry: ret void}
-  define void @umul_with_overflow(i32 %lhs, i32 %rhs, i32* %pmul, i1* %pcarry_flag) { ret void }
+  define void @umul_with_overflow(i32 %lhs, i32 %rhs, ptr %pmul, ptr %pcarry_flag) { ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec.mir
index a0934a4328d27..d61656b470fb0 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec.mir
@@ -2,10 +2,10 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @mul_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @mul_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @mul_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @mul_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @mul_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @mul_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @mul_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @mul_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec_builtin.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec_builtin.mir
index 30fb1cd1866a8..65d2b075fad2a 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec_builtin.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec_builtin.mir
@@ -3,16 +3,16 @@
 --- |
 
   declare <16 x i8> @llvm.mips.mulv.b(<16 x i8>, <16 x i8>)
-  define void @mul_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @mul_v16i8_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <8 x i16> @llvm.mips.mulv.h(<8 x i16>, <8 x i16>)
-  define void @mul_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @mul_v8i16_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <4 x i32> @llvm.mips.mulv.w(<4 x i32>, <4 x i32>)
-  define void @mul_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @mul_v4i32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <2 x i64> @llvm.mips.mulv.d(<2 x i64>, <2 x i64>)
-  define void @mul_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @mul_v2i64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/phi.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/phi.mir
index e5963d6fe4280..261bcd63c28a6 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/phi.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/phi.mir
@@ -77,10 +77,10 @@
     ret i64 %cond
   }
 
-  define void @phi_ambiguous_i64_in_fpr(i1 %cnd, i64* %i64_ptr_a, i64* %i64_ptr_b, i64* %i64_ptr_c) {
+  define void @phi_ambiguous_i64_in_fpr(i1 %cnd, ptr %i64_ptr_a, ptr %i64_ptr_b, ptr %i64_ptr_c) {
   entry:
-    %0 = load i64, i64* %i64_ptr_a, align 8
-    %1 = load i64, i64* %i64_ptr_b, align 8
+    %0 = load i64, ptr %i64_ptr_a, align 8
+    %1 = load i64, ptr %i64_ptr_b, align 8
     br i1 %cnd, label %cond.true, label %cond.false
 
   cond.true:                                        ; preds = %entry
@@ -91,7 +91,7 @@
 
   cond.end:                                         ; preds = %cond.false, %cond.true
     %cond = phi i64 [ %0, %cond.true ], [ %1, %cond.false ]
-    store i64 %cond, i64* %i64_ptr_c, align 8
+    store i64 %cond, ptr %i64_ptr_c, align 8
     ret void
   }
 
@@ -110,10 +110,10 @@
     ret float %cond
   }
 
-  define void @phi_ambiguous_float_in_gpr(i1 %cnd, float* %f32_ptr_a, float* %f32_ptr_b, float* %f32_ptr_c) {
+  define void @phi_ambiguous_float_in_gpr(i1 %cnd, ptr %f32_ptr_a, ptr %f32_ptr_b, ptr %f32_ptr_c) {
   entry:
-    %0 = load float, float* %f32_ptr_a, align 4
-    %1 = load float, float* %f32_ptr_b, align 4
+    %0 = load float, ptr %f32_ptr_a, align 4
+    %1 = load float, ptr %f32_ptr_b, align 4
     br i1 %cnd, label %cond.true, label %cond.false
 
   cond.true:                                        ; preds = %entry
@@ -124,7 +124,7 @@
 
   cond.end:                                         ; preds = %cond.false, %cond.true
     %cond = phi float [ %0, %cond.true ], [ %1, %cond.false ]
-    store float %cond, float* %f32_ptr_c, align 4
+    store float %cond, ptr %f32_ptr_c, align 4
     ret void
   }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir
index 805298ebd76ea..2e455a130e702 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir
@@ -2,9 +2,9 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @ptr_arg_in_regs(i32* %p) {entry: ret void}
-  define void @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i32* %p) {entry: ret void}
-  define void @ret_ptr(i8* %p) {entry: ret void}
+  define void @ptr_arg_in_regs(ptr %p) {entry: ret void}
+  define void @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, ptr %p) {entry: ret void}
+  define void @ret_ptr(ptr %p) {entry: ret void}
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec.mir
index 06be78b4533bc..7d58e1ad132b4 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec.mir
@@ -2,25 +2,25 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @sdiv_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @sdiv_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @sdiv_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @sdiv_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
-
-  define void @srem_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @srem_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @srem_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @srem_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
-
-  define void @udiv_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @udiv_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @udiv_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @udiv_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
-
-  define void @urem_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @urem_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @urem_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @urem_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @sdiv_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sdiv_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sdiv_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sdiv_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
+
+  define void @srem_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @srem_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @srem_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @srem_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
+
+  define void @udiv_v16u8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @udiv_v8u16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @udiv_v4u32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @udiv_v2u64(ptr %a, ptr %b, ptr %c) { entry: ret void }
+
+  define void @urem_v16u8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @urem_v8u16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @urem_v4u32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @urem_v2u64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec_builtin.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec_builtin.mir
index cd2dfc4630807..7d7749bd5de9c 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec_builtin.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec_builtin.mir
@@ -3,52 +3,52 @@
 --- |
 
   declare <16 x i8> @llvm.mips.div.s.b(<16 x i8>, <16 x i8>)
-  define void @sdiv_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @sdiv_v16i8_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <8 x i16> @llvm.mips.div.s.h(<8 x i16>, <8 x i16>)
-  define void @sdiv_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @sdiv_v8i16_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <4 x i32> @llvm.mips.div.s.w(<4 x i32>, <4 x i32>)
-  define void @sdiv_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @sdiv_v4i32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <2 x i64> @llvm.mips.div.s.d(<2 x i64>, <2 x i64>)
-  define void @sdiv_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @sdiv_v2i64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <16 x i8> @llvm.mips.mod.s.b(<16 x i8>, <16 x i8>)
-  define void @smod_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @smod_v16i8_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <8 x i16> @llvm.mips.mod.s.h(<8 x i16>, <8 x i16>)
-  define void @smod_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @smod_v8i16_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <4 x i32> @llvm.mips.mod.s.w(<4 x i32>, <4 x i32>)
-  define void @smod_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @smod_v4i32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <2 x i64> @llvm.mips.mod.s.d(<2 x i64>, <2 x i64>)
-  define void @smod_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @smod_v2i64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <16 x i8> @llvm.mips.div.u.b(<16 x i8>, <16 x i8>)
-  define void @udiv_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @udiv_v16i8_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <8 x i16> @llvm.mips.div.u.h(<8 x i16>, <8 x i16>)
-  define void @udiv_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @udiv_v8i16_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <4 x i32> @llvm.mips.div.u.w(<4 x i32>, <4 x i32>)
-  define void @udiv_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @udiv_v4i32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <2 x i64> @llvm.mips.div.u.d(<2 x i64>, <2 x i64>)
-  define void @udiv_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @udiv_v2i64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <16 x i8> @llvm.mips.mod.u.b(<16 x i8>, <16 x i8>)
-  define void @umod_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @umod_v16i8_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <8 x i16> @llvm.mips.mod.u.h(<8 x i16>, <8 x i16>)
-  define void @umod_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @umod_v8i16_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <4 x i32> @llvm.mips.mod.u.w(<4 x i32>, <4 x i32>)
-  define void @umod_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @umod_v4i32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <2 x i64> @llvm.mips.mod.u.d(<2 x i64>, <2 x i64>)
-  define void @umod_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @umod_v2i64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_4_unaligned.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_4_unaligned.mir
index 23d28a3789c71..698972063702f 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_4_unaligned.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_4_unaligned.mir
@@ -14,49 +14,49 @@
 
   define void @store_float_align1(float %a) {
   entry:
-    store float %a, float* @float_align1, align 1
+    store float %a, ptr @float_align1, align 1
     ret void
   }
 
   define void @store_float_align2(float %a) {
   entry:
-    store float %a, float* @float_align2, align 2
+    store float %a, ptr @float_align2, align 2
     ret void
   }
 
   define void @store_float_align4(float %a) {
   entry:
-    store float %a, float* @float_align4, align 4
+    store float %a, ptr @float_align4, align 4
     ret void
   }
 
   define void @store_float_align8(float %a) {
   entry:
-    store float %a, float* @float_align8, align 8
+    store float %a, ptr @float_align8, align 8
     ret void
   }
 
   define void @store_i32_align1(i32 signext %a) {
   entry:
-    store i32 %a, i32* @i32_align1, align 1
+    store i32 %a, ptr @i32_align1, align 1
     ret void
   }
 
   define void @store_i32_align2(i32 signext %a) {
   entry:
-    store i32 %a, i32* @i32_align2, align 2
+    store i32 %a, ptr @i32_align2, align 2
     ret void
   }
 
   define void @store_i32_align4(i32 signext %a) {
   entry:
-    store i32 %a, i32* @i32_align4, align 4
+    store i32 %a, ptr @i32_align4, align 4
     ret void
   }
 
   define void @store_i32_align8(i32 signext %a) {
   entry:
-    store i32 %a, i32* @i32_align8, align 8
+    store i32 %a, ptr @i32_align8, align 8
     ret void
   }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec.mir
index 9b8d977b39113..645c63b915f7c 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec.mir
@@ -2,10 +2,10 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @sub_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @sub_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @sub_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @sub_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @sub_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sub_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sub_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sub_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec_builtin.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec_builtin.mir
index efa9f473a9253..fd246645b2b2a 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec_builtin.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec_builtin.mir
@@ -3,28 +3,28 @@
 --- |
 
   declare <16 x i8> @llvm.mips.subv.b(<16 x i8>, <16 x i8>)
-  define void @sub_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+  define void @sub_v16i8_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <8 x i16> @llvm.mips.subv.h(<8 x i16>, <8 x i16>)
-  define void @sub_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+  define void @sub_v8i16_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <4 x i32> @llvm.mips.subv.w(<4 x i32>, <4 x i32>)
-  define void @sub_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+  define void @sub_v4i32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <2 x i64> @llvm.mips.subv.d(<2 x i64>, <2 x i64>)
-  define void @sub_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @sub_v2i64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
   declare <16 x i8> @llvm.mips.subvi.b(<16 x i8>, i32 immarg)
-  define void @sub_v16i8_builtin_imm(<16 x i8>* %a, <16 x i8>* %c) { entry: ret void }
+  define void @sub_v16i8_builtin_imm(ptr %a, ptr %c) { entry: ret void }
 
   declare <8 x i16> @llvm.mips.subvi.h(<8 x i16>, i32 immarg)
-  define void @sub_v8i16_builtin_imm(<8 x i16>* %a, <8 x i16>* %c) { entry: ret void }
+  define void @sub_v8i16_builtin_imm(ptr %a, ptr %c) { entry: ret void }
 
   declare <4 x i32> @llvm.mips.subvi.w(<4 x i32>, i32 immarg)
-  define void @sub_v4i32_builtin_imm(<4 x i32>* %a, <4 x i32>* %c) { entry: ret void }
+  define void @sub_v4i32_builtin_imm(ptr %a, ptr %c) { entry: ret void }
 
   declare <2 x i64> @llvm.mips.subvi.d(<2 x i64>, i32 immarg)
-  define void @sub_v2i64_builtin_imm(<2 x i64>* %a, <2 x i64>* %c) { entry: ret void }
+  define void @sub_v2i64_builtin_imm(ptr %a, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/zextLoad_and_sextLoad.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/zextLoad_and_sextLoad.mir
index 817a2a020f111..2e1c82c879f51 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/zextLoad_and_sextLoad.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/zextLoad_and_sextLoad.mir
@@ -2,16 +2,16 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @load1_s8_to_zextLoad1_s32(i8* %px) {entry: ret void}
-  define void @load2_s16_to_zextLoad2_s32(i16* %px) {entry: ret void}
-  define void @load1_s8_to_zextLoad1_s16(i8* %px) {entry: ret void}
-  define void @load1_s8_to_zextLoad1_s16_to_zextLoad1_s32(i8* %px) {entry: ret void}
-  define void @load4_s32_to_zextLoad4_s64(i8* %px) {entry: ret void}
-  define void @load1_s8_to_sextLoad1_s32(i8* %px) {entry: ret void}
-  define void @load2_s16_to_sextLoad2_s32(i16* %px) {entry: ret void}
-  define void @load1_s8_to_sextLoad1_s16(i8* %px) {entry: ret void}
-  define void @load1_s8_to_sextLoad1_s16_to_sextLoad1_s32(i8* %px) {entry: ret void}
-  define void @load4_s32_to_sextLoad4_s64(i8* %px) {entry: ret void}
+  define void @load1_s8_to_zextLoad1_s32(ptr %px) {entry: ret void}
+  define void @load2_s16_to_zextLoad2_s32(ptr %px) {entry: ret void}
+  define void @load1_s8_to_zextLoad1_s16(ptr %px) {entry: ret void}
+  define void @load1_s8_to_zextLoad1_s16_to_zextLoad1_s32(ptr %px) {entry: ret void}
+  define void @load4_s32_to_zextLoad4_s64(ptr %px) {entry: ret void}
+  define void @load1_s8_to_sextLoad1_s32(ptr %px) {entry: ret void}
+  define void @load2_s16_to_sextLoad2_s32(ptr %px) {entry: ret void}
+  define void @load1_s8_to_sextLoad1_s16(ptr %px) {entry: ret void}
+  define void @load1_s8_to_sextLoad1_s16_to_sextLoad1_s32(ptr %px) {entry: ret void}
+  define void @load4_s32_to_sextLoad4_s64(ptr %px) {entry: ret void}
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/truncStore_and_aExtLoad.mir b/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/truncStore_and_aExtLoad.mir
index 5289d88259f08..b68179aa0bda9 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/truncStore_and_aExtLoad.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/truncStore_and_aExtLoad.mir
@@ -2,8 +2,8 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=mips-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @load1_s8_to_load1_s32(i8* %px) {entry: ret void}
-  define void @load2_s16_to_load2_s32(i16* %px) {entry: ret void}
+  define void @load1_s8_to_load1_s32(ptr %px) {entry: ret void}
+  define void @load2_s16_to_load2_s32(ptr %px) {entry: ret void}
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/zextLoad_and_sextLoad.mir b/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/zextLoad_and_sextLoad.mir
index 6390ad42e23e8..cea581c614844 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/zextLoad_and_sextLoad.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/zextLoad_and_sextLoad.mir
@@ -2,16 +2,16 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=mips-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @load1_s8_to_zextLoad1_s32(i8* %px) {entry: ret void}
-  define void @load2_s16_to_zextLoad2_s32(i16* %px) {entry: ret void}
-  define void @load1_s8_to_zextLoad1_s16(i8* %px) {entry: ret void}
-  define void @load1_s8_to_zextLoad1_s16_to_zextLoad1_s32(i8* %px) {entry: ret void}
-  define void @load4_s32_to_zextLoad4_s64(i8* %px) {entry: ret void}
-  define void @load1_s8_to_sextLoad1_s32(i8* %px) {entry: ret void}
-  define void @load2_s16_to_sextLoad2_s32(i16* %px) {entry: ret void}
-  define void @load1_s8_to_sextLoad1_s16(i8* %px) {entry: ret void}
-  define void @load1_s8_to_sextLoad1_s16_to_sextLoad1_s32(i8* %px) {entry: ret void}
-  define void @load4_s32_to_sextLoad4_s64(i8* %px) {entry: ret void}
+  define void @load1_s8_to_zextLoad1_s32(ptr %px) {entry: ret void}
+  define void @load2_s16_to_zextLoad2_s32(ptr %px) {entry: ret void}
+  define void @load1_s8_to_zextLoad1_s16(ptr %px) {entry: ret void}
+  define void @load1_s8_to_zextLoad1_s16_to_zextLoad1_s32(ptr %px) {entry: ret void}
+  define void @load4_s32_to_zextLoad4_s64(ptr %px) {entry: ret void}
+  define void @load1_s8_to_sextLoad1_s32(ptr %px) {entry: ret void}
+  define void @load2_s16_to_sextLoad2_s32(ptr %px) {entry: ret void}
+  define void @load1_s8_to_sextLoad1_s16(ptr %px) {entry: ret void}
+  define void @load1_s8_to_sextLoad1_s16_to_sextLoad1_s32(ptr %px) {entry: ret void}
+  define void @load4_s32_to_sextLoad4_s64(ptr %px) {entry: ret void}
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/TypeInfoforMF_skipCopies.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/TypeInfoforMF_skipCopies.mir
index fcc34602d8642..011a05e133759 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/TypeInfoforMF_skipCopies.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/TypeInfoforMF_skipCopies.mir
@@ -2,8 +2,8 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @skipCopiesOutgoing(float* %ptr_a, float* %ptr_b, float* %ptr_c) {entry: ret void}
-  define void @skipCopiesIncoming(float* %float_ptr) {entry: ret void}
+  define void @skipCopiesOutgoing(ptr %ptr_a, ptr %ptr_b, ptr %ptr_c) {entry: ret void}
+  define void @skipCopiesIncoming(ptr %float_ptr) {entry: ret void}
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/add_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/add_vec.mir
index 4236c15333e22..a963d2dc500e5 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/add_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/add_vec.mir
@@ -2,10 +2,10 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @add_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @add_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @add_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @add_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @add_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @add_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @add_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @add_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/brindirect.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/brindirect.mir
index dee3e5d25cac4..918cf4ee236b1 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/brindirect.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/brindirect.mir
@@ -2,9 +2,9 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define i32 @indirectbr(i8* %addr) {
+  define i32 @indirectbr(ptr %addr) {
   entry:
-    indirectbr i8* %addr, [label %L1, label %L2]
+    indirectbr ptr %addr, [label %L1, label %L2]
 
   L1:
     ret i32 0

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fabs_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fabs_vec.mir
index 56dc87b966818..2b0476f3efcb3 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fabs_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fabs_vec.mir
@@ -2,8 +2,8 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @fabs_v4f32(<4 x float>* %a, <4 x float>* %c) { entry: ret void }
-  define void @fabs_v2f64(<2 x double>* %a, <2 x double>* %c) { entry: ret void }
+  define void @fabs_v4f32(ptr %a, ptr %c) { entry: ret void }
+  define void @fabs_v2f64(ptr %a, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fence.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fence.mir
index e9051c3ad7f06..9c7ec54d97435 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fence.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fence.mir
@@ -2,7 +2,7 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @atomic_load_i32(i32* %ptr) { ret void }
+  define void @atomic_load_i32(ptr %ptr) { ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/floating_point_vec_arithmetic_operations.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/floating_point_vec_arithmetic_operations.mir
index 11acce60ce1ba..b696be65dee6d 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/floating_point_vec_arithmetic_operations.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/floating_point_vec_arithmetic_operations.mir
@@ -2,17 +2,17 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @fadd_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
-  define void @fadd_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+  define void @fadd_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @fadd_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
-  define void @fsub_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
-  define void @fsub_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+  define void @fsub_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @fsub_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
-  define void @fmul_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
-  define void @fmul_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+  define void @fmul_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @fmul_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
-  define void @fdiv_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
-  define void @fdiv_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+  define void @fdiv_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @fdiv_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fsqrt_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fsqrt_vec.mir
index 8b15f07997d89..a918bae094b17 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fsqrt_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fsqrt_vec.mir
@@ -2,8 +2,8 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @sqrt_v4f32(<4 x float>* %a, <4 x float>* %c) { entry: ret void }
-  define void @sqrt_v2f64(<2 x double>* %a, <2 x double>* %c) { entry: ret void }
+  define void @sqrt_v4f32(ptr %a, ptr %c) { entry: ret void }
+  define void @sqrt_v2f64(ptr %a, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/global_address.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/global_address.mir
index cf19ddceaace0..0e20bc7cb95db 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/global_address.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/global_address.mir
@@ -5,7 +5,7 @@
   @.str = private unnamed_addr constant [11 x i8] c"hello %d \0A\00"
 
   define void @main() {entry: ret void}
-  declare i32 @printf(i8*, ...)
+  declare i32 @printf(ptr, ...)
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir
index cc1f9aa028fd8..ef607c1eb8bb1 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir
@@ -2,12 +2,12 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @load_i32(i32* %ptr) {entry: ret void}
-  define void @load_i64(i64* %ptr) {entry: ret void}
-  define void @load_ambiguous_i64_in_fpr(i64* %i64_ptr_a, i64* %i64_ptr_b) {entry: ret void}
-  define void @load_float(float* %ptr) {entry: ret void}
-  define void @load_ambiguous_float_in_gpr(float* %float_ptr_a, float* %float_ptr_b) {entry: ret void}
-  define void @load_double(double* %ptr) {entry: ret void}
+  define void @load_i32(ptr %ptr) {entry: ret void}
+  define void @load_i64(ptr %ptr) {entry: ret void}
+  define void @load_ambiguous_i64_in_fpr(ptr %i64_ptr_a, ptr %i64_ptr_b) {entry: ret void}
+  define void @load_float(ptr %ptr) {entry: ret void}
+  define void @load_ambiguous_float_in_gpr(ptr %float_ptr_a, ptr %float_ptr_b) {entry: ret void}
+  define void @load_double(ptr %ptr) {entry: ret void}
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_4_unaligned.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_4_unaligned.mir
index 863c26fe80892..142d7ea31e5cb 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_4_unaligned.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_4_unaligned.mir
@@ -9,19 +9,19 @@
 
   define float @load_float_align1() {
   entry:
-    %0 = load float, float* @float_align1, align 1
+    %0 = load float, ptr @float_align1, align 1
     ret float %0
   }
 
   define float @load_float_align4() {
   entry:
-    %0 = load float, float* @float_align4, align 4
+    %0 = load float, ptr @float_align4, align 4
     ret float %0
   }
 
   define i32 @load_i32_align8() {
   entry:
-    %0 = load i32, i32* @i32_align8, align 8
+    %0 = load i32, ptr @i32_align8, align 8
     ret i32 %0
   }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_store_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_store_vec.mir
index 71ac16162f872..64117a2d82635 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_store_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_store_vec.mir
@@ -2,12 +2,12 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=msa,+fp64 -mattr=nan2008 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @load_store_v16i8(<16 x i8>* %a, <16 x i8>* %b) { entry: ret void }
-  define void @load_store_v8i16(<8 x i16>* %a, <8 x i16>* %b) { entry: ret void }
-  define void @load_store_v4i32(<4 x i32>* %a, <4 x i32>* %b) { entry: ret void }
-  define void @load_store_v2i64(<2 x i64>* %a, <2 x i64>* %b) { entry: ret void }
-  define void @load_store_v4f32(<4 x float>* %a, <4 x float>* %b) { entry: ret void }
-  define void @load_store_v2f64(<2 x double>* %a, <2 x double>* %b) { entry: ret void }
+  define void @load_store_v16i8(ptr %a, ptr %b) { entry: ret void }
+  define void @load_store_v8i16(ptr %a, ptr %b) { entry: ret void }
+  define void @load_store_v4i32(ptr %a, ptr %b) { entry: ret void }
+  define void @load_store_v2i64(ptr %a, ptr %b) { entry: ret void }
+  define void @load_store_v4f32(ptr %a, ptr %b) { entry: ret void }
+  define void @load_store_v2f64(ptr %a, ptr %b) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir
index b8e5e2d22fe89..4226f2bb6ff55 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir
@@ -2,7 +2,7 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @long_chain_ambiguous_i64_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) {
+  define void @long_chain_ambiguous_i64_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
   entry:
     br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
 
@@ -13,15 +13,15 @@
     br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
 
   b.PHI.1.0:                                        ; preds = %pre.PHI.1.0
-    %phi1.0 = load i64, i64* %a
+    %phi1.0 = load i64, ptr %a
     br label %b.PHI.1
 
   b.PHI.1.1:                                        ; preds = %pre.PHI.1
-    %phi1.1 = load i64, i64* %b
+    %phi1.1 = load i64, ptr %b
     br label %b.PHI.1
 
   b.PHI.1.2:                                        ; preds = %pre.PHI.1.0
-    %phi1.2 = load i64, i64* %c
+    %phi1.2 = load i64, ptr %c
     br label %b.PHI.1
 
   b.PHI.1:                                          ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0
@@ -29,18 +29,18 @@
     br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
 
   b.PHI.1.end:                                      ; preds = %b.PHI.1
-    store i64 %phi1, i64* %result
+    store i64 %phi1, ptr %result
     ret void
 
   pre.PHI.2:                                        ; preds = %entry
     br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
 
   b.PHI.2.0:                                        ; preds = %pre.PHI.2
-    %phi2.0 = load i64, i64* %a
+    %phi2.0 = load i64, ptr %a
     br label %b.PHI.2
 
   b.PHI.2.1:                                        ; preds = %pre.PHI.2
-    %phi2.1 = load i64, i64* %b
+    %phi2.1 = load i64, ptr %b
     br label %b.PHI.2
 
   b.PHI.2:                                          ; preds = %b.PHI.2.1, %b.PHI.2.0
@@ -48,7 +48,7 @@
     br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
 
   b.PHI.2.end:                                      ; preds = %b.PHI.2
-    store i64 %phi2, i64* %result
+    store i64 %phi2, ptr %result
     ret void
 
   b.PHI.3:                                          ; preds = %b.PHI.2, %b.PHI.1
@@ -56,12 +56,12 @@
     %phi4 = phi i64 [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ]
     %sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4
     %sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3
-    store i64 %sel_3_1.2, i64* %result
-    store i64 %phi3, i64* %result
+    store i64 %sel_3_1.2, ptr %result
+    store i64 %phi3, ptr %result
     ret void
   }
 
-  define void @long_chain_i64_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) {
+  define void @long_chain_i64_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
   entry:
     br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
 
@@ -72,15 +72,15 @@
     br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
 
   b.PHI.1.0:                                        ; preds = %pre.PHI.1.0
-    %phi1.0 = load i64, i64* %a
+    %phi1.0 = load i64, ptr %a
     br label %b.PHI.1
 
   b.PHI.1.1:                                        ; preds = %pre.PHI.1
-    %phi1.1 = load i64, i64* %b
+    %phi1.1 = load i64, ptr %b
     br label %b.PHI.1
 
   b.PHI.1.2:                                        ; preds = %pre.PHI.1.0
-    %phi1.2 = load i64, i64* %c
+    %phi1.2 = load i64, ptr %c
     br label %b.PHI.1
 
   b.PHI.1:                                          ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0
@@ -88,18 +88,18 @@
     br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
 
   b.PHI.1.end:                                      ; preds = %b.PHI.1
-    store i64 %phi1, i64* %result
+    store i64 %phi1, ptr %result
     ret void
 
   pre.PHI.2:                                        ; preds = %entry
     br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
 
   b.PHI.2.0:                                        ; preds = %pre.PHI.2
-    %phi2.0 = load i64, i64* %a
+    %phi2.0 = load i64, ptr %a
     br label %b.PHI.2
 
   b.PHI.2.1:                                        ; preds = %pre.PHI.2
-    %phi2.1 = load i64, i64* %b
+    %phi2.1 = load i64, ptr %b
     br label %b.PHI.2
 
   b.PHI.2:                                          ; preds = %b.PHI.2.1, %b.PHI.2.0
@@ -107,7 +107,7 @@
     br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
 
   b.PHI.2.end:                                      ; preds = %b.PHI.2
-    store i64 %phi2, i64* %result
+    store i64 %phi2, ptr %result
     ret void
 
   b.PHI.3:                                          ; preds = %b.PHI.2, %b.PHI.1
@@ -115,12 +115,12 @@
     %phi4 = phi i64 [ %phi2, %b.PHI.2 ], [ 0, %b.PHI.1 ]
     %sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4
     %sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3
-    store i64 %sel_3_1.2, i64* %result
-    store i64 %phi3, i64* %result
+    store i64 %sel_3_1.2, ptr %result
+    store i64 %phi3, ptr %result
     ret void
   }
 
-  define void @long_chain_ambiguous_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) {
+  define void @long_chain_ambiguous_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
   entry:
     br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
 
@@ -131,15 +131,15 @@
     br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
 
   b.PHI.1.0:                                        ; preds = %pre.PHI.1.0
-    %phi1.0 = load double, double* %a
+    %phi1.0 = load double, ptr %a
     br label %b.PHI.1
 
   b.PHI.1.1:                                        ; preds = %pre.PHI.1
-    %phi1.1 = load double, double* %b
+    %phi1.1 = load double, ptr %b
     br label %b.PHI.1
 
   b.PHI.1.2:                                        ; preds = %pre.PHI.1.0
-    %phi1.2 = load double, double* %c
+    %phi1.2 = load double, ptr %c
     br label %b.PHI.1
 
   b.PHI.1:                                          ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0
@@ -147,18 +147,18 @@
     br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
 
   b.PHI.1.end:                                      ; preds = %b.PHI.1
-    store double %phi1, double* %result
+    store double %phi1, ptr %result
     ret void
 
   pre.PHI.2:                                        ; preds = %entry
     br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
 
   b.PHI.2.0:                                        ; preds = %pre.PHI.2
-    %phi2.0 = load double, double* %a
+    %phi2.0 = load double, ptr %a
     br label %b.PHI.2
 
   b.PHI.2.1:                                        ; preds = %pre.PHI.2
-    %phi2.1 = load double, double* %b
+    %phi2.1 = load double, ptr %b
     br label %b.PHI.2
 
   b.PHI.2:                                          ; preds = %b.PHI.2.1, %b.PHI.2.0
@@ -166,7 +166,7 @@
     br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
 
   b.PHI.2.end:                                      ; preds = %b.PHI.2
-    store double %phi2, double* %result
+    store double %phi2, ptr %result
     ret void
 
   b.PHI.3:                                          ; preds = %b.PHI.2, %b.PHI.1
@@ -174,12 +174,12 @@
     %phi4 = phi double [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ]
     %sel_1.2 = select i1 %cnd2, double %phi3, double %phi4
     %sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3
-    store double %sel_3_1.2, double* %result
-    store double %phi3, double* %result
+    store double %sel_3_1.2, ptr %result
+    store double %phi3, ptr %result
     ret void
   }
 
-  define void @long_chain_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) {
+  define void @long_chain_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
   entry:
     br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
 
@@ -190,15 +190,15 @@
     br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
 
   b.PHI.1.0:                                        ; preds = %pre.PHI.1.0
-    %phi1.0 = load double, double* %a
+    %phi1.0 = load double, ptr %a
     br label %b.PHI.1
 
   b.PHI.1.1:                                        ; preds = %pre.PHI.1
-    %phi1.1 = load double, double* %b
+    %phi1.1 = load double, ptr %b
     br label %b.PHI.1
 
   b.PHI.1.2:                                        ; preds = %pre.PHI.1.0
-    %phi1.2 = load double, double* %c
+    %phi1.2 = load double, ptr %c
     br label %b.PHI.1
 
   b.PHI.1:                                          ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0
@@ -206,18 +206,18 @@
     br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
 
   b.PHI.1.end:                                      ; preds = %b.PHI.1
-    store double %phi1, double* %result
+    store double %phi1, ptr %result
     ret void
 
   pre.PHI.2:                                        ; preds = %entry
     br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
 
   b.PHI.2.0:                                        ; preds = %pre.PHI.2
-    %phi2.0 = load double, double* %a
+    %phi2.0 = load double, ptr %a
     br label %b.PHI.2
 
   b.PHI.2.1:                                        ; preds = %pre.PHI.2
-    %phi2.1 = load double, double* %b
+    %phi2.1 = load double, ptr %b
     br label %b.PHI.2
 
   b.PHI.2:                                          ; preds = %b.PHI.2.1, %b.PHI.2.0
@@ -225,7 +225,7 @@
     br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
 
   b.PHI.2.end:                                      ; preds = %b.PHI.2
-    store double %phi2, double* %result
+    store double %phi2, ptr %result
     ret void
 
   b.PHI.3:                                          ; preds = %b.PHI.2, %b.PHI.1
@@ -233,8 +233,8 @@
     %phi4 = phi double [ %phi2, %b.PHI.2 ], [ 0.000000e+00, %b.PHI.1 ]
     %sel_1.2 = select i1 %cnd2, double %phi3, double %phi4
     %sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3
-    store double %sel_3_1.2, double* %result
-    store double %phi3, double* %result
+    store double %sel_3_1.2, ptr %result
+    store double %phi3, ptr %result
     ret void
   }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir
index b8e5e2d22fe89..4226f2bb6ff55 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir
@@ -2,7 +2,7 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @long_chain_ambiguous_i64_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) {
+  define void @long_chain_ambiguous_i64_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
   entry:
     br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
 
@@ -13,15 +13,15 @@
     br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
 
   b.PHI.1.0:                                        ; preds = %pre.PHI.1.0
-    %phi1.0 = load i64, i64* %a
+    %phi1.0 = load i64, ptr %a
     br label %b.PHI.1
 
   b.PHI.1.1:                                        ; preds = %pre.PHI.1
-    %phi1.1 = load i64, i64* %b
+    %phi1.1 = load i64, ptr %b
     br label %b.PHI.1
 
   b.PHI.1.2:                                        ; preds = %pre.PHI.1.0
-    %phi1.2 = load i64, i64* %c
+    %phi1.2 = load i64, ptr %c
     br label %b.PHI.1
 
   b.PHI.1:                                          ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0
@@ -29,18 +29,18 @@
     br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
 
   b.PHI.1.end:                                      ; preds = %b.PHI.1
-    store i64 %phi1, i64* %result
+    store i64 %phi1, ptr %result
     ret void
 
   pre.PHI.2:                                        ; preds = %entry
     br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
 
   b.PHI.2.0:                                        ; preds = %pre.PHI.2
-    %phi2.0 = load i64, i64* %a
+    %phi2.0 = load i64, ptr %a
     br label %b.PHI.2
 
   b.PHI.2.1:                                        ; preds = %pre.PHI.2
-    %phi2.1 = load i64, i64* %b
+    %phi2.1 = load i64, ptr %b
     br label %b.PHI.2
 
   b.PHI.2:                                          ; preds = %b.PHI.2.1, %b.PHI.2.0
@@ -48,7 +48,7 @@
     br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
 
   b.PHI.2.end:                                      ; preds = %b.PHI.2
-    store i64 %phi2, i64* %result
+    store i64 %phi2, ptr %result
     ret void
 
   b.PHI.3:                                          ; preds = %b.PHI.2, %b.PHI.1
@@ -56,12 +56,12 @@
     %phi4 = phi i64 [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ]
     %sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4
     %sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3
-    store i64 %sel_3_1.2, i64* %result
-    store i64 %phi3, i64* %result
+    store i64 %sel_3_1.2, ptr %result
+    store i64 %phi3, ptr %result
     ret void
   }
 
-  define void @long_chain_i64_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) {
+  define void @long_chain_i64_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
   entry:
     br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
 
@@ -72,15 +72,15 @@
     br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
 
   b.PHI.1.0:                                        ; preds = %pre.PHI.1.0
-    %phi1.0 = load i64, i64* %a
+    %phi1.0 = load i64, ptr %a
     br label %b.PHI.1
 
   b.PHI.1.1:                                        ; preds = %pre.PHI.1
-    %phi1.1 = load i64, i64* %b
+    %phi1.1 = load i64, ptr %b
     br label %b.PHI.1
 
   b.PHI.1.2:                                        ; preds = %pre.PHI.1.0
-    %phi1.2 = load i64, i64* %c
+    %phi1.2 = load i64, ptr %c
     br label %b.PHI.1
 
   b.PHI.1:                                          ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0
@@ -88,18 +88,18 @@
     br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
 
   b.PHI.1.end:                                      ; preds = %b.PHI.1
-    store i64 %phi1, i64* %result
+    store i64 %phi1, ptr %result
     ret void
 
   pre.PHI.2:                                        ; preds = %entry
     br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
 
   b.PHI.2.0:                                        ; preds = %pre.PHI.2
-    %phi2.0 = load i64, i64* %a
+    %phi2.0 = load i64, ptr %a
     br label %b.PHI.2
 
   b.PHI.2.1:                                        ; preds = %pre.PHI.2
-    %phi2.1 = load i64, i64* %b
+    %phi2.1 = load i64, ptr %b
     br label %b.PHI.2
 
   b.PHI.2:                                          ; preds = %b.PHI.2.1, %b.PHI.2.0
@@ -107,7 +107,7 @@
     br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
 
   b.PHI.2.end:                                      ; preds = %b.PHI.2
-    store i64 %phi2, i64* %result
+    store i64 %phi2, ptr %result
     ret void
 
   b.PHI.3:                                          ; preds = %b.PHI.2, %b.PHI.1
@@ -115,12 +115,12 @@
     %phi4 = phi i64 [ %phi2, %b.PHI.2 ], [ 0, %b.PHI.1 ]
     %sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4
     %sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3
-    store i64 %sel_3_1.2, i64* %result
-    store i64 %phi3, i64* %result
+    store i64 %sel_3_1.2, ptr %result
+    store i64 %phi3, ptr %result
     ret void
   }
 
-  define void @long_chain_ambiguous_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) {
+  define void @long_chain_ambiguous_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
   entry:
     br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
 
@@ -131,15 +131,15 @@
     br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
 
   b.PHI.1.0:                                        ; preds = %pre.PHI.1.0
-    %phi1.0 = load double, double* %a
+    %phi1.0 = load double, ptr %a
     br label %b.PHI.1
 
   b.PHI.1.1:                                        ; preds = %pre.PHI.1
-    %phi1.1 = load double, double* %b
+    %phi1.1 = load double, ptr %b
     br label %b.PHI.1
 
   b.PHI.1.2:                                        ; preds = %pre.PHI.1.0
-    %phi1.2 = load double, double* %c
+    %phi1.2 = load double, ptr %c
     br label %b.PHI.1
 
   b.PHI.1:                                          ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0
@@ -147,18 +147,18 @@
     br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
 
   b.PHI.1.end:                                      ; preds = %b.PHI.1
-    store double %phi1, double* %result
+    store double %phi1, ptr %result
     ret void
 
   pre.PHI.2:                                        ; preds = %entry
     br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
 
   b.PHI.2.0:                                        ; preds = %pre.PHI.2
-    %phi2.0 = load double, double* %a
+    %phi2.0 = load double, ptr %a
     br label %b.PHI.2
 
   b.PHI.2.1:                                        ; preds = %pre.PHI.2
-    %phi2.1 = load double, double* %b
+    %phi2.1 = load double, ptr %b
     br label %b.PHI.2
 
   b.PHI.2:                                          ; preds = %b.PHI.2.1, %b.PHI.2.0
@@ -166,7 +166,7 @@
     br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
 
   b.PHI.2.end:                                      ; preds = %b.PHI.2
-    store double %phi2, double* %result
+    store double %phi2, ptr %result
     ret void
 
   b.PHI.3:                                          ; preds = %b.PHI.2, %b.PHI.1
@@ -174,12 +174,12 @@
     %phi4 = phi double [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ]
     %sel_1.2 = select i1 %cnd2, double %phi3, double %phi4
     %sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3
-    store double %sel_3_1.2, double* %result
-    store double %phi3, double* %result
+    store double %sel_3_1.2, ptr %result
+    store double %phi3, ptr %result
     ret void
   }
 
-  define void @long_chain_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) {
+  define void @long_chain_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
   entry:
     br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
 
@@ -190,15 +190,15 @@
     br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
 
   b.PHI.1.0:                                        ; preds = %pre.PHI.1.0
-    %phi1.0 = load double, double* %a
+    %phi1.0 = load double, ptr %a
     br label %b.PHI.1
 
   b.PHI.1.1:                                        ; preds = %pre.PHI.1
-    %phi1.1 = load double, double* %b
+    %phi1.1 = load double, ptr %b
     br label %b.PHI.1
 
   b.PHI.1.2:                                        ; preds = %pre.PHI.1.0
-    %phi1.2 = load double, double* %c
+    %phi1.2 = load double, ptr %c
     br label %b.PHI.1
 
   b.PHI.1:                                          ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0
@@ -206,18 +206,18 @@
     br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
 
   b.PHI.1.end:                                      ; preds = %b.PHI.1
-    store double %phi1, double* %result
+    store double %phi1, ptr %result
     ret void
 
   pre.PHI.2:                                        ; preds = %entry
     br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
 
   b.PHI.2.0:                                        ; preds = %pre.PHI.2
-    %phi2.0 = load double, double* %a
+    %phi2.0 = load double, ptr %a
     br label %b.PHI.2
 
   b.PHI.2.1:                                        ; preds = %pre.PHI.2
-    %phi2.1 = load double, double* %b
+    %phi2.1 = load double, ptr %b
     br label %b.PHI.2
 
   b.PHI.2:                                          ; preds = %b.PHI.2.1, %b.PHI.2.0
@@ -225,7 +225,7 @@
     br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
 
   b.PHI.2.end:                                      ; preds = %b.PHI.2
-    store double %phi2, double* %result
+    store double %phi2, ptr %result
     ret void
 
   b.PHI.3:                                          ; preds = %b.PHI.2, %b.PHI.1
@@ -233,8 +233,8 @@
     %phi4 = phi double [ %phi2, %b.PHI.2 ], [ 0.000000e+00, %b.PHI.1 ]
     %sel_1.2 = select i1 %cnd2, double %phi3, double %phi4
     %sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3
-    store double %sel_3_1.2, double* %result
-    store double %phi3, double* %result
+    store double %sel_3_1.2, ptr %result
+    store double %phi3, ptr %result
     ret void
   }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul.mir
index 056cfdc4cfb14..b0fc873c6d063 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul.mir
@@ -3,7 +3,7 @@
 --- |
 
   define void @mul_i32(i32 %x, i32 %y) {entry: ret void}
-  define void @umul_with_overflow(i32 %lhs, i32 %rhs, i32* %pmul, i1* %pcarry_flag) { ret void }
+  define void @umul_with_overflow(i32 %lhs, i32 %rhs, ptr %pmul, ptr %pcarry_flag) { ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul_vec.mir
index 874f3e560ed8a..4d6bce2063d49 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul_vec.mir
@@ -2,10 +2,10 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @mul_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @mul_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @mul_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @mul_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @mul_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @mul_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @mul_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @mul_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/phi.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/phi.mir
index 4744e36daa360..dc758986e9fb4 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/phi.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/phi.mir
@@ -32,10 +32,10 @@
     ret i64 %cond
   }
 
-  define void @phi_ambiguous_i64_in_fpr(i1 %cnd, i64* %i64_ptr_a, i64* %i64_ptr_b, i64* %i64_ptr_c) {
+  define void @phi_ambiguous_i64_in_fpr(i1 %cnd, ptr %i64_ptr_a, ptr %i64_ptr_b, ptr %i64_ptr_c) {
   entry:
-    %0 = load i64, i64* %i64_ptr_a, align 8
-    %1 = load i64, i64* %i64_ptr_b, align 8
+    %0 = load i64, ptr %i64_ptr_a, align 8
+    %1 = load i64, ptr %i64_ptr_b, align 8
     br i1 %cnd, label %cond.true, label %cond.false
 
   cond.true:                                        ; preds = %entry
@@ -46,7 +46,7 @@
 
   cond.end:                                         ; preds = %cond.false, %cond.true
     %cond = phi i64 [ %0, %cond.true ], [ %1, %cond.false ]
-    store i64 %cond, i64* %i64_ptr_c, align 8
+    store i64 %cond, ptr %i64_ptr_c, align 8
     ret void
   }
 
@@ -65,10 +65,10 @@
     ret float %cond
   }
 
-  define void @phi_ambiguous_float_in_gpr(i1 %cnd, float* %f32_ptr_a, float* %f32_ptr_b, float* %f32_ptr_c) {
+  define void @phi_ambiguous_float_in_gpr(i1 %cnd, ptr %f32_ptr_a, ptr %f32_ptr_b, ptr %f32_ptr_c) {
   entry:
-    %0 = load float, float* %f32_ptr_a, align 4
-    %1 = load float, float* %f32_ptr_b, align 4
+    %0 = load float, ptr %f32_ptr_a, align 4
+    %1 = load float, ptr %f32_ptr_b, align 4
     br i1 %cnd, label %cond.true, label %cond.false
 
   cond.true:                                        ; preds = %entry
@@ -79,7 +79,7 @@
 
   cond.end:                                         ; preds = %cond.false, %cond.true
     %cond = phi float [ %0, %cond.true ], [ %1, %cond.false ]
-    store float %cond, float* %f32_ptr_c, align 4
+    store float %cond, ptr %f32_ptr_c, align 4
     ret void
   }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir
index b5c6efce6a10d..044ab1320816e 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir
@@ -2,9 +2,9 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @ptr_arg_in_regs(i32* %p) {entry: ret void}
-  define void @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i32* %p) {entry: ret void}
-  define void @ret_ptr(i8* %p) {entry: ret void}
+  define void @ptr_arg_in_regs(ptr %p) {entry: ret void}
+  define void @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, ptr %p) {entry: ret void}
+  define void @ret_ptr(ptr %p) {entry: ret void}
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/rem_and_div_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/rem_and_div_vec.mir
index 0863b09355a5d..cef6101e2038d 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/rem_and_div_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/rem_and_div_vec.mir
@@ -2,25 +2,25 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @sdiv_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @sdiv_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @sdiv_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @sdiv_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
-
-  define void @srem_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @srem_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @srem_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @srem_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
-
-  define void @udiv_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @udiv_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @udiv_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @udiv_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
-
-  define void @urem_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @urem_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @urem_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @urem_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @sdiv_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sdiv_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sdiv_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sdiv_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
+
+  define void @srem_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @srem_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @srem_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @srem_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
+
+  define void @udiv_v16u8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @udiv_v8u16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @udiv_v4u32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @udiv_v2u64(ptr %a, ptr %b, ptr %c) { entry: ret void }
+
+  define void @urem_v16u8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @urem_v8u16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @urem_v4u32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @urem_v2u64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/select.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/select.mir
index 93e6b72b87611..20f18fe704d56 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/select.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/select.mir
@@ -5,9 +5,9 @@
   define void @select_i32(i32, i32) {entry: ret void}
   define void @select_ptr(i32, i32) {entry: ret void}
   define void @select_i64() {entry: ret void}
-  define void @select_ambiguous_i64_in_fpr(i64* %i64_ptr_a, i64* %i64_ptr_b, i64* %i64_ptr_c) {entry: ret void}
+  define void @select_ambiguous_i64_in_fpr(ptr %i64_ptr_a, ptr %i64_ptr_b, ptr %i64_ptr_c) {entry: ret void}
   define void @select_float() {entry: ret void}
-  define void @select_ambiguous_float_in_gpr(float* %f32_ptr_a, float* %f32_ptr_b, float* %f32_ptr_c) {entry: ret void}
+  define void @select_ambiguous_float_in_gpr(ptr %f32_ptr_a, ptr %f32_ptr_b, ptr %f32_ptr_c) {entry: ret void}
   define void @select_double() {entry: ret void}
 
 ...

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir
index fbc32e880d4c2..80bf04adadaca 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir
@@ -2,10 +2,10 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @store_i32(i32* %ptr) { entry: ret void }
-  define void @store_i64(i64* %ptr) { entry: ret void }
-  define void @store_float(float* %ptr) { entry: ret void }
-  define void @store_double(double* %ptr) { entry: ret void }
+  define void @store_i32(ptr %ptr) { entry: ret void }
+  define void @store_i64(ptr %ptr) { entry: ret void }
+  define void @store_float(ptr %ptr) { entry: ret void }
+  define void @store_double(ptr %ptr) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store_4_unaligned.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store_4_unaligned.mir
index b57b161d5c6e1..3f705ebea309f 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store_4_unaligned.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store_4_unaligned.mir
@@ -9,19 +9,19 @@
 
   define void @store_float_align1(float %a) {
   entry:
-    store float %a, float* @float_align1, align 1
+    store float %a, ptr @float_align1, align 1
     ret void
   }
 
   define void @store_float_align4(float %a) {
   entry:
-    store float %a, float* @float_align4, align 4
+    store float %a, ptr @float_align4, align 4
     ret void
   }
 
   define void @store_i32_align8(i32 signext %a) {
   entry:
-    store i32 %a, i32* @i32_align8, align 8
+    store i32 %a, ptr @i32_align8, align 8
     ret void
   }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/sub_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/sub_vec.mir
index cc66a985a1abd..b2114a9fb3a1c 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/sub_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/sub_vec.mir
@@ -2,10 +2,10 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
 
-  define void @sub_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
-  define void @sub_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
-  define void @sub_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
-  define void @sub_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+  define void @sub_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sub_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sub_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+  define void @sub_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/test_TypeInfoforMF.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/test_TypeInfoforMF.mir
index c56572d094a9f..0d81e0129def9 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/test_TypeInfoforMF.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/test_TypeInfoforMF.mir
@@ -2,14 +2,14 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @outgoing_gpr(i32* %i32_ptr) {entry: ret void}
-  define void @outgoing_fpr(float* %float_ptr) {entry: ret void}
-  define void @outgoing_gpr_instr(i32* %i32_ptr1, i32* %i32_ptr2) {entry: ret void}
-  define void @outgoing_fpr_instr(float* %float_ptr1, float* %float_ptr2) {entry: ret void}
-  define void @incoming_gpr(i32* %a) {entry: ret void}
-  define void @incoming_fpr(float* %a) {entry: ret void}
-  define void @incoming_i32_instr(i32* %i32_ptr) {entry: ret void}
-  define void @incoming_float_instr(float* %float_ptr) {entry: ret void}
+  define void @outgoing_gpr(ptr %i32_ptr) {entry: ret void}
+  define void @outgoing_fpr(ptr %float_ptr) {entry: ret void}
+  define void @outgoing_gpr_instr(ptr %i32_ptr1, ptr %i32_ptr2) {entry: ret void}
+  define void @outgoing_fpr_instr(ptr %float_ptr1, ptr %float_ptr2) {entry: ret void}
+  define void @incoming_gpr(ptr %a) {entry: ret void}
+  define void @incoming_fpr(ptr %a) {entry: ret void}
+  define void @incoming_i32_instr(ptr %i32_ptr) {entry: ret void}
+  define void @incoming_float_instr(ptr %float_ptr) {entry: ret void}
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/truncStore_and_aExtLoad.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/truncStore_and_aExtLoad.mir
index d7f549fd92220..5ed6dc4fc55fb 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/truncStore_and_aExtLoad.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/truncStore_and_aExtLoad.mir
@@ -2,9 +2,9 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @load_store_i8(i8* %px, i8* %py) {entry: ret void}
-  define void @load_store_i16(i16* %px, i16* %py) {entry: ret void}
-  define void @load_store_i32(i32* %px, i32* %py) {entry: ret void}
+  define void @load_store_i8(ptr %px, ptr %py) {entry: ret void}
+  define void @load_store_i16(ptr %px, ptr %py) {entry: ret void}
+  define void @load_store_i32(ptr %px, ptr %py) {entry: ret void}
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/zextLoad_and_sextLoad.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/zextLoad_and_sextLoad.mir
index 44baa0d07e6d6..1ffed379a4ffc 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/zextLoad_and_sextLoad.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/zextLoad_and_sextLoad.mir
@@ -2,12 +2,12 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
 
-  define void @load1_s8_to_zextLoad1_s32(i8* %px) {entry: ret void}
-  define void @load2_s16_to_zextLoad2_s32(i16* %px) {entry: ret void}
-  define void @load4_s32_to_zextLoad4_s64(i8* %px) {entry: ret void}
-  define void @load1_s8_to_sextLoad1_s32(i8* %px) {entry: ret void}
-  define void @load2_s16_to_sextLoad2_s32(i16* %px) {entry: ret void}
-  define void @load4_s32_to_sextLoad4_s64(i8* %px) {entry: ret void}
+  define void @load1_s8_to_zextLoad1_s32(ptr %px) {entry: ret void}
+  define void @load2_s16_to_zextLoad2_s32(ptr %px) {entry: ret void}
+  define void @load4_s32_to_zextLoad4_s64(ptr %px) {entry: ret void}
+  define void @load1_s8_to_sextLoad1_s32(ptr %px) {entry: ret void}
+  define void @load2_s16_to_sextLoad2_s32(ptr %px) {entry: ret void}
+  define void @load4_s32_to_sextLoad4_s64(ptr %px) {entry: ret void}
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/compactbranches/compact-branch-implicit-def.mir b/llvm/test/CodeGen/Mips/compactbranches/compact-branch-implicit-def.mir
index c964988b7ea06..33e43ad06f124 100644
--- a/llvm/test/CodeGen/Mips/compactbranches/compact-branch-implicit-def.mir
+++ b/llvm/test/CodeGen/Mips/compactbranches/compact-branch-implicit-def.mir
@@ -14,33 +14,33 @@
   entry:
     %retval = alloca i32, align 4
     %a.addr = alloca i32, align 4
-    store i32 %a, i32* %a.addr, align 4
-    %0 = load i32, i32* %a.addr, align 4
+    store i32 %a, ptr %a.addr, align 4
+    %0 = load i32, ptr %a.addr, align 4
     %cmp = icmp sgt i32 %0, 5
     br i1 %cmp, label %if.then, label %if.else
 
   if.then:                                          ; preds = %entry
-    %1 = load i32, i32* %a.addr, align 4
-    %2 = load i32, i32* %a.addr, align 4
+    %1 = load i32, ptr %a.addr, align 4
+    %2 = load i32, ptr %a.addr, align 4
     %add = add nsw i32 %1, %2
-    store i32 %add, i32* %retval, align 4
+    store i32 %add, ptr %retval, align 4
     br label %return
 
   if.else:                                          ; preds = %entry
-    %3 = load i32, i32* %a.addr, align 4
+    %3 = load i32, ptr %a.addr, align 4
     %call = call i32 @g(i32 signext %3)
-    store i32 %call, i32* %retval, align 4
+    store i32 %call, ptr %retval, align 4
     br label %return
 
   return:                                           ; preds = %if.else, %if.then
-    %4 = load i32, i32* %retval, align 4
+    %4 = load i32, ptr %retval, align 4
     ret i32 %4
   }
 
   declare i32 @g(i32 signext)
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**)
+  declare void @llvm.stackprotector(ptr, ptr)
 
   !llvm.ident = !{!0}
 

diff --git a/llvm/test/CodeGen/Mips/delay-slot-filler-bundled-insts.mir b/llvm/test/CodeGen/Mips/delay-slot-filler-bundled-insts.mir
index 066330522ac65..9cde85bd211a9 100644
--- a/llvm/test/CodeGen/Mips/delay-slot-filler-bundled-insts.mir
+++ b/llvm/test/CodeGen/Mips/delay-slot-filler-bundled-insts.mir
@@ -16,28 +16,28 @@
 --- |
   target datalayout = "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128"
   target triple = "mips64-unknown-freebsd"
-  declare i8* @func_a(i64 zeroext)
-  declare i8* @func_b(i64 zeroext)
+  declare ptr @func_a(i64 zeroext)
+  declare ptr @func_b(i64 zeroext)
   ; Function Attrs: nounwind
-  define i8* @test(i64 zeroext %nbytes) local_unnamed_addr #0 {
+  define ptr @test(i64 zeroext %nbytes) local_unnamed_addr #0 {
   entry:
     %cmp = icmp eq i64 %nbytes, 0
     br i1 %cmp, label %if.else, label %if.then
 
   if.then:                                          ; preds = %entry
-    %call = tail call i8* @func_a(i64 zeroext %nbytes)
+    %call = tail call ptr @func_a(i64 zeroext %nbytes)
     br label %return
 
   if.else:                                          ; preds = %entry
-    %call1 = tail call i8* @func_b(i64 zeroext 0)
+    %call1 = tail call ptr @func_b(i64 zeroext 0)
     br label %return
 
   return:                                           ; preds = %if.else, %if.then
-    %retval.0 = phi i8* [ %call, %if.then ], [ %call1, %if.else ]
-    ret i8* %retval.0
+    %retval.0 = phi ptr [ %call, %if.then ], [ %call1, %if.else ]
+    ret ptr %retval.0
   }
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #0
+  declare void @llvm.stackprotector(ptr, ptr) #0
 
   attributes #0 = { nounwind }
 

diff --git a/llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-call.mir b/llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-call.mir
index 02fef74eeefb0..fe72a1720fb01 100644
--- a/llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-call.mir
+++ b/llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-call.mir
@@ -7,7 +7,7 @@
 
 # CHECK: Bad machine code: invalid instruction when using jump guards!
 --- |
-  define i32 @fooTail(i32 (i32)* nocapture %f1) {
+  define i32 @fooTail(ptr nocapture %f1) {
   entry:
     %0 = tail call i32 %f1(i32 14)
     ret i32 %0

diff --git a/llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-tailcall.mir b/llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-tailcall.mir
index d313cad4d8d53..e87af5892e34b 100644
--- a/llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-tailcall.mir
+++ b/llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-tailcall.mir
@@ -7,7 +7,7 @@
 
 # CHECK: Bad machine code: invalid instruction when using jump guards!
 --- |
-  define i32 @fooTail(i32 (i32)* nocapture %f1) {
+  define i32 @fooTail(ptr nocapture %f1) {
   entry:
     %0 = tail call i32 %f1(i32 14)
     ret i32 %0

diff --git a/llvm/test/CodeGen/Mips/micromips-eva.mir b/llvm/test/CodeGen/Mips/micromips-eva.mir
index 3ab4b22737c4d..f45118d7fb7b2 100644
--- a/llvm/test/CodeGen/Mips/micromips-eva.mir
+++ b/llvm/test/CodeGen/Mips/micromips-eva.mir
@@ -10,40 +10,40 @@
   ; Function Attrs: noinline nounwind optnone
   define void @_Z3foov() {
   entry:
-    %0 = load i8, i8* getelementptr inbounds ([13 x i8], [13 x i8]* @bArray, i32 0, i32 5), align 1
+    %0 = load i8, ptr getelementptr inbounds ([13 x i8], ptr @bArray, i32 0, i32 5), align 1
     %conv = sext i8 %0 to i32
     %sub = sub nsw i32 %conv, 7
     %conv1 = trunc i32 %sub to i8
-    store i8 %conv1, i8* getelementptr inbounds ([13 x i8], [13 x i8]* @bArray, i32 0, i32 3), align 1
-    %1 = load i8, i8* getelementptr inbounds ([13 x i8], [13 x i8]* @bArray, i32 0, i32 5), align 1
+    store i8 %conv1, ptr getelementptr inbounds ([13 x i8], ptr @bArray, i32 0, i32 3), align 1
+    %1 = load i8, ptr getelementptr inbounds ([13 x i8], ptr @bArray, i32 0, i32 5), align 1
     %conv2 = sext i8 %1 to i32
     %sub3 = sub nsw i32 %conv2, 7
     %conv4 = trunc i32 %sub3 to i8
-    store i8 %conv4, i8* getelementptr inbounds ([13 x i8], [13 x i8]* @bArray, i32 0, i32 3), align 1
-    %2 = load i16, i16* getelementptr inbounds ([13 x i16], [13 x i16]* @hArray, i32 0, i32 5), align 2
+    store i8 %conv4, ptr getelementptr inbounds ([13 x i8], ptr @bArray, i32 0, i32 3), align 1
+    %2 = load i16, ptr getelementptr inbounds ([13 x i16], ptr @hArray, i32 0, i32 5), align 2
     %conv5 = sext i16 %2 to i32
     %sub6 = sub nsw i32 %conv5, 7
     %conv7 = trunc i32 %sub6 to i16
-    store i16 %conv7, i16* getelementptr inbounds ([13 x i16], [13 x i16]* @hArray, i32 0, i32 3), align 2
-    %3 = load i16, i16* getelementptr inbounds ([13 x i16], [13 x i16]* @hArray, i32 0, i32 5), align 2
+    store i16 %conv7, ptr getelementptr inbounds ([13 x i16], ptr @hArray, i32 0, i32 3), align 2
+    %3 = load i16, ptr getelementptr inbounds ([13 x i16], ptr @hArray, i32 0, i32 5), align 2
     %conv8 = sext i16 %3 to i32
     %sub9 = sub nsw i32 %conv8, 7
     %conv10 = trunc i32 %sub9 to i16
-    store i16 %conv10, i16* getelementptr inbounds ([13 x i16], [13 x i16]* @hArray, i32 0, i32 3), align 2
-    %4 = load i32, i32* getelementptr inbounds ([13 x i32], [13 x i32]* @wArray, i32 0, i32 5), align 4
+    store i16 %conv10, ptr getelementptr inbounds ([13 x i16], ptr @hArray, i32 0, i32 3), align 2
+    %4 = load i32, ptr getelementptr inbounds ([13 x i32], ptr @wArray, i32 0, i32 5), align 4
     %sub11 = sub nsw i32 %4, 7
-    store i32 %sub11, i32* getelementptr inbounds ([13 x i32], [13 x i32]* @wArray, i32 0, i32 3), align 4
+    store i32 %sub11, ptr getelementptr inbounds ([13 x i32], ptr @wArray, i32 0, i32 3), align 4
     ret void
   }
 
   ; Function Attrs: noinline nounwind optnone
-  define i32 @_Z3barPi(i32* %z) {
+  define i32 @_Z3barPi(ptr %z) {
   entry:
-    %z.addr = alloca i32*, align 4
-    store i32* %z, i32** %z.addr, align 4
-    %0 = load i32*, i32** %z.addr, align 4
+    %z.addr = alloca ptr, align 4
+    store ptr %z, ptr %z.addr, align 4
+    %0 = load ptr, ptr %z.addr, align 4
     fence seq_cst
-    %1 = atomicrmw add i32* %0, i32 42 monotonic
+    %1 = atomicrmw add ptr %0, i32 42 monotonic
     fence seq_cst
     %2 = add i32 %1, 42
     ret i32 %2
@@ -100,25 +100,25 @@ body:             |
   bb.0.entry:
     %0:gpr32 = LUi target-flags(mips-abs-hi) @bArray
     %1:gpr32 = ADDiu killed %0, target-flags(mips-abs-lo) @bArray
-    %2:gpr32 = LBuE %1, 5 :: (dereferenceable load (s8) from `i8* getelementptr inbounds ([13 x i8], [13 x i8]* @bArray, i32 0, i32 5)`)
+    %2:gpr32 = LBuE %1, 5 :: (dereferenceable load (s8) from `ptr getelementptr inbounds ([13 x i8], ptr @bArray, i32 0, i32 5)`)
     %3:gpr32 = ADDiu killed %2, -7
-    SBE killed %3, %1, 3 :: (store (s8) into `i8* getelementptr inbounds ([13 x i8], [13 x i8]* @bArray, i32 0, i32 3)`)
-    %4:gpr32 = LBE %1, 5 :: (dereferenceable load (s8) from `i8* getelementptr inbounds ([13 x i8], [13 x i8]* @bArray, i32 0, i32 5)`)
+    SBE killed %3, %1, 3 :: (store (s8) into `ptr getelementptr inbounds ([13 x i8], ptr @bArray, i32 0, i32 3)`)
+    %4:gpr32 = LBE %1, 5 :: (dereferenceable load (s8) from `ptr getelementptr inbounds ([13 x i8], ptr @bArray, i32 0, i32 5)`)
     %5:gpr32 = ADDiu killed %4, -7
-    SBE killed %5, %1, 3 :: (store (s8) into `i8* getelementptr inbounds ([13 x i8], [13 x i8]* @bArray, i32 0, i32 3)`)
+    SBE killed %5, %1, 3 :: (store (s8) into `ptr getelementptr inbounds ([13 x i8], ptr @bArray, i32 0, i32 3)`)
     %6:gpr32 = LUi target-flags(mips-abs-hi) @hArray
     %7:gpr32 = ADDiu killed %6, target-flags(mips-abs-lo) @hArray
-    %8:gpr32 = LHuE %7, 10 :: (dereferenceable load (s16) from `i16* getelementptr inbounds ([13 x i16], [13 x i16]* @hArray, i32 0, i32 5)`)
+    %8:gpr32 = LHuE %7, 10 :: (dereferenceable load (s16) from `ptr getelementptr inbounds ([13 x i16], ptr @hArray, i32 0, i32 5)`)
     %9:gpr32 = ADDiu killed %8, -7
-    SHE killed %9, %7, 6 :: (store (s16) into `i16* getelementptr inbounds ([13 x i16], [13 x i16]* @hArray, i32 0, i32 3)`)
-    %10:gpr32 = LHE %7, 10 :: (dereferenceable load (s16) from `i16* getelementptr inbounds ([13 x i16], [13 x i16]* @hArray, i32 0, i32 5)`)
+    SHE killed %9, %7, 6 :: (store (s16) into `ptr getelementptr inbounds ([13 x i16], ptr @hArray, i32 0, i32 3)`)
+    %10:gpr32 = LHE %7, 10 :: (dereferenceable load (s16) from `ptr getelementptr inbounds ([13 x i16], ptr @hArray, i32 0, i32 5)`)
     %11:gpr32 = ADDiu killed %10, -7
-    SHE killed %11, %7, 6 :: (store (s16) into `i16* getelementptr inbounds ([13 x i16], [13 x i16]* @hArray, i32 0, i32 3)`)
+    SHE killed %11, %7, 6 :: (store (s16) into `ptr getelementptr inbounds ([13 x i16], ptr @hArray, i32 0, i32 3)`)
     %12:gpr32 = LUi target-flags(mips-abs-hi) @wArray
     %13:gpr32 = ADDiu killed %12, target-flags(mips-abs-lo) @wArray
-    %14:gpr32 = LWE %13, 20 :: (dereferenceable load (s32) from `i32* getelementptr inbounds ([13 x i32], [13 x i32]* @wArray, i32 0, i32 5)`)
+    %14:gpr32 = LWE %13, 20 :: (dereferenceable load (s32) from `ptr getelementptr inbounds ([13 x i32], ptr @wArray, i32 0, i32 5)`)
     %15:gpr32 = ADDiu killed %14, -7
-    SWE killed %15, %13, 12 :: (store (s32) into `i32* getelementptr inbounds ([13 x i32], [13 x i32]* @wArray, i32 0, i32 3)`)
+    SWE killed %15, %13, 12 :: (store (s32) into `ptr getelementptr inbounds ([13 x i32], ptr @wArray, i32 0, i32 3)`)
     RetRA
 
 ...
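
The micromips-eva update above is representative of why the conversion is
mechanical: load, store, getelementptr, and atomicrmw each carry an
explicit value or source-element type of their own, so dropping the
pointee from the pointer operand loses no information. The same respelling
reaches the MIR memory operands, where the quoted IR value changes from
"i8* getelementptr ..." to "ptr getelementptr ...". A self-contained
sketch of the pattern (hypothetical global @g, not from the commit):

  @g = global [13 x i8] zeroinitializer

  define void @sketch() {
  entry:
    ; the GEP keeps [13 x i8] as its explicit source element type;
    ; only the pointer operand's spelling changes
    %p = getelementptr inbounds [13 x i8], ptr @g, i32 0, i32 5
    %v = load i8, ptr %p, align 1
    store i8 %v, ptr @g, align 1
    ret void
  }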

diff --git a/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwp-swp.mir b/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwp-swp.mir
index feab7ecb671af..7ffdb409fd2b6 100644
--- a/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwp-swp.mir
+++ b/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwp-swp.mir
@@ -3,15 +3,15 @@
 # RUN:      %s -o - | FileCheck %s
 
 --- |
-  define void @f1(i32* %adr, i32 %val) { ret void }
-  define void @f2(i32* %adr, i32 %val) { ret void }
-  define void @f3(i32* %adr, i32 %val) { ret void }
-  define void @f4(i32* %adr, i32 %val) { ret void }
+  define void @f1(ptr %adr, i32 %val) { ret void }
+  define void @f2(ptr %adr, i32 %val) { ret void }
+  define void @f3(ptr %adr, i32 %val) { ret void }
+  define void @f4(ptr %adr, i32 %val) { ret void }
 
-  declare i32* @f()
+  declare ptr @f()
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**)
+  declare void @llvm.stackprotector(ptr, ptr)
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-no-lwp-swp.mir b/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-no-lwp-swp.mir
index c9044f306b922..2b136a3ff499a 100644
--- a/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-no-lwp-swp.mir
+++ b/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-no-lwp-swp.mir
@@ -3,15 +3,15 @@
 # RUN:      %s -o - | FileCheck %s
 
 --- |
-  define void @f1(i32* %adr, i32 %val) { ret void }
-  define void @f2(i32* %adr, i32 %val) { ret void }
-  define void @f3(i32* %adr, i32 %val) { ret void }
-  define void @f4(i32* %adr, i32 %val) { ret void }
+  define void @f1(ptr %adr, i32 %val) { ret void }
+  define void @f2(ptr %adr, i32 %val) { ret void }
+  define void @f3(ptr %adr, i32 %val) { ret void }
+  define void @f4(ptr %adr, i32 %val) { ret void }
 
-  declare i32* @f()
+  declare ptr @f()
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**)
+  declare void @llvm.stackprotector(ptr, ptr)
 ...
 ---
 # CHECK-LABEL: name: f1

diff --git a/llvm/test/CodeGen/Mips/mirparser/target-flags-pic-mxgot-tls.mir b/llvm/test/CodeGen/Mips/mirparser/target-flags-pic-mxgot-tls.mir
index 00b40898549ff..4370084f2621d 100644
--- a/llvm/test/CodeGen/Mips/mirparser/target-flags-pic-mxgot-tls.mir
+++ b/llvm/test/CodeGen/Mips/mirparser/target-flags-pic-mxgot-tls.mir
@@ -19,32 +19,32 @@
   entry:
     %call = tail call i32 @_Z1gi(i32 signext %asd)
     %add = add nsw i32 %call, %asd
-    %0 = load i32, i32* @v, align 4
+    %0 = load i32, ptr @v, align 4
     %add1 = add nsw i32 %add, %0
-    %.b.i.i = load i1, i1* @__tls_guard, align 1
+    %.b.i.i = load i1, ptr @__tls_guard, align 1
     br i1 %.b.i.i, label %entry._ZTW1k.exit_crit_edge, label %init.i.i
 
   entry._ZTW1k.exit_crit_edge:
-    %.pre = load i32, i32* @k, align 4
+    %.pre = load i32, ptr @k, align 4
     br label %_ZTW1k.exit
 
   init.i.i:
-    store i1 true, i1* @__tls_guard, align 1
+    store i1 true, ptr @__tls_guard, align 1
     %call.i.i.i = tail call i32 @_Z1gi(i32 signext 3)
-    store i32 %call.i.i.i, i32* @k, align 4
+    store i32 %call.i.i.i, ptr @k, align 4
     br label %_ZTW1k.exit
 
   _ZTW1k.exit:
     %1 = phi i32 [ %.pre, %entry._ZTW1k.exit_crit_edge ], [ %call.i.i.i, %init.i.i ]
     %add2 = add nsw i32 %add1, %1
-    br i1 icmp ne (void ()* @_ZTH1j, void ()* null), label %2, label %_ZTW1j.exit
+    br i1 icmp ne (ptr @_ZTH1j, ptr null), label %2, label %_ZTW1j.exit
 
   ; <label>:2:
     tail call void @_ZTH1j()
     br label %_ZTW1j.exit
 
   _ZTW1j.exit:
-    %3 = load i32, i32* @j, align 4
+    %3 = load i32, ptr @j, align 4
     %add3 = add nsw i32 %add2, %3
     ret i32 %add3
   }

diff --git a/llvm/test/CodeGen/Mips/mirparser/target-flags-pic-o32.mir b/llvm/test/CodeGen/Mips/mirparser/target-flags-pic-o32.mir
index 1e646f77b2b23..d9703c74fc5ce 100644
--- a/llvm/test/CodeGen/Mips/mirparser/target-flags-pic-o32.mir
+++ b/llvm/test/CodeGen/Mips/mirparser/target-flags-pic-o32.mir
@@ -13,9 +13,9 @@
   entry:
     %call = tail call i32 @_Z1gi(i32 signext %asd)
     %add = add nsw i32 %call, %asd
-    %0 = load i32, i32* @v, align 4
+    %0 = load i32, ptr @v, align 4
     %add1 = add nsw i32 %add, %0
-    %1 = load i32, i32* @j, align 4
+    %1 = load i32, ptr @j, align 4
     %add2 = add nsw i32 %add1, %1
     ret i32 %add2
   }

diff --git a/llvm/test/CodeGen/Mips/mirparser/target-flags-pic.mir b/llvm/test/CodeGen/Mips/mirparser/target-flags-pic.mir
index 7dd9f299e25fc..803a2d701fae4 100644
--- a/llvm/test/CodeGen/Mips/mirparser/target-flags-pic.mir
+++ b/llvm/test/CodeGen/Mips/mirparser/target-flags-pic.mir
@@ -13,9 +13,9 @@
   entry:
     %call = tail call i32 @_Z1gi(i32 signext %asd)
     %add = add nsw i32 %call, %asd
-    %0 = load i32, i32* @v, align 4
+    %0 = load i32, ptr @v, align 4
     %add1 = add nsw i32 %add, %0
-    %1 = load i32, i32* @j, align 4
+    %1 = load i32, ptr @j, align 4
     %add2 = add nsw i32 %add1, %1
     ret i32 %add2
   }

diff --git a/llvm/test/CodeGen/Mips/mirparser/target-flags-static-tls.mir b/llvm/test/CodeGen/Mips/mirparser/target-flags-static-tls.mir
index da0ff99375561..e273b8e8ce6a6 100644
--- a/llvm/test/CodeGen/Mips/mirparser/target-flags-static-tls.mir
+++ b/llvm/test/CodeGen/Mips/mirparser/target-flags-static-tls.mir
@@ -18,32 +18,32 @@
   entry:
     %call = tail call i32 @_Z1gi(i32 signext %asd)
     %add = add nsw i32 %call, %asd
-    %0 = load i32, i32* @v, align 4
+    %0 = load i32, ptr @v, align 4
     %add1 = add nsw i32 %add, %0
-    %.b.i.i = load i1, i1* @__tls_guard, align 1
+    %.b.i.i = load i1, ptr @__tls_guard, align 1
     br i1 %.b.i.i, label %entry._ZTW1k.exit_crit_edge, label %init.i.i
 
   entry._ZTW1k.exit_crit_edge:
-    %.pre = load i32, i32* @k, align 4
+    %.pre = load i32, ptr @k, align 4
     br label %_ZTW1k.exit
 
   init.i.i:
-    store i1 true, i1* @__tls_guard, align 1
+    store i1 true, ptr @__tls_guard, align 1
     %call.i.i.i = tail call i32 @_Z1gi(i32 signext 3)
-    store i32 %call.i.i.i, i32* @k, align 4
+    store i32 %call.i.i.i, ptr @k, align 4
     br label %_ZTW1k.exit
 
   _ZTW1k.exit:
     %1 = phi i32 [ %.pre, %entry._ZTW1k.exit_crit_edge ], [ %call.i.i.i, %init.i.i ]
     %add2 = add nsw i32 %add1, %1
-    br i1 icmp ne (void ()* @_ZTH1j, void ()* null), label %2, label %_ZTW1j.exit
+    br i1 icmp ne (ptr @_ZTH1j, ptr null), label %2, label %_ZTW1j.exit
 
   ; <label>:2:
     tail call void @_ZTH1j()
     br label %_ZTW1j.exit
 
   _ZTW1j.exit:
-    %3 = load i32, i32* @j, align 4
+    %3 = load i32, ptr @j, align 4
     %add3 = add nsw i32 %add2, %3
     ret i32 %add3
   }

diff --git a/llvm/test/CodeGen/Mips/unaligned-memops-mapping.mir b/llvm/test/CodeGen/Mips/unaligned-memops-mapping.mir
index e5ff7ec9dfb5e..662b9bc4b66d0 100644
--- a/llvm/test/CodeGen/Mips/unaligned-memops-mapping.mir
+++ b/llvm/test/CodeGen/Mips/unaligned-memops-mapping.mir
@@ -4,17 +4,17 @@
 # Test that MIPS unaligned load/store instructions can be mapped to their
 # corresponding microMIPS instructions.
 --- |
-  define void @g(i32* %a, i32* %b) {
+  define void @g(ptr %a, ptr %b) {
   entry:
-    %0 = load i32, i32* %a, align 1
-    store i32 %0, i32* %b, align 1
+    %0 = load i32, ptr %a, align 1
+    store i32 %0, ptr %b, align 1
     ret void
   }
 
-  define void @g2(i32* %a, i32* %b) {
+  define void @g2(ptr %a, ptr %b) {
   entry:
-    %0 = load i32, i32* %a, align 1
-    store i32 %0, i32* %b, align 1
+    %0 = load i32, ptr %a, align 1
+    store i32 %0, ptr %b, align 1
     ret void
   }
 ...

diff --git a/llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessNoProfileData.mir b/llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessNoProfileData.mir
index 8eef233e44fca..9706873dc37fd 100644
--- a/llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessNoProfileData.mir
+++ b/llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessNoProfileData.mir
@@ -20,7 +20,7 @@
 --- |
   target datalayout = "e-m:e-i64:64-n32:64"
 
-  define dso_local void @test(void (i32)* nocapture %fp, i32 signext %Arg, i32 signext %Len, i32* nocapture %Ptr) {
+  define dso_local void @test(ptr nocapture %fp, i32 signext %Arg, i32 signext %Len, ptr nocapture %Ptr) {
   entry:
     tail call void asm sideeffect "#NOTHING", "~{r2}"()
     %cmp6 = icmp sgt i32 %Len, 0
@@ -35,9 +35,9 @@
 
   for.body:                                         ; preds = %for.inc, %for.body.lr.ph
     %i.07 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
-    %0 = load i32, i32* %Ptr, align 4
+    %0 = load i32, ptr %Ptr, align 4
     %1 = add i32 %i.07, %0
-    store i32 %1, i32* %Ptr, align 4
+    store i32 %1, ptr %Ptr, align 4
     br i1 %cmp1, label %if.then, label %for.inc
 
   if.then:                                          ; preds = %for.body
@@ -52,7 +52,7 @@
   }
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #0
+  declare void @llvm.stackprotector(ptr, ptr) #0
 
   attributes #0 = { nounwind }
 

diff --git a/llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessProfileData.mir b/llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessProfileData.mir
index ad16daa7783ab..88ee6633c15fe 100644
--- a/llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessProfileData.mir
+++ b/llvm/test/CodeGen/PowerPC/DisableHoistingDueToBlockHotnessProfileData.mir
@@ -21,7 +21,7 @@
   target triple = "powerpc64le-unknown-linux-gnu"
 
   ; Function Attrs: nounwind
-  define dso_local void @test(void (i32)* nocapture %fp, i32 signext %Arg, i32 signext %Len, i32* nocapture %Ptr) local_unnamed_addr #0 !prof !29 !section_prefix !30 {
+  define dso_local void @test(ptr nocapture %fp, i32 signext %Arg, i32 signext %Len, ptr nocapture %Ptr) local_unnamed_addr #0 !prof !29 !section_prefix !30 {
   entry:
     tail call void asm sideeffect "#NOTHING", "~{r2}"() #1, !srcloc !31
     %cmp6 = icmp sgt i32 %Len, 0
@@ -36,9 +36,9 @@
 
   for.body:                                         ; preds = %for.inc, %for.body.lr.ph
     %i.07 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
-    %0 = load i32, i32* %Ptr, align 4, !tbaa !33
+    %0 = load i32, ptr %Ptr, align 4, !tbaa !33
     %1 = add i32 %i.07, %0
-    store i32 %1, i32* %Ptr, align 4, !tbaa !33
+    store i32 %1, ptr %Ptr, align 4, !tbaa !33
     br i1 %cmp1, label %if.then, label %for.inc, !prof !37
 
   if.then:                                          ; preds = %for.body
@@ -53,7 +53,7 @@
   }
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
 
   attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+htm,+power8-vector,+vsx,-power9-vector" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { nounwind }

diff --git a/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir b/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir
index 96a0b4123aa46..41e21248a3f0e 100644
--- a/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir
+++ b/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir
@@ -28,7 +28,7 @@
   }
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
   
   attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+htm,+power8-vector,+vsx,-power9-vector" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { nounwind }

diff --git a/llvm/test/CodeGen/PowerPC/aantidep-def-ec.mir b/llvm/test/CodeGen/PowerPC/aantidep-def-ec.mir
index 56ce8cc8ff0df..13db5281a641c 100644
--- a/llvm/test/CodeGen/PowerPC/aantidep-def-ec.mir
+++ b/llvm/test/CodeGen/PowerPC/aantidep-def-ec.mir
@@ -9,21 +9,20 @@
   @tasklist_lock = external global %struct.rwlock_t.0.22.58.68.242.244, align 1
   
   ; Function Attrs: nounwind
-  define void @mm_update_next_owner(i8** %p1, i32* %p2) #0 {
+  define void @mm_update_next_owner(ptr %p1, ptr %p2) #0 {
   entry:
-    %0 = load i8*, i8** %p1, align 8
+    %0 = load ptr, ptr %p1, align 8
     br i1 undef, label %do.body.92, label %for.body.21
   
   for.body.21:                                      ; preds = %entry
     unreachable
   
   do.body.92:                                       ; preds = %entry
-    %usage = getelementptr inbounds i8, i8* %0, i64 -48
-    %counter.i = bitcast i8* %usage to i32*
-    %call95 = tail call signext i32 bitcast (i32 (...)* @__raw_read_unlock to i32 (%struct.rwlock_t.0.22.58.68.242.244*)*)(%struct.rwlock_t.0.22.58.68.242.244* nonnull @tasklist_lock) #1
-    store volatile i32 0, i32* %p2, align 4
+    %usage = getelementptr inbounds i8, ptr %0, i64 -48
+    %call95 = tail call signext i32 @__raw_read_unlock(ptr nonnull @tasklist_lock) #1
+    store volatile i32 0, ptr %p2, align 4
     tail call void asm sideeffect "#compiler barrier", "~{memory}"() #1
-    %1 = tail call i32 asm sideeffect "\0Alwsync \0A1:\09lwarx\09$0,0,$1\09\09# atomic_dec_return\0A\09addic\09$0,$0,-1\0A\09stwcx.\09$0,0,$1\0A\09bne-\091b\0Async \0A", "=&r,r,~{cc},~{xer},~{memory}"(i32* %counter.i) #1
+    %1 = tail call i32 asm sideeffect "\0Alwsync \0A1:\09lwarx\09$0,0,$1\09\09# atomic_dec_return\0A\09addic\09$0,$0,-1\0A\09stwcx.\09$0,0,$1\0A\09bne-\091b\0Async \0A", "=&r,r,~{cc},~{xer},~{memory}"(ptr %usage) #1
     %cmp.i = icmp eq i32 %1, 0
     br i1 %cmp.i, label %if.then.i, label %put_task_struct.exit
   

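aantidep-def-ec.mir is one of the few tests where the conversion removes
instructions instead of merely respelling types: with a single pointer
type, "bitcast i8* %usage to i32*" is a no-op, so %counter.i disappears
and the inline asm consumes %usage directly, and the call through
"bitcast (i32 (...)* @__raw_read_unlock to ...)" becomes a plain direct
call. The general rule, as a sketch with hypothetical names:

  declare i32 @callee(ptr)

  define i32 @sketch(ptr %p) {
  entry:
    ; typed IR needed:  %q = bitcast i8* %p to i32*
    ; opaque IR uses %p for every pointee type, so the cast vanishes
    %v = load i32, ptr %p, align 4
    %r = call i32 @callee(ptr %p)
    %s = add i32 %v, %r
    ret i32 %s
  }
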
diff --git a/llvm/test/CodeGen/PowerPC/addisdtprelha-nonr3.mir b/llvm/test/CodeGen/PowerPC/addisdtprelha-nonr3.mir
index c52a55f17b303..e74fc9e12f155 100644
--- a/llvm/test/CodeGen/PowerPC/addisdtprelha-nonr3.mir
+++ b/llvm/test/CodeGen/PowerPC/addisdtprelha-nonr3.mir
@@ -10,8 +10,8 @@
   ; Function Attrs: nounwind
   define void @test1() #0 {
   entry:
-    store i1 true, i1* @x, align 1
-    store i32 20, i32* @y, align 4
+    store i1 true, ptr @x, align 1
+    store i32 20, ptr @y, align 4
     ret void
   }
   

diff --git a/llvm/test/CodeGen/PowerPC/block-placement-1.mir b/llvm/test/CodeGen/PowerPC/block-placement-1.mir
index cff7f33d9de17..f91ab630112ca 100644
--- a/llvm/test/CodeGen/PowerPC/block-placement-1.mir
+++ b/llvm/test/CodeGen/PowerPC/block-placement-1.mir
@@ -5,9 +5,9 @@
   source_filename = "test.ll"
   target datalayout = "e-m:e-i64:64-n32:64"
   
-  @_ZTIl = external constant i8*
-  @_ZTIi = external constant i8*
-  @_ZTIc = external constant i8*
+  @_ZTIl = external constant ptr
+  @_ZTIi = external constant ptr
+  @_ZTIc = external constant ptr
   
   define dso_local void @_Z6calleev() local_unnamed_addr {
   entry:
@@ -17,7 +17,7 @@
   
   declare void @__cxa_rethrow() local_unnamed_addr
   
-  define dso_local void @_Z14TestSinglePredv() local_unnamed_addr personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+  define dso_local void @_Z14TestSinglePredv() local_unnamed_addr personality ptr @__gxx_personality_v0 {
   entry:
     br label %for.body
   
@@ -33,25 +33,25 @@
     unreachable
   
   lpad:                                             ; preds = %for.body
-    %0 = landingpad { i8*, i32 }
-            catch i8* bitcast (i8** @_ZTIl to i8*)
-            catch i8* bitcast (i8** @_ZTIi to i8*)
-            catch i8* null
-    %1 = extractvalue { i8*, i32 } %0, 0
-    %2 = extractvalue { i8*, i32 } %0, 1
-    %3 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIl to i8*))
+    %0 = landingpad { ptr, i32 }
+            catch ptr @_ZTIl
+            catch ptr @_ZTIi
+            catch ptr null
+    %1 = extractvalue { ptr, i32 } %0, 0
+    %2 = extractvalue { ptr, i32 } %0, 1
+    %3 = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIl)
     %matches = icmp eq i32 %2, %3
     br i1 %matches, label %catch4, label %catch.fallthrough
   
   catch4:                                           ; preds = %lpad
-    %4 = tail call i8* @__cxa_begin_catch(i8* %1)
+    %4 = tail call ptr @__cxa_begin_catch(ptr %1)
     invoke void @__cxa_rethrow()
             to label %unreachable unwind label %lpad6
   
   catch.fallthrough:                                ; preds = %lpad
-    %5 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
+    %5 = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
     %matches1 = icmp eq i32 %2, %5
-    %6 = tail call i8* @__cxa_begin_catch(i8* %1)
+    %6 = tail call ptr @__cxa_begin_catch(ptr %1)
     br i1 %matches1, label %catch2, label %catch
   
   catch2:                                           ; preds = %catch.fallthrough
@@ -63,17 +63,17 @@
     br label %for.inc
   
   lpad6:                                            ; preds = %catch4
-    %7 = landingpad { i8*, i32 }
+    %7 = landingpad { ptr, i32 }
             cleanup
-            catch i8* bitcast (i8** @_ZTIc to i8*)
-    %8 = extractvalue { i8*, i32 } %7, 1
-    %9 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIc to i8*))
+            catch ptr @_ZTIc
+    %8 = extractvalue { ptr, i32 } %7, 1
+    %9 = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIc)
     %matches9 = icmp eq i32 %8, %9
     br i1 %matches9, label %catch10, label %ehcleanup
   
   catch10:                                          ; preds = %lpad6
-    %10 = extractvalue { i8*, i32 } %7, 0
-    %11 = tail call i8* @__cxa_begin_catch(i8* %10)
+    %10 = extractvalue { ptr, i32 } %7, 0
+    %11 = tail call ptr @__cxa_begin_catch(ptr %10)
     tail call void @__cxa_end_catch()
     tail call void @__cxa_end_catch()
     br label %for.inc
@@ -85,8 +85,8 @@
   
   ehcleanup:                                        ; preds = %lpad6
     tail call void @__cxa_end_catch()
-    %exn.obj = extractvalue { i8*, i32 } %7, 0
-    call void @_Unwind_Resume(i8* %exn.obj)
+    %exn.obj = extractvalue { ptr, i32 } %7, 0
+    call void @_Unwind_Resume(ptr %exn.obj)
     unreachable
   
   unreachable:                                      ; preds = %catch4
@@ -96,16 +96,16 @@
   declare i32 @__gxx_personality_v0(...)
   
   ; Function Attrs: nounwind readnone
-  declare i32 @llvm.eh.typeid.for(i8*) #0
+  declare i32 @llvm.eh.typeid.for(ptr) #0
   
-  declare i8* @__cxa_begin_catch(i8*) local_unnamed_addr
+  declare ptr @__cxa_begin_catch(ptr) local_unnamed_addr
   
   declare void @__cxa_end_catch() local_unnamed_addr
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
   
-  declare void @_Unwind_Resume(i8*)
+  declare void @_Unwind_Resume(ptr)
   
   attributes #0 = { nounwind readnone }
   attributes #1 = { nounwind }
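
block-placement-1.mir collects the exception-handling spellings of the
same rule: the personality reference sheds its
"bitcast (i32 (...)* @__gxx_personality_v0 to i8*)" wrapper, catch clauses
name the typeinfo globals directly, and landingpad results become
{ ptr, i32 }. A condensed sketch (hypothetical function @may_throw; the
EH declarations match the ones in the test):

  @_ZTIi = external constant ptr

  declare void @may_throw()
  declare i32 @__gxx_personality_v0(...)
  declare i32 @llvm.eh.typeid.for(ptr)

  define void @sketch() personality ptr @__gxx_personality_v0 {
  entry:
    invoke void @may_throw()
            to label %cont unwind label %lpad

  cont:
    ret void

  lpad:
    ; the landingpad aggregate and catch clause use plain ptr,
    ; with no bitcast around the typeinfo global
    %lp = landingpad { ptr, i32 }
            catch ptr @_ZTIi
    %sel = extractvalue { ptr, i32 } %lp, 1
    %id = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIi)
    %matches = icmp eq i32 %sel, %id
    ret void
  }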

diff --git a/llvm/test/CodeGen/PowerPC/ctrloop-do-not-duplicate-mi.mir b/llvm/test/CodeGen/PowerPC/ctrloop-do-not-duplicate-mi.mir
index a9065f7f5c7f4..d8bd70acbfae4 100644
--- a/llvm/test/CodeGen/PowerPC/ctrloop-do-not-duplicate-mi.mir
+++ b/llvm/test/CodeGen/PowerPC/ctrloop-do-not-duplicate-mi.mir
@@ -7,12 +7,12 @@
   
   define dso_local void @test() local_unnamed_addr #0 {
   test_entry:
-    %_val_domain_ = load i32, i32* undef, align 4
+    %_val_domain_ = load i32, ptr undef, align 4
     %_conv765 = sext i32 %_val_domain_ to i64
     br i1 undef, label %_label_42, label %_loop_40_loopHeader_
   
   _loop_40_loopHeader_:                             ; preds = %test_entry
-    %_val_flags_1020 = load i32, i32* undef, align 4
+    %_val_flags_1020 = load i32, ptr undef, align 4
     %0 = and i32 %_val_flags_1020, 1
     %_cond_conv_1022.not = icmp eq i32 %0, 0
     %1 = sub i64 1, %_conv765
@@ -37,7 +37,7 @@
   
   _label_42.loopexit:                               ; preds = %_loop_44_do_
     %3 = trunc i64 %lsr.iv to i32
-    store i32 %3, i32* undef, align 4
+    store i32 %3, ptr undef, align 4
     unreachable
   
   _label_42:                                        ; preds = %test_entry
@@ -130,7 +130,7 @@ body:             |
     successors: %bb.7(0x40000000), %bb.1(0x40000000)
   
     %5:g8rc_and_g8rc_nox0 = IMPLICIT_DEF
-    %0:g8rc = LWA 0, killed %5 :: (load (s32) from `i32* undef`)
+    %0:g8rc = LWA 0, killed %5 :: (load (s32) from `ptr undef`)
     %6:crbitrc = IMPLICIT_DEF
     BC killed %6, %bb.7
     B %bb.1
@@ -139,7 +139,7 @@ body:             |
     successors: %bb.3(0x80000000)
   
     %9:g8rc_and_g8rc_nox0 = IMPLICIT_DEF
-    %8:gprc = LWZ 0, %9 :: (load (s32) from `i32* undef`)
+    %8:gprc = LWZ 0, %9 :: (load (s32) from `ptr undef`)
     %15:gprc = ANDI_rec %8, 1, implicit-def $cr0
     %1:crbitrc = COPY $cr0eq
     %10:g8rc_and_g8rc_nox0 = SUBFIC8 %0, 1, implicit-def dead $carry
@@ -178,7 +178,7 @@ body:             |
     successors: 
   
     %14:g8rc_and_g8rc_nox0 = IMPLICIT_DEF
-    STW8 %4, 0, killed %14 :: (store (s32) into `i32* undef`)
+    STW8 %4, 0, killed %14 :: (store (s32) into `ptr undef`)
   
   bb.7._label_42:
 

diff --git a/llvm/test/CodeGen/PowerPC/livevars-crash1.mir b/llvm/test/CodeGen/PowerPC/livevars-crash1.mir
index 8bb9ad1b44f30..6ddc2b022e9b5 100644
--- a/llvm/test/CodeGen/PowerPC/livevars-crash1.mir
+++ b/llvm/test/CodeGen/PowerPC/livevars-crash1.mir
@@ -4,25 +4,25 @@
 
 --- |
   ; Function Attrs: noreturn nounwind
-  define signext i32 @zext_free(i8** nocapture dereferenceable(8) %p) {
+  define signext i32 @zext_free(ptr nocapture dereferenceable(8) %p) {
   entry:
-    %.pre = load i8*, i8** %p, align 8
+    %.pre = load ptr, ptr %p, align 8
     br label %loop
   
   loop:                                             ; preds = %loop, %if.then3, %entry
-    %0 = phi i8* [ %.pre, %entry ], [ %incdec.ptr4, %if.then3 ], [ %incdec.ptr, %loop ]
-    %1 = load i8, i8* %0, align 1
+    %0 = phi ptr [ %.pre, %entry ], [ %incdec.ptr4, %if.then3 ], [ %incdec.ptr, %loop ]
+    %1 = load i8, ptr %0, align 1
     %tobool = icmp eq i8 %1, 0
-    %incdec.ptr = getelementptr inbounds i8, i8* %0, i64 1
-    store i8* %incdec.ptr, i8** %p, align 8
-    %2 = load i8, i8* %incdec.ptr, align 1
+    %incdec.ptr = getelementptr inbounds i8, ptr %0, i64 1
+    store ptr %incdec.ptr, ptr %p, align 8
+    %2 = load i8, ptr %incdec.ptr, align 1
     %tobool2 = icmp ne i8 %2, 0
     %or.cond = and i1 %tobool, %tobool2
     br i1 %or.cond, label %if.then3, label %loop
   
   if.then3:                                         ; preds = %loop
-    %incdec.ptr4 = getelementptr inbounds i8, i8* %0, i64 2
-    store i8* %incdec.ptr4, i8** %p, align 8
+    %incdec.ptr4 = getelementptr inbounds i8, ptr %0, i64 2
+    store ptr %incdec.ptr4, ptr %p, align 8
     br label %loop
   }
   

diff --git a/llvm/test/CodeGen/PowerPC/livevars-crash2.mir b/llvm/test/CodeGen/PowerPC/livevars-crash2.mir
index e397567f4e582..1ae24fd0b7015 100644
--- a/llvm/test/CodeGen/PowerPC/livevars-crash2.mir
+++ b/llvm/test/CodeGen/PowerPC/livevars-crash2.mir
@@ -12,25 +12,25 @@
     ret float %cond
   }
   
-  define signext i32 @select-i1-vs-i1(i8** nocapture dereferenceable(8) %p) #0 {
+  define signext i32 @select-i1-vs-i1(ptr nocapture dereferenceable(8) %p) #0 {
   entry:
-    %.pre = load i8*, i8** %p, align 8
+    %.pre = load ptr, ptr %p, align 8
     br label %loop
   
   loop:                                             ; preds = %loop, %if.then3, %entry
-    %0 = phi i8* [ %.pre, %entry ], [ %incdec.ptr4, %if.then3 ], [ %incdec.ptr, %loop ]
-    %1 = load i8, i8* %0, align 1
+    %0 = phi ptr [ %.pre, %entry ], [ %incdec.ptr4, %if.then3 ], [ %incdec.ptr, %loop ]
+    %1 = load i8, ptr %0, align 1
     %tobool = icmp eq i8 %1, 0
-    %incdec.ptr = getelementptr inbounds i8, i8* %0, i64 1
-    store i8* %incdec.ptr, i8** %p, align 8
-    %2 = load i8, i8* %incdec.ptr, align 1
+    %incdec.ptr = getelementptr inbounds i8, ptr %0, i64 1
+    store ptr %incdec.ptr, ptr %p, align 8
+    %2 = load i8, ptr %incdec.ptr, align 1
     %tobool2 = icmp ne i8 %2, 0
     %or.cond = and i1 %tobool, %tobool2
     br i1 %or.cond, label %if.then3, label %loop
   
   if.then3:                                         ; preds = %loop
-    %incdec.ptr4 = getelementptr inbounds i8, i8* %0, i64 2
-    store i8* %incdec.ptr4, i8** %p, align 8
+    %incdec.ptr4 = getelementptr inbounds i8, ptr %0, i64 2
+    store ptr %incdec.ptr4, ptr %p, align 8
     br label %loop
   }
   

diff --git a/llvm/test/CodeGen/PowerPC/no-rlwimi-trivial-commute.mir b/llvm/test/CodeGen/PowerPC/no-rlwimi-trivial-commute.mir
index 14eb65bda25e6..97c16048b18f1 100644
--- a/llvm/test/CodeGen/PowerPC/no-rlwimi-trivial-commute.mir
+++ b/llvm/test/CodeGen/PowerPC/no-rlwimi-trivial-commute.mir
@@ -5,30 +5,30 @@
   target triple = "powerpc64-unknown-linux-gnu"
 
   @d = global i32 15, align 4
-  @b = global i32* @d, align 8
+  @b = global ptr @d, align 8
   @a = common global i32 0, align 4
 
   ; Function Attrs: nounwind
   define signext i32 @main() #0 {
   entry:
-    %0 = load i32*, i32** @b, align 8
-    %1 = load i32, i32* @a, align 4
+    %0 = load ptr, ptr @b, align 8
+    %1 = load i32, ptr @a, align 4
     %lnot = icmp eq i32 %1, 0
     %lnot.ext = zext i1 %lnot to i32
     %shr.i = lshr i32 2072, %lnot.ext
     %call.lobit = lshr i32 %shr.i, 7
     %2 = and i32 %call.lobit, 1
-    %3 = load i32, i32* %0, align 4
+    %3 = load i32, ptr %0, align 4
     %or = or i32 %2, %3
-    store i32 %or, i32* %0, align 4
-    %4 = load i32, i32* @a, align 4
+    store i32 %or, ptr %0, align 4
+    %4 = load i32, ptr @a, align 4
     %lnot.1 = icmp eq i32 %4, 0
     %lnot.ext.1 = zext i1 %lnot.1 to i32
     %shr.i.1 = lshr i32 2072, %lnot.ext.1
     %call.lobit.1 = lshr i32 %shr.i.1, 7
     %5 = and i32 %call.lobit.1, 1
     %or.1 = or i32 %5, %or
-    store i32 %or.1, i32* %0, align 4
+    store i32 %or.1, ptr %0, align 4
     ret i32 %or.1
   }
 

diff --git a/llvm/test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir b/llvm/test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir
index 2a3123a636ed4..8140249cb3c7d 100644
--- a/llvm/test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir
+++ b/llvm/test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir
@@ -22,7 +22,7 @@
   }
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
 
   attributes #0 = { nounwind readnone }
   attributes #1 = { nounwind }

diff --git a/llvm/test/CodeGen/PowerPC/peephole-phi-acc.mir b/llvm/test/CodeGen/PowerPC/peephole-phi-acc.mir
index 893070844b567..116a9529d280e 100644
--- a/llvm/test/CodeGen/PowerPC/peephole-phi-acc.mir
+++ b/llvm/test/CodeGen/PowerPC/peephole-phi-acc.mir
@@ -8,7 +8,7 @@
 # with an unprimed accumulator PHI node cycle.
 
 --- |
-  define dso_local void @phiCopy(i32 signext %i, <16 x i8> %vc, <512 x i1>* nocapture %ptr) {
+  define dso_local void @phiCopy(i32 signext %i, <16 x i8> %vc, ptr nocapture %ptr) {
   entry:
     %0 = tail call <512 x i1> @llvm.ppc.mma.xxsetaccz()
     %tobool.not = icmp eq i32 %i, 0
@@ -20,7 +20,7 @@
 
   if.end:
     %vq.0 = phi <512 x i1> [ %1, %if.then ], [ %0, %entry ]
-    store <512 x i1> %vq.0, <512 x i1>* %ptr, align 64
+    store <512 x i1> %vq.0, ptr %ptr, align 64
     ret void
   }
 
@@ -28,7 +28,7 @@
 
   declare <512 x i1> @llvm.ppc.mma.xvf32gerpp(<512 x i1>, <16 x i8>, <16 x i8>)
 
-  define dso_local void @phiCopyUndef(i32 signext %i, <16 x i8> %vc, <512 x i1>* nocapture %ptr) {
+  define dso_local void @phiCopyUndef(i32 signext %i, <16 x i8> %vc, ptr nocapture %ptr) {
   entry:
     %tobool.not = icmp eq i32 %i, 0
     br i1 %tobool.not, label %if.end, label %if.then
@@ -39,11 +39,11 @@
 
   if.end:
     %vq.0 = phi <512 x i1> [ %0, %if.then ], [ undef, %entry ]
-    store <512 x i1> %vq.0, <512 x i1>* %ptr, align 64
+    store <512 x i1> %vq.0, ptr %ptr, align 64
     ret void
   }
 
-  define dso_local void @phiPhis(i32 signext %i, <16 x i8> %vc, <512 x i1>* nocapture %ptr) {
+  define dso_local void @phiPhis(i32 signext %i, <16 x i8> %vc, ptr nocapture %ptr) {
   entry:
     %cmp6 = icmp sgt i32 %i, 0
     br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
@@ -83,8 +83,8 @@
 
   for.cond.cleanup:
     %vq.0.lcssa = phi <512 x i1> [ undef, %entry ], [ %vq.07.unr, %for.cond.cleanup.loopexit.unr-lcssa ], [ %9, %for.body.epil ]
-    %add.ptr = getelementptr inbounds <512 x i1>, <512 x i1>* %ptr, i64 1
-    store <512 x i1> %vq.0.lcssa, <512 x i1>* %add.ptr, align 64
+    %add.ptr = getelementptr inbounds <512 x i1>, ptr %ptr, i64 1
+    store <512 x i1> %vq.0.lcssa, ptr %add.ptr, align 64
     ret void
 
   for.body:
@@ -101,7 +101,7 @@
     br i1 %19, label %for.body, label %for.cond.cleanup.loopexit.unr-lcssa
   }
 
-  define dso_local void @phiCycle(i32 signext %i, <16 x i8> %vc, <512 x i1>* nocapture %ptr) {
+  define dso_local void @phiCycle(i32 signext %i, <16 x i8> %vc, ptr nocapture %ptr) {
   entry:
     %cmp6 = icmp sgt i32 %i, 0
     br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
@@ -143,8 +143,8 @@
 
   for.cond.cleanup:
     %vq.0.lcssa = phi <512 x i1> [ undef, %entry ], [ %vq.07.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
-    %add.ptr = getelementptr inbounds <512 x i1>, <512 x i1>* %ptr, i64 1
-    store <512 x i1> %vq.0.lcssa, <512 x i1>* %add.ptr, align 64
+    %add.ptr = getelementptr inbounds <512 x i1>, ptr %ptr, i64 1
+    store <512 x i1> %vq.0.lcssa, ptr %add.ptr, align 64
     ret void
 
   for.body:

diff --git a/llvm/test/CodeGen/PowerPC/phi-eliminate.mir b/llvm/test/CodeGen/PowerPC/phi-eliminate.mir
index a79f2586850fe..f50d92772e345 100644
--- a/llvm/test/CodeGen/PowerPC/phi-eliminate.mir
+++ b/llvm/test/CodeGen/PowerPC/phi-eliminate.mir
@@ -2,15 +2,15 @@
 # RUN:   -run-pass=livevars,phi-node-elimination | FileCheck %s
 
 --- |
-  define void @phi_eliminate(i32 %0, i32 %1, i8* %2) {
-    %scevgep3 = getelementptr i8, i8* %2, i64 undef
+  define void @phi_eliminate(i32 %0, i32 %1, ptr %2) {
+    %scevgep3 = getelementptr i8, ptr %2, i64 undef
     call void @llvm.set.loop.iterations.i64(i64 undef)
     br label %4
 
   4:                                                ; preds = %4, %3
     %5 = phi i32 [ %8, %4 ], [ %0, %3 ]
-    %6 = phi i8* [ %scevgep3, %3 ], [ %7, %4 ]
-    %7 = getelementptr i8, i8* %6, i64 -1
+    %6 = phi ptr [ %scevgep3, %3 ], [ %7, %4 ]
+    %7 = getelementptr i8, ptr %6, i64 -1
     %8 = sdiv i32 %5, %1
     %9 = mul nsw i32 %8, %1
     %10 = sub nsw i32 %5, %9
@@ -18,7 +18,7 @@
     %12 = trunc i32 %10 to i8
     %13 = select i1 %11, i8 48, i8 55
     %14 = add i8 %13, %12
-    store i8 %14, i8* %7, align 1
+    store i8 %14, ptr %7, align 1
     %15 = call i1 @llvm.loop.decrement.i64(i64 1)
     br i1 %15, label %4, label %16
 
@@ -30,7 +30,7 @@
 
   declare i1 @llvm.loop.decrement.i64(i64)
 
-  declare void @llvm.stackprotector(i8*, i8**)
+  declare void @llvm.stackprotector(ptr, ptr)
 ...
 ---
 name:            phi_eliminate

diff --git a/llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir b/llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir
index 403c7e4ceb94a..67b64b447a5fa 100644
--- a/llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir
+++ b/llvm/test/CodeGen/PowerPC/remove-copy-crunsetcrbit.mir
@@ -5,19 +5,19 @@
 
   @b = common dso_local local_unnamed_addr global i32 0, align 4
   @d = common dso_local local_unnamed_addr global i32 0, align 4
-  @e = common dso_local local_unnamed_addr global i32* null, align 8
+  @e = common dso_local local_unnamed_addr global ptr null, align 8
   @c = common dso_local local_unnamed_addr global i32 0, align 4
   @a = common dso_local local_unnamed_addr global [1 x i32] zeroinitializer, align 4
 
   ; Function Attrs: norecurse nounwind
   define dso_local signext i32 @copycrunset() local_unnamed_addr #0 {
   entry:
-    %0 = load i32, i32* @b, align 4
+    %0 = load i32, ptr @b, align 4
     %tobool3 = icmp eq i32 %0, 0
     br i1 %tobool3, label %while.end, label %while.body.preheader
 
   while.body.preheader:                             ; preds = %entry
-    %.pre = load i32, i32* @d, align 4
+    %.pre = load i32, ptr @d, align 4
     %tobool1 = icmp eq i32 %.pre, 0
     br label %while.body
 
@@ -25,18 +25,18 @@
     br i1 %tobool1, label %land.end, label %land.rhs
 
   land.rhs:                                         ; preds = %while.body
-    %1 = load i32*, i32** @e, align 8
-    %2 = load i32, i32* %1, align 4
+    %1 = load ptr, ptr @e, align 8
+    %2 = load i32, ptr %1, align 4
     %idxprom = sext i32 %2 to i64
-    %arrayidx = getelementptr inbounds [1 x i32], [1 x i32]* @a, i64 0, i64 %idxprom
-    %3 = load i32, i32* %arrayidx, align 4
+    %arrayidx = getelementptr inbounds [1 x i32], ptr @a, i64 0, i64 %idxprom
+    %3 = load i32, ptr %arrayidx, align 4
     %tobool2 = icmp ne i32 %3, 0
     br label %land.end
 
   land.end:                                         ; preds = %land.rhs, %while.body
     %4 = phi i1 [ false, %while.body ], [ %tobool2, %land.rhs ]
     %land.ext = zext i1 %4 to i32
-    store i32 %land.ext, i32* @c, align 4
+    store i32 %land.ext, ptr @c, align 4
     br label %while.body
 
   while.end:                                        ; preds = %entry

diff --git a/llvm/test/CodeGen/PowerPC/remove-redundant-li-skip-imp-kill.mir b/llvm/test/CodeGen/PowerPC/remove-redundant-li-skip-imp-kill.mir
index f0a45465f465d..913877b0d4318 100644
--- a/llvm/test/CodeGen/PowerPC/remove-redundant-li-skip-imp-kill.mir
+++ b/llvm/test/CodeGen/PowerPC/remove-redundant-li-skip-imp-kill.mir
@@ -7,10 +7,10 @@
   target datalayout = "e-m:e-i64:64-n32:64"
 
   ; Function Attrs: nounwind
-  define dso_local signext i32 @b(i32 signext %a, i32* nocapture %b) local_unnamed_addr #0 {
+  define dso_local signext i32 @b(i32 signext %a, ptr nocapture %b) local_unnamed_addr #0 {
   entry:
     %call = tail call signext i32 @g(i32 signext %a)
-    store i32 %call, i32* %b, align 4
+    store i32 %call, ptr %b, align 4
     %call1 = tail call signext i32 @g(i32 signext %a)
     ret i32 %call1
   }
@@ -19,7 +19,7 @@
   declare signext i32 @g(i32 signext) local_unnamed_addr #0
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #0
+  declare void @llvm.stackprotector(ptr, ptr) #0
 
   attributes #0 = { nounwind }
 

diff --git a/llvm/test/CodeGen/PowerPC/schedule-addi-load.mir b/llvm/test/CodeGen/PowerPC/schedule-addi-load.mir
index c992f4dd7bcad..9caa6765dfb4b 100644
--- a/llvm/test/CodeGen/PowerPC/schedule-addi-load.mir
+++ b/llvm/test/CodeGen/PowerPC/schedule-addi-load.mir
@@ -7,17 +7,17 @@
 --- |
   target datalayout = "e-m:e-i64:64-n32:64"
   
-  define i64 @foo(i8* %p, i8* %q) {
+  define i64 @foo(ptr %p, ptr %q) {
   entry:
     br label %while.cond6.i
   
   while.cond6.i:                                    
     %n.0 = phi i64 [ 0, %entry ], [ %n.1, %while.cond6.i ]
     %conv = and i64 %n.0, 4294967295
-    %arrayidx = getelementptr inbounds i8, i8* %p, i64 %conv
-    %0 = load i8, i8* %arrayidx, align 1
-    %arrayidx4 = getelementptr inbounds i8, i8* %q, i64 %conv
-    %1 = load i8, i8* %arrayidx4, align 1
+    %arrayidx = getelementptr inbounds i8, ptr %p, i64 %conv
+    %0 = load i8, ptr %arrayidx, align 1
+    %arrayidx4 = getelementptr inbounds i8, ptr %q, i64 %conv
+    %1 = load i8, ptr %arrayidx4, align 1
     %cmp = icmp eq i8 %0, %1
     %n.1 = add i64 %conv, 1
     br i1 %cmp, label %while.cond6.i, label %while.end

diff --git a/llvm/test/CodeGen/PowerPC/sext_elimination.mir b/llvm/test/CodeGen/PowerPC/sext_elimination.mir
index 93feb9cb2e9e6..cafcdc3e76ca2 100644
--- a/llvm/test/CodeGen/PowerPC/sext_elimination.mir
+++ b/llvm/test/CodeGen/PowerPC/sext_elimination.mir
@@ -3,9 +3,9 @@
 --- |
   target datalayout = "E-m:e-i64:64-n32:64"
   target triple = "powerpc64le-unknown-linux-gnu"
-  define i8* @func(i8* %a) {
+  define ptr @func(ptr %a) {
   entry:
-    ret i8* %a
+    ret ptr %a
   }
   
 ...

diff --git a/llvm/test/CodeGen/PowerPC/shrink-wrap.mir b/llvm/test/CodeGen/PowerPC/shrink-wrap.mir
index 5c3b6ad347ca4..f0540adad4937 100644
--- a/llvm/test/CodeGen/PowerPC/shrink-wrap.mir
+++ b/llvm/test/CodeGen/PowerPC/shrink-wrap.mir
@@ -39,7 +39,7 @@
   declare i1 @llvm.loop.decrement.i64(i64) #0
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
 
   attributes #0 = { noduplicate nounwind }
   attributes #1 = { nounwind }

diff --git a/llvm/test/CodeGen/PowerPC/tls_get_addr_fence1.mir b/llvm/test/CodeGen/PowerPC/tls_get_addr_fence1.mir
index 9ea4b90086226..73bd475e9d498 100644
--- a/llvm/test/CodeGen/PowerPC/tls_get_addr_fence1.mir
+++ b/llvm/test/CodeGen/PowerPC/tls_get_addr_fence1.mir
@@ -9,7 +9,7 @@
 
   define i32 @tls_func() local_unnamed_addr {
   entry:
-    %0 = load i32, i32* @tls_var
+    %0 = load i32, ptr @tls_var
     ret i32 %0
   }
 

diff --git a/llvm/test/CodeGen/PowerPC/tls_get_addr_fence2.mir b/llvm/test/CodeGen/PowerPC/tls_get_addr_fence2.mir
index 467f866923897..ffeb066b94785 100644
--- a/llvm/test/CodeGen/PowerPC/tls_get_addr_fence2.mir
+++ b/llvm/test/CodeGen/PowerPC/tls_get_addr_fence2.mir
@@ -9,7 +9,7 @@
   
   define i32 @tls_func() local_unnamed_addr {
   entry:
-    %0 = load i32, i32* @tls_var
+    %0 = load i32, ptr @tls_var
     ret i32 %0
   }
 

diff --git a/llvm/test/CodeGen/PowerPC/two-address-crash.mir b/llvm/test/CodeGen/PowerPC/two-address-crash.mir
index a05ace4de2646..eda0a93e37f9d 100644
--- a/llvm/test/CodeGen/PowerPC/two-address-crash.mir
+++ b/llvm/test/CodeGen/PowerPC/two-address-crash.mir
@@ -4,14 +4,14 @@
 # RUN:   -verify-machineinstrs -o /dev/null 2>&1
 
 --- |
-  define void @VerifyTwoAddressCrash(i16 %div.0.i.i.i.i, i32 %L_num.0.i.i.i.i, i32 %tmp1.i.i206.i.i, i16* %P) {
+  define void @VerifyTwoAddressCrash(i16 %div.0.i.i.i.i, i32 %L_num.0.i.i.i.i, i32 %tmp1.i.i206.i.i, ptr %P) {
     %X = shl i16 %div.0.i.i.i.i, 1
     %tmp28.i.i.i.i = shl i32 %L_num.0.i.i.i.i, 1
     %tmp31.i.i.i.i = icmp slt i32 %tmp28.i.i.i.i, %tmp1.i.i206.i.i
     %tmp31.i.i.i.i.upgrd.1 = zext i1 %tmp31.i.i.i.i to i16
     %tmp371.i.i.i.i1 = or i16 %tmp31.i.i.i.i.upgrd.1, %X
     %div.0.be.i.i.i.i = xor i16 %tmp371.i.i.i.i1, 1
-    store i16 %div.0.be.i.i.i.i, i16* %P, align 2
+    store i16 %div.0.be.i.i.i.i, ptr %P, align 2
     ret void
   }
 

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cmplx_cong.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cmplx_cong.mir
index ac47bb0dc4740..ef037b8a5c46a 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cmplx_cong.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cmplx_cong.mir
@@ -6,7 +6,7 @@
   @arm_cmplx_conj_f32_mve.cmplx_conj_sign = internal constant [4 x float] [float 1.000000e+00, float -1.000000e+00, float 1.000000e+00, float -1.000000e+00], align 4
 
   ; Function Attrs: nounwind
-  define hidden void @arm_cmplx_conj_f32_mve(float* %pSrc, float* %pDst, i32 %blockSize) local_unnamed_addr #0 {
+  define hidden void @arm_cmplx_conj_f32_mve(ptr %pSrc, ptr %pDst, i32 %blockSize) local_unnamed_addr #0 {
   entry:
     ret void
   }

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-mov.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-mov.mir
index 4421f1444c386..d124063f6a846 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-mov.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-mov.mir
@@ -5,23 +5,23 @@
   target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv8.1m.main"
 
-  define i32 @do_copy(i32 %n, i32* nocapture %p, i32* nocapture readonly %q) {
+  define i32 @do_copy(i32 %n, ptr nocapture %p, ptr nocapture readonly %q) {
   entry:
-    %scevgep = getelementptr i32, i32* %q, i32 -1
-    %scevgep3 = getelementptr i32, i32* %p, i32 -1
+    %scevgep = getelementptr i32, ptr %q, i32 -1
+    %scevgep3 = getelementptr i32, ptr %p, i32 -1
     %start = call i32 @llvm.start.loop.iterations.i32(i32 %n)
     br label %while.body
 
   while.body:                                       ; preds = %while.body, %entry
-    %lsr.iv4 = phi i32* [ %scevgep5, %while.body ], [ %scevgep3, %entry ]
-    %lsr.iv = phi i32* [ %scevgep1, %while.body ], [ %scevgep, %entry ]
+    %lsr.iv4 = phi ptr [ %scevgep5, %while.body ], [ %scevgep3, %entry ]
+    %lsr.iv = phi ptr [ %scevgep1, %while.body ], [ %scevgep, %entry ]
     %0 = phi i32 [ %start, %entry ], [ %2, %while.body ]
-    %scevgep6 = getelementptr i32, i32* %lsr.iv, i32 1
-    %scevgep2 = getelementptr i32, i32* %lsr.iv4, i32 1
-    %1 = load i32, i32* %scevgep6, align 4
-    store i32 %1, i32* %scevgep2, align 4
-    %scevgep1 = getelementptr i32, i32* %lsr.iv, i32 1
-    %scevgep5 = getelementptr i32, i32* %lsr.iv4, i32 1
+    %scevgep6 = getelementptr i32, ptr %lsr.iv, i32 1
+    %scevgep2 = getelementptr i32, ptr %lsr.iv4, i32 1
+    %1 = load i32, ptr %scevgep6, align 4
+    store i32 %1, ptr %scevgep2, align 4
+    %scevgep1 = getelementptr i32, ptr %lsr.iv, i32 1
+    %scevgep5 = getelementptr i32, ptr %lsr.iv4, i32 1
     %2 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1)
     %3 = icmp ne i32 %2, 0
     br i1 %3, label %while.body, label %while.end

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/ctlz-non-zeros.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/ctlz-non-zeros.mir
index f3a731317d85a..588fe4cfcdb99 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/ctlz-non-zeros.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/ctlz-non-zeros.mir
@@ -4,7 +4,7 @@
 # CHECK-NOT: LETP
 
 --- |
-  define arm_aapcs_vfpcc void @test_ctlz_i8(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c, i32 %elts, i32 %iters) #0 {
+  define arm_aapcs_vfpcc void @test_ctlz_i8(ptr %a, ptr %b, ptr %c, i32 %elts, i32 %iters) #0 {
   entry:
     %cmp = icmp slt i32 %elts, 1
     br i1 %cmp, label %exit, label %loop.ph
@@ -16,21 +16,21 @@
   loop.body:                                        ; preds = %loop.body, %loop.ph
     %lsr.iv = phi i32 [ %lsr.iv.next, %loop.body ], [ %start, %loop.ph ]
     %count = phi i32 [ %elts, %loop.ph ], [ %elts.rem, %loop.body ]
-    %addr.a = phi <8 x i16>* [ %a, %loop.ph ], [ %addr.a.next, %loop.body ]
-    %addr.b = phi <8 x i16>* [ %b, %loop.ph ], [ %addr.b.next, %loop.body ]
-    %addr.c = phi <8 x i16>* [ %c, %loop.ph ], [ %addr.c.next, %loop.body ]
+    %addr.a = phi ptr [ %a, %loop.ph ], [ %addr.a.next, %loop.body ]
+    %addr.b = phi ptr [ %b, %loop.ph ], [ %addr.b.next, %loop.body ]
+    %addr.c = phi ptr [ %c, %loop.ph ], [ %addr.c.next, %loop.body ]
     %pred = call <8 x i1> @llvm.arm.mve.vctp16(i32 %count)
     %elts.rem = sub i32 %count, 8
-    %masked.load.a = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %addr.a, i32 2, <8 x i1> %pred, <8 x i16> undef)
-    %masked.load.b = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %addr.b, i32 2, <8 x i1> %pred, <8 x i16> undef)
+    %masked.load.a = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %addr.a, i32 2, <8 x i1> %pred, <8 x i16> undef)
+    %masked.load.b = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %addr.b, i32 2, <8 x i1> %pred, <8 x i16> undef)
     %bitcast.a = bitcast <8 x i16> %masked.load.a to <16 x i8>
     %ctlz = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %bitcast.a, i1 false)
     %shrn = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> %ctlz, <8 x i16> %masked.load.b, i32 1, i32 1, i32 0, i32 1, i32 0, i32 1)
     %bitcast = bitcast <16 x i8> %shrn to <8 x i16>
-    call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %bitcast, <8 x i16>* %addr.c, i32 2, <8 x i1> %pred)
-    %addr.a.next = getelementptr <8 x i16>, <8 x i16>* %addr.b, i32 1
-    %addr.b.next = getelementptr <8 x i16>, <8 x i16>* %addr.b, i32 1
-    %addr.c.next = getelementptr <8 x i16>, <8 x i16>* %addr.c, i32 1
+    call void @llvm.masked.store.v8i16.p0(<8 x i16> %bitcast, ptr %addr.c, i32 2, <8 x i1> %pred)
+    %addr.a.next = getelementptr <8 x i16>, ptr %addr.b, i32 1
+    %addr.b.next = getelementptr <8 x i16>, ptr %addr.b, i32 1
+    %addr.c.next = getelementptr <8 x i16>, ptr %addr.c, i32 1
     %loop.dec = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv, i32 1)
     %end = icmp ne i32 %loop.dec, 0
     %lsr.iv.next = add i32 %lsr.iv, -1
@@ -40,7 +40,7 @@
     ret void
   }
 
-  define arm_aapcs_vfpcc void @test_ctlz_i16(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c, i32 %elts, i32 %iters) #0 {
+  define arm_aapcs_vfpcc void @test_ctlz_i16(ptr %a, ptr %b, ptr %c, i32 %elts, i32 %iters) #0 {
   entry:
     %cmp = icmp slt i32 %elts, 1
     br i1 %cmp, label %exit, label %loop.ph
@@ -52,21 +52,21 @@
   loop.body:                                        ; preds = %loop.body, %loop.ph
     %lsr.iv = phi i32 [ %lsr.iv.next, %loop.body ], [ %start, %loop.ph ]
     %count = phi i32 [ %elts, %loop.ph ], [ %elts.rem, %loop.body ]
-    %addr.a = phi <4 x i32>* [ %a, %loop.ph ], [ %addr.a.next, %loop.body ]
-    %addr.b = phi <4 x i32>* [ %b, %loop.ph ], [ %addr.b.next, %loop.body ]
-    %addr.c = phi <4 x i32>* [ %c, %loop.ph ], [ %addr.c.next, %loop.body ]
+    %addr.a = phi ptr [ %a, %loop.ph ], [ %addr.a.next, %loop.body ]
+    %addr.b = phi ptr [ %b, %loop.ph ], [ %addr.b.next, %loop.body ]
+    %addr.c = phi ptr [ %c, %loop.ph ], [ %addr.c.next, %loop.body ]
     %pred = call <4 x i1> @llvm.arm.mve.vctp32(i32 %count)
     %elts.rem = sub i32 %count, 4
-    %masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.a, i32 4, <4 x i1> %pred, <4 x i32> undef)
-    %masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.b, i32 4, <4 x i1> %pred, <4 x i32> undef)
+    %masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %addr.a, i32 4, <4 x i1> %pred, <4 x i32> undef)
+    %masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %addr.b, i32 4, <4 x i1> %pred, <4 x i32> undef)
     %bitcast.a = bitcast <4 x i32> %masked.load.a to <8 x i16>
     %ctlz = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %bitcast.a, i1 false)
     %shrn = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> %ctlz, <4 x i32> %masked.load.b, i32 3, i32 1, i32 0, i32 1, i32 0, i32 1)
     %bitcast = bitcast <8 x i16> %shrn to <4 x i32>
-    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %bitcast, <4 x i32>* %addr.c, i32 4, <4 x i1> %pred)
-    %addr.a.next = getelementptr <4 x i32>, <4 x i32>* %addr.a, i32 1
-    %addr.b.next = getelementptr <4 x i32>, <4 x i32>* %addr.b, i32 1
-    %addr.c.next = getelementptr <4 x i32>, <4 x i32>* %addr.c, i32 1
+    call void @llvm.masked.store.v4i32.p0(<4 x i32> %bitcast, ptr %addr.c, i32 4, <4 x i1> %pred)
+    %addr.a.next = getelementptr <4 x i32>, ptr %addr.a, i32 1
+    %addr.b.next = getelementptr <4 x i32>, ptr %addr.b, i32 1
+    %addr.c.next = getelementptr <4 x i32>, ptr %addr.c, i32 1
     %loop.dec = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv, i32 1)
     %end = icmp ne i32 %loop.dec, 0
     %lsr.iv.next = add i32 %lsr.iv, -1
@@ -76,7 +76,7 @@
     ret void
   }
 
-  define arm_aapcs_vfpcc void @test_ctlz_i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c, i32 %elts, i32 %iters) #0 {
+  define arm_aapcs_vfpcc void @test_ctlz_i32(ptr %a, ptr %b, ptr %c, i32 %elts, i32 %iters) #0 {
   entry:
     %cmp = icmp slt i32 %elts, 1
     br i1 %cmp, label %exit, label %loop.ph
@@ -88,21 +88,21 @@
   loop.body:                                        ; preds = %loop.body, %loop.ph
     %lsr.iv = phi i32 [ %lsr.iv.next, %loop.body ], [ %start, %loop.ph ]
     %count = phi i32 [ %elts, %loop.ph ], [ %elts.rem, %loop.body ]
-    %addr.a = phi <4 x i32>* [ %a, %loop.ph ], [ %addr.a.next, %loop.body ]
-    %addr.b = phi <4 x i32>* [ %b, %loop.ph ], [ %addr.b.next, %loop.body ]
-    %addr.c = phi <4 x i32>* [ %c, %loop.ph ], [ %addr.c.next, %loop.body ]
+    %addr.a = phi ptr [ %a, %loop.ph ], [ %addr.a.next, %loop.body ]
+    %addr.b = phi ptr [ %b, %loop.ph ], [ %addr.b.next, %loop.body ]
+    %addr.c = phi ptr [ %c, %loop.ph ], [ %addr.c.next, %loop.body ]
     %pred = call <4 x i1> @llvm.arm.mve.vctp32(i32 %count)
     %elts.rem = sub i32 %count, 4
-    %masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.a, i32 4, <4 x i1> %pred, <4 x i32> undef)
-    %masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.b, i32 4, <4 x i1> %pred, <4 x i32> undef)
+    %masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %addr.a, i32 4, <4 x i1> %pred, <4 x i32> undef)
+    %masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %addr.b, i32 4, <4 x i1> %pred, <4 x i32> undef)
     %ctlz = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %masked.load.b, i1 false)
     %bitcast.a = bitcast <4 x i32> %masked.load.a to <8 x i16>
     %shrn = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> %bitcast.a, <4 x i32> %ctlz, i32 3, i32 1, i32 0, i32 1, i32 0, i32 1)
     %bitcast = bitcast <8 x i16> %shrn to <4 x i32>
-    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %bitcast, <4 x i32>* %addr.c, i32 4, <4 x i1> %pred)
-    %addr.a.next = getelementptr <4 x i32>, <4 x i32>* %addr.a, i32 1
-    %addr.b.next = getelementptr <4 x i32>, <4 x i32>* %addr.b, i32 1
-    %addr.c.next = getelementptr <4 x i32>, <4 x i32>* %addr.c, i32 1
+    call void @llvm.masked.store.v4i32.p0(<4 x i32> %bitcast, ptr %addr.c, i32 4, <4 x i1> %pred)
+    %addr.a.next = getelementptr <4 x i32>, ptr %addr.a, i32 1
+    %addr.b.next = getelementptr <4 x i32>, ptr %addr.b, i32 1
+    %addr.c.next = getelementptr <4 x i32>, ptr %addr.c, i32 1
     %loop.dec = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv, i32 1)
     %end = icmp ne i32 %loop.dec, 0
     %lsr.iv.next = add i32 %lsr.iv, -1
@@ -118,12 +118,12 @@
   declare i32 @llvm.start.loop.iterations.i32(i32)
   declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
   declare <4 x i1> @llvm.arm.mve.vctp32(i32)
-  declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
-  declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
+  declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
+  declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)
   declare <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16>, <4 x i32>, i32, i32, i32, i32, i32, i32)
   declare <8 x i1> @llvm.arm.mve.vctp16(i32)
-  declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>)
-  declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>)
+  declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32 immarg, <8 x i1>, <8 x i16>)
+  declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32 immarg, <8 x i1>)
   declare <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8>, <8 x i16>, i32, i32, i32, i32, i32, i32)
 
 ...
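
The declarations just above also pick up the intrinsic side of the change:
overloaded memory intrinsics now mangle pointer parameters by address
space alone, so llvm.masked.load.v4i32.p0v4i32 becomes
llvm.masked.load.v4i32.p0 (a pointer in addrspace(1) would mangle as .p1).
A minimal use of the remangled form, as a sketch:

  declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)

  define <4 x i32> @sketch(ptr %p, <4 x i1> %m) {
  entry:
    ; .p0 no longer repeats the pointee type after the address space
    %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4, <4 x i1> %m, <4 x i32> zeroinitializer)
    ret <4 x i32> %v
  }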

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/dont-ignore-vctp.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/dont-ignore-vctp.mir
index 9ef52a544f553..f322424170399 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/dont-ignore-vctp.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/dont-ignore-vctp.mir
@@ -2,7 +2,7 @@
 # RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -run-pass=arm-low-overhead-loops %s -o - --verify-machineinstrs | FileCheck %s
 
 --- |
-  define hidden arm_aapcs_vfpcc void @dont_ignore_vctp(float* %pSrc, float* %pDst, i32 %blockSize) local_unnamed_addr #0 {
+  define hidden arm_aapcs_vfpcc void @dont_ignore_vctp(ptr %pSrc, ptr %pDst, i32 %blockSize) local_unnamed_addr #0 {
   entry:
     %mul = shl i32 %blockSize, 1
     %0 = add i32 %mul, 3
@@ -16,17 +16,15 @@
 
   do.body:                                          ; preds = %do.body, %entry
     %blkCnt.0 = phi i32 [ %mul, %entry ], [ %sub, %do.body ]
-    %pDst.addr.0 = phi float* [ %pDst, %entry ], [ %add.ptr4, %do.body ]
-    %pSrc.addr.0 = phi float* [ %pSrc, %entry ], [ %add.ptr, %do.body ]
+    %pDst.addr.0 = phi ptr [ %pDst, %entry ], [ %add.ptr4, %do.body ]
+    %pSrc.addr.0 = phi ptr [ %pSrc, %entry ], [ %add.ptr, %do.body ]
     %5 = phi i32 [ %start, %entry ], [ %9, %do.body ]
     %6 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %blkCnt.0)
-    %input_cast = bitcast float* %pSrc.addr.0 to <4 x float>*
-    %7 = tail call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %input_cast, i32 4, <4 x i1> %6, <4 x float> undef)
+    %7 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %pSrc.addr.0, i32 4, <4 x i1> %6, <4 x float> undef)
     %8 = fmul <4 x float> %7, <float 1.000000e+00, float -1.000000e+00, float 1.000000e+00, float -1.000000e+00>
-    %output_cast = bitcast float* %pDst.addr.0 to <4 x float>*
-    tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %8, <4 x float>* %output_cast, i32 4, <4 x i1> %6)
-    %add.ptr = getelementptr inbounds float, float* %pSrc.addr.0, i32 4
-    %add.ptr4 = getelementptr inbounds float, float* %pDst.addr.0, i32 4
+    tail call void @llvm.masked.store.v4f32.p0(<4 x float> %8, ptr %pDst.addr.0, i32 4, <4 x i1> %6)
+    %add.ptr = getelementptr inbounds float, ptr %pSrc.addr.0, i32 4
+    %add.ptr4 = getelementptr inbounds float, ptr %pDst.addr.0, i32 4
     %sub = add nsw i32 %blkCnt.0, -4
     %9 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %5, i32 1)
     %10 = icmp ne i32 %9, 0
@@ -36,8 +34,8 @@
     ret void
   }
   declare <4 x i1> @llvm.arm.mve.vctp32(i32) #1
-  declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>)
-  declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32 immarg, <4 x i1>)
+  declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)
+  declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32 immarg, <4 x i1>)
   declare i32 @llvm.start.loop.iterations.i32(i32)
   declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
 

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/end-positive-offset.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/end-positive-offset.mir
index d4781d27306a5..4f667a549f3f5 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/end-positive-offset.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/end-positive-offset.mir
@@ -5,25 +5,25 @@
   target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv8.1m.main"
 
-  define void @size_limit(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) {
+  define void @size_limit(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
   entry:
     %start = call i32 @llvm.start.loop.iterations.i32(i32 %N)
-    %scevgep = getelementptr i32, i32* %a, i32 -1
-    %scevgep4 = getelementptr i32, i32* %c, i32 -1
-    %scevgep8 = getelementptr i32, i32* %b, i32 -1
+    %scevgep = getelementptr i32, ptr %a, i32 -1
+    %scevgep4 = getelementptr i32, ptr %c, i32 -1
+    %scevgep8 = getelementptr i32, ptr %b, i32 -1
     br label %for.header
 
   for.body:                                         ; preds = %for.header
-    %scevgep11 = getelementptr i32, i32* %lsr.iv9, i32 1
-    %ld1 = load i32, i32* %scevgep11, align 4
-    %scevgep7 = getelementptr i32, i32* %lsr.iv5, i32 1
-    %ld2 = load i32, i32* %scevgep7, align 4
+    %scevgep11 = getelementptr i32, ptr %lsr.iv9, i32 1
+    %ld1 = load i32, ptr %scevgep11, align 4
+    %scevgep7 = getelementptr i32, ptr %lsr.iv5, i32 1
+    %ld2 = load i32, ptr %scevgep7, align 4
     %mul = mul nsw i32 %ld2, %ld1
-    %scevgep3 = getelementptr i32, i32* %lsr.iv1, i32 1
-    store i32 %mul, i32* %scevgep3, align 4
-    %scevgep2 = getelementptr i32, i32* %lsr.iv1, i32 1
-    %scevgep6 = getelementptr i32, i32* %lsr.iv5, i32 1
-    %scevgep10 = getelementptr i32, i32* %lsr.iv9, i32 1
+    %scevgep3 = getelementptr i32, ptr %lsr.iv1, i32 1
+    store i32 %mul, ptr %scevgep3, align 4
+    %scevgep2 = getelementptr i32, ptr %lsr.iv1, i32 1
+    %scevgep6 = getelementptr i32, ptr %lsr.iv5, i32 1
+    %scevgep10 = getelementptr i32, ptr %lsr.iv9, i32 1
     %count.next = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %count, i32 1)
     %cmp = icmp ne i32 %count.next, 0
     br i1 %cmp, label %for.header, label %for.cond.cleanup
@@ -32,9 +32,9 @@
     ret void
 
   for.header:                                       ; preds = %for.body, %entry
-    %lsr.iv9 = phi i32* [ %scevgep8, %entry ], [ %scevgep10, %for.body ]
-    %lsr.iv5 = phi i32* [ %scevgep4, %entry ], [ %scevgep6, %for.body ]
-    %lsr.iv1 = phi i32* [ %scevgep, %entry ], [ %scevgep2, %for.body ]
+    %lsr.iv9 = phi ptr [ %scevgep8, %entry ], [ %scevgep10, %for.body ]
+    %lsr.iv5 = phi ptr [ %scevgep4, %entry ], [ %scevgep6, %for.body ]
+    %lsr.iv1 = phi ptr [ %scevgep, %entry ], [ %scevgep2, %for.body ]
     %count = phi i32 [ %start, %entry ], [ %count.next, %for.body ]
     br label %for.body
   }

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain.mir
index 419a098090706..f27777bc34031 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-chain.mir
@@ -4,7 +4,7 @@
 # IT-block with 3 statements, all chained together.
 
 --- |
-  define hidden arm_aapcs_vfpcc void @it_block_2_stmts(float* %pSrc, float* %pDst, i32 %blockSize) local_unnamed_addr #0 {
+  define hidden arm_aapcs_vfpcc void @it_block_2_stmts(ptr %pSrc, ptr %pDst, i32 %blockSize) local_unnamed_addr #0 {
   entry:
     %mul = shl i32 %blockSize, 1
     %0 = add i32 %mul, 3
@@ -18,17 +18,15 @@
 
   do.body:                                          ; preds = %do.body, %entry
     %blkCnt.0 = phi i32 [ %mul, %entry ], [ %sub, %do.body ]
-    %pDst.addr.0 = phi float* [ %pDst, %entry ], [ %add.ptr4, %do.body ]
-    %pSrc.addr.0 = phi float* [ %pSrc, %entry ], [ %add.ptr, %do.body ]
+    %pDst.addr.0 = phi ptr [ %pDst, %entry ], [ %add.ptr4, %do.body ]
+    %pSrc.addr.0 = phi ptr [ %pSrc, %entry ], [ %add.ptr, %do.body ]
     %5 = phi i32 [ %start, %entry ], [ %9, %do.body ]
     %6 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %blkCnt.0)
-    %input_cast = bitcast float* %pSrc.addr.0 to <4 x float>*
-    %7 = tail call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %input_cast, i32 4, <4 x i1> %6, <4 x float> undef)
+    %7 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %pSrc.addr.0, i32 4, <4 x i1> %6, <4 x float> undef)
     %8 = fmul <4 x float> %7, <float 1.000000e+00, float -1.000000e+00, float 1.000000e+00, float -1.000000e+00>
-    %output_cast = bitcast float* %pDst.addr.0 to <4 x float>*
-    tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %8, <4 x float>* %output_cast, i32 4, <4 x i1> %6)
-    %add.ptr = getelementptr inbounds float, float* %pSrc.addr.0, i32 4
-    %add.ptr4 = getelementptr inbounds float, float* %pDst.addr.0, i32 4
+    tail call void @llvm.masked.store.v4f32.p0(<4 x float> %8, ptr %pDst.addr.0, i32 4, <4 x i1> %6)
+    %add.ptr = getelementptr inbounds float, ptr %pSrc.addr.0, i32 4
+    %add.ptr4 = getelementptr inbounds float, ptr %pDst.addr.0, i32 4
     %sub = add nsw i32 %blkCnt.0, -4
     %9 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %5, i32 1)
     %10 = icmp ne i32 %9, 0
@@ -38,8 +36,8 @@
     ret void
   }
   declare <4 x i1> @llvm.arm.mve.vctp32(i32) #1
-  declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>)
-  declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32 immarg, <4 x i1>)
+  declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)
+  declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32 immarg, <4 x i1>)
   declare i32 @llvm.start.loop.iterations.i32(i32)
   declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
 

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-itercount.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-itercount.mir
index e693d3fc6b7eb..fc216474db459 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-itercount.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-itercount.mir
@@ -5,7 +5,7 @@
 # not remove any of the iteration count statements.
 
 --- |
-  define hidden arm_aapcs_vfpcc void @it_block_2_stmts(float* %pSrc, float* %pDst, i32 %blockSize) local_unnamed_addr #0 {
+  define hidden arm_aapcs_vfpcc void @it_block_2_stmts(ptr %pSrc, ptr %pDst, i32 %blockSize) local_unnamed_addr #0 {
   entry:
     %mul = shl i32 %blockSize, 1
     %0 = add i32 %mul, 3
@@ -19,17 +19,15 @@
 
   do.body:                                          ; preds = %do.body, %entry
     %blkCnt.0 = phi i32 [ %mul, %entry ], [ %sub, %do.body ]
-    %pDst.addr.0 = phi float* [ %pDst, %entry ], [ %add.ptr4, %do.body ]
-    %pSrc.addr.0 = phi float* [ %pSrc, %entry ], [ %add.ptr, %do.body ]
+    %pDst.addr.0 = phi ptr [ %pDst, %entry ], [ %add.ptr4, %do.body ]
+    %pSrc.addr.0 = phi ptr [ %pSrc, %entry ], [ %add.ptr, %do.body ]
     %5 = phi i32 [ %start, %entry ], [ %9, %do.body ]
     %6 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %blkCnt.0)
-    %input_cast = bitcast float* %pSrc.addr.0 to <4 x float>*
-    %7 = tail call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %input_cast, i32 4, <4 x i1> %6, <4 x float> undef)
+    %7 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %pSrc.addr.0, i32 4, <4 x i1> %6, <4 x float> undef)
     %8 = fmul <4 x float> %7, <float 1.000000e+00, float -1.000000e+00, float 1.000000e+00, float -1.000000e+00>
-    %output_cast = bitcast float* %pDst.addr.0 to <4 x float>*
-    tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %8, <4 x float>* %output_cast, i32 4, <4 x i1> %6)
-    %add.ptr = getelementptr inbounds float, float* %pSrc.addr.0, i32 4
-    %add.ptr4 = getelementptr inbounds float, float* %pDst.addr.0, i32 4
+    tail call void @llvm.masked.store.v4f32.p0(<4 x float> %8, ptr %pDst.addr.0, i32 4, <4 x i1> %6)
+    %add.ptr = getelementptr inbounds float, ptr %pSrc.addr.0, i32 4
+    %add.ptr4 = getelementptr inbounds float, ptr %pDst.addr.0, i32 4
     %sub = add nsw i32 %blkCnt.0, -4
     %9 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %5, i32 1)
     %10 = icmp ne i32 %9, 0
@@ -39,8 +37,8 @@
     ret void
   }
   declare <4 x i1> @llvm.arm.mve.vctp32(i32) #1
-  declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>)
-  declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32 immarg, <4 x i1>)
+  declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)
+  declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32 immarg, <4 x i1>)
   declare i32 @llvm.start.loop.iterations.i32(i32)
   declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
 

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-mov.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-mov.mir
index 5a54f3e2c3eaa..938ae829db4eb 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-mov.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-mov.mir
@@ -3,7 +3,7 @@
 
 --- |
   ; Function Attrs: nounwind
-  define hidden arm_aapcs_vfpcc void @cond_trip_count(float* %0, i32 %1, float* nocapture %2) local_unnamed_addr #1 {
+  define hidden arm_aapcs_vfpcc void @cond_trip_count(ptr %0, i32 %1, ptr nocapture %2) local_unnamed_addr #1 {
     ret void
   }
 

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-random.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-random.mir
index d9cc6d7e40c61..1fac3e7e90ae8 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-random.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/it-block-random.mir
@@ -6,7 +6,7 @@
 # IT block and any of its instructions.
 
 --- |
-  define hidden arm_aapcs_vfpcc void @it_block_2_stmts(float* %pSrc, float* %pDst, i32 %blockSize) local_unnamed_addr #0 {
+  define hidden arm_aapcs_vfpcc void @it_block_2_stmts(ptr %pSrc, ptr %pDst, i32 %blockSize) local_unnamed_addr #0 {
   entry:
     %mul = shl i32 %blockSize, 1
     %0 = add i32 %mul, 3
@@ -20,17 +20,15 @@
 
   do.body:                                          ; preds = %do.body, %entry
     %blkCnt.0 = phi i32 [ %mul, %entry ], [ %sub, %do.body ]
-    %pDst.addr.0 = phi float* [ %pDst, %entry ], [ %add.ptr4, %do.body ]
-    %pSrc.addr.0 = phi float* [ %pSrc, %entry ], [ %add.ptr, %do.body ]
+    %pDst.addr.0 = phi ptr [ %pDst, %entry ], [ %add.ptr4, %do.body ]
+    %pSrc.addr.0 = phi ptr [ %pSrc, %entry ], [ %add.ptr, %do.body ]
     %5 = phi i32 [ %start, %entry ], [ %9, %do.body ]
     %6 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %blkCnt.0)
-    %input_cast = bitcast float* %pSrc.addr.0 to <4 x float>*
-    %7 = tail call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %input_cast, i32 4, <4 x i1> %6, <4 x float> undef)
+    %7 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %pSrc.addr.0, i32 4, <4 x i1> %6, <4 x float> undef)
     %8 = fmul <4 x float> %7, <float 1.000000e+00, float -1.000000e+00, float 1.000000e+00, float -1.000000e+00>
-    %output_cast = bitcast float* %pDst.addr.0 to <4 x float>*
-    tail call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %8, <4 x float>* %output_cast, i32 4, <4 x i1> %6)
-    %add.ptr = getelementptr inbounds float, float* %pSrc.addr.0, i32 4
-    %add.ptr4 = getelementptr inbounds float, float* %pDst.addr.0, i32 4
+    tail call void @llvm.masked.store.v4f32.p0(<4 x float> %8, ptr %pDst.addr.0, i32 4, <4 x i1> %6)
+    %add.ptr = getelementptr inbounds float, ptr %pSrc.addr.0, i32 4
+    %add.ptr4 = getelementptr inbounds float, ptr %pDst.addr.0, i32 4
     %sub = add nsw i32 %blkCnt.0, -4
     %9 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %5, i32 1)
     %10 = icmp ne i32 %9, 0
@@ -40,8 +38,8 @@
     ret void
   }
   declare <4 x i1> @llvm.arm.mve.vctp32(i32) #1
-  declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>)
-  declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32 immarg, <4 x i1>)
+  declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)
+  declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32 immarg, <4 x i1>)
   declare i32 @llvm.start.loop.iterations.i32(i32)
   declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
 

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/massive.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/massive.mir
index 3a926e7d556c6..2fb744e8e7621 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/massive.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/massive.mir
@@ -7,15 +7,15 @@
   target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv8.1m.main"
 
-  define dso_local arm_aapcscc void @massive(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) {
+  define dso_local arm_aapcscc void @massive(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
   entry:
     %cmp8 = icmp eq i32 %N, 0
     br i1 %cmp8, label %for.cond.cleanup, label %for.body.preheader
 
   for.body.preheader:                               ; preds = %entry
-    %scevgep = getelementptr i32, i32* %a, i32 -1
-    %scevgep4 = getelementptr i32, i32* %c, i32 -1
-    %scevgep8 = getelementptr i32, i32* %b, i32 -1
+    %scevgep = getelementptr i32, ptr %a, i32 -1
+    %scevgep4 = getelementptr i32, ptr %c, i32 -1
+    %scevgep8 = getelementptr i32, ptr %b, i32 -1
     %start = call i32 @llvm.start.loop.iterations.i32(i32 %N)
     br label %for.body
 
@@ -23,21 +23,21 @@
     ret void
 
   for.body:                                         ; preds = %for.body, %for.body.preheader
-    %lsr.iv9 = phi i32* [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
-    %lsr.iv5 = phi i32* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-    %lsr.iv1 = phi i32* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+    %lsr.iv9 = phi ptr [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
+    %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+    %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
     %0 = phi i32 [ %start, %for.body.preheader ], [ %3, %for.body ]
     %size = call i32 @llvm.arm.space(i32 4096, i32 undef)
-    %scevgep3 = getelementptr i32, i32* %lsr.iv9, i32 1
-    %1 = load i32, i32* %scevgep3, align 4
-    %scevgep7 = getelementptr i32, i32* %lsr.iv5, i32 1
-    %2 = load i32, i32* %scevgep7, align 4
+    %scevgep3 = getelementptr i32, ptr %lsr.iv9, i32 1
+    %1 = load i32, ptr %scevgep3, align 4
+    %scevgep7 = getelementptr i32, ptr %lsr.iv5, i32 1
+    %2 = load i32, ptr %scevgep7, align 4
     %mul = mul nsw i32 %2, %1
-    %scevgep11 = getelementptr i32, i32* %lsr.iv1, i32 1
-    store i32 %mul, i32* %scevgep11, align 4
-    %scevgep2 = getelementptr i32, i32* %lsr.iv1, i32 1
-    %scevgep6 = getelementptr i32, i32* %lsr.iv5, i32 1
-    %scevgep10 = getelementptr i32, i32* %lsr.iv9, i32 1
+    %scevgep11 = getelementptr i32, ptr %lsr.iv1, i32 1
+    store i32 %mul, ptr %scevgep11, align 4
+    %scevgep2 = getelementptr i32, ptr %lsr.iv1, i32 1
+    %scevgep6 = getelementptr i32, ptr %lsr.iv5, i32 1
+    %scevgep10 = getelementptr i32, ptr %lsr.iv9, i32 1
     %3 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1)
     %4 = icmp ne i32 %3, 0
     br i1 %4, label %for.body, label %for.cond.cleanup
@@ -53,7 +53,7 @@
   declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #1
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #0
+  declare void @llvm.stackprotector(ptr, ptr) #0
 
   attributes #0 = { nounwind }
   attributes #1 = { noduplicate nounwind }

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dls.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dls.mir
index d3d333de00ca2..1b6737c9073e5 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dls.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-after-dls.mir
@@ -5,23 +5,23 @@
   target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv8.1m.main"
 
-  define i32 @do_copy(i32 %n, i32* nocapture %p, i32* nocapture readonly %q) {
+  define i32 @do_copy(i32 %n, ptr nocapture %p, ptr nocapture readonly %q) {
   entry:
-    %scevgep = getelementptr i32, i32* %q, i32 -1
-    %scevgep3 = getelementptr i32, i32* %p, i32 -1
+    %scevgep = getelementptr i32, ptr %q, i32 -1
+    %scevgep3 = getelementptr i32, ptr %p, i32 -1
     %start = call i32 @llvm.start.loop.iterations.i32(i32 %n)
     br label %while.body
 
   while.body:                                       ; preds = %while.body, %entry
-    %lsr.iv4 = phi i32* [ %scevgep5, %while.body ], [ %scevgep3, %entry ]
-    %lsr.iv = phi i32* [ %scevgep1, %while.body ], [ %scevgep, %entry ]
+    %lsr.iv4 = phi ptr [ %scevgep5, %while.body ], [ %scevgep3, %entry ]
+    %lsr.iv = phi ptr [ %scevgep1, %while.body ], [ %scevgep, %entry ]
     %0 = phi i32 [ %start, %entry ], [ %2, %while.body ]
-    %scevgep6 = getelementptr i32, i32* %lsr.iv, i32 1
-    %scevgep2 = getelementptr i32, i32* %lsr.iv4, i32 1
-    %1 = load i32, i32* %scevgep6, align 4
-    store i32 %1, i32* %scevgep2, align 4
-    %scevgep1 = getelementptr i32, i32* %lsr.iv, i32 1
-    %scevgep5 = getelementptr i32, i32* %lsr.iv4, i32 1
+    %scevgep6 = getelementptr i32, ptr %lsr.iv, i32 1
+    %scevgep2 = getelementptr i32, ptr %lsr.iv4, i32 1
+    %1 = load i32, ptr %scevgep6, align 4
+    store i32 %1, ptr %scevgep2, align 4
+    %scevgep1 = getelementptr i32, ptr %lsr.iv, i32 1
+    %scevgep5 = getelementptr i32, ptr %lsr.iv4, i32 1
     %2 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1)
     %3 = icmp ne i32 %2, 0
     br i1 %3, label %while.body, label %while.end

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiblock-massive.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiblock-massive.mir
index 7af07abb1c6f0..f4377a3996125 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiblock-massive.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/multiblock-massive.mir
@@ -2,7 +2,7 @@
 # RUN: llc -mtriple=armv8.1m.main -mattr=+lob -run-pass=arm-low-overhead-loops --verify-machineinstrs %s -o - | FileCheck %s
 
 --- |
-  define void @size_limit(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) {
+  define void @size_limit(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
   entry:
     %cmp8 = icmp eq i32 %N, 0
     br i1 %cmp8, label %for.cond.cleanup, label %for.body.preheader
@@ -15,28 +15,28 @@
     ret void
 
   for.body:                                         ; preds = %for.end, %for.body.preheader
-    %lsr.iv4 = phi i32* [ %b, %for.body.preheader ], [ %scevgep5, %for.end ]
-    %lsr.iv2 = phi i32* [ %c, %for.body.preheader ], [ %scevgep3, %for.end ]
-    %lsr.iv1 = phi i32* [ %a, %for.body.preheader ], [ %scevgep, %for.end ]
+    %lsr.iv4 = phi ptr [ %b, %for.body.preheader ], [ %scevgep5, %for.end ]
+    %lsr.iv2 = phi ptr [ %c, %for.body.preheader ], [ %scevgep3, %for.end ]
+    %lsr.iv1 = phi ptr [ %a, %for.body.preheader ], [ %scevgep, %for.end ]
     %lsr.iv = phi i32 [ %start, %for.body.preheader ], [ %lsr.iv.next, %for.end ]
     %size = call i32 @llvm.arm.space(i32 3072, i32 undef)
-    %0 = load i32, i32* %lsr.iv4, align 4
-    %1 = load i32, i32* %lsr.iv2, align 4
+    %0 = load i32, ptr %lsr.iv4, align 4
+    %1 = load i32, ptr %lsr.iv2, align 4
     %mul = mul nsw i32 %1, %0
-    store i32 %mul, i32* %lsr.iv1, align 4
+    store i32 %mul, ptr %lsr.iv1, align 4
     %cmp = icmp ne i32 %0, 0
     br i1 %cmp, label %middle.block, label %for.end
 
   middle.block:                                     ; preds = %for.body
     %div = udiv i32 %1, %0
-    store i32 %div, i32* %lsr.iv1, align 4
+    store i32 %div, ptr %lsr.iv1, align 4
     %size.1 = call i32 @llvm.arm.space(i32 1024, i32 undef)
     br label %for.end
 
   for.end:                                          ; preds = %middle.block, %for.body
-    %scevgep = getelementptr i32, i32* %lsr.iv1, i32 1
-    %scevgep3 = getelementptr i32, i32* %lsr.iv2, i32 1
-    %scevgep5 = getelementptr i32, i32* %lsr.iv4, i32 1
+    %scevgep = getelementptr i32, ptr %lsr.iv1, i32 1
+    %scevgep3 = getelementptr i32, ptr %lsr.iv2, i32 1
+    %scevgep5 = getelementptr i32, ptr %lsr.iv4, i32 1
     %lsr.iv.next = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv, i32 1)
     %exitcond = icmp eq i32 %lsr.iv.next, 0
     br i1 %exitcond, label %for.cond.cleanup, label %for.body
@@ -52,7 +52,7 @@
   declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #1
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #0
+  declare void @llvm.stackprotector(ptr, ptr) #0
 
   attributes #0 = { nounwind }
   attributes #1 = { noduplicate nounwind }

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/out-of-range-cbz.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/out-of-range-cbz.mir
index c136cfa74bb44..5815b149859b5 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/out-of-range-cbz.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/out-of-range-cbz.mir
@@ -12,22 +12,22 @@
     br i1 %tobool5, label %j.us.us.preheader, label %entry.split
 
   j.us.us.preheader:                                ; preds = %entry
-    %.pre59 = load i32, i32* @d, align 4
+    %.pre59 = load i32, ptr @d, align 4
     br label %j.us.us
 
   j.us.us:                                          ; preds = %j.us.us, %if.end.us.us.us, %if.end.us.us.us.1, %if.end.us.us.us.2, %if.end.us.us.us.3, %if.end.us.us.us.4, %if.end.us.us.us.5, %if.end.us.us.us.6, %j.us.us.preheader
     %0 = phi i32 [ %.pre59, %j.us.us.preheader ], [ %12, %if.end.us.us.us.6 ], [ %11, %if.end.us.us.us.5 ], [ %10, %if.end.us.us.us.4 ], [ %9, %if.end.us.us.us.3 ], [ %8, %if.end.us.us.us.2 ], [ %7, %if.end.us.us.us.1 ], [ %2, %if.end.us.us.us ], [ %0, %j.us.us ]
-    %cmp.us.us = icmp slt i32 %0, ptrtoint (i32* @a to i32)
+    %cmp.us.us = icmp slt i32 %0, ptrtoint (ptr @a to i32)
     %conv1.us.us = zext i1 %cmp.us.us to i32
-    %1 = load i32, i32* @e, align 4
+    %1 = load i32, ptr @e, align 4
     %and.us.us = and i32 %1, %conv1.us.us
-    store i32 %and.us.us, i32* @e, align 4
+    store i32 %and.us.us, ptr @e, align 4
     %tobool4.us.us.us = icmp eq i32 %0, 0
     br i1 %tobool4.us.us.us, label %if.end.us.us.us, label %j.us.us
 
   if.end.us.us.us:                                  ; preds = %j.us.us
     tail call void asm sideeffect "", ""()
-    %2 = load i32, i32* @d, align 4
+    %2 = load i32, ptr @d, align 4
     %tobool4.us.us.us.1 = icmp eq i32 %2, 0
     br i1 %tobool4.us.us.us.1, label %if.end.us.us.us.1, label %j.us.us
 
@@ -36,16 +36,16 @@
     br i1 %tobool, label %j.us27.preheader, label %j.preheader
 
   j.preheader:                                      ; preds = %entry.split
-    %.pre = load i32, i32* @e, align 4
-    %.pre55 = load i32, i32* @d, align 4
-    %cmp = icmp slt i32 %conv, ptrtoint (i32* @a to i32)
+    %.pre = load i32, ptr @e, align 4
+    %.pre55 = load i32, ptr @d, align 4
+    %cmp = icmp slt i32 %conv, ptrtoint (ptr @a to i32)
     %conv1 = zext i1 %cmp to i32
     br label %j
 
   j.us27.preheader:                                 ; preds = %entry.split
-    %.pre56 = load i32, i32* @d, align 4
-    %.pre57 = load i32, i32* @e, align 4
-    %cmp.us29 = icmp slt i32 %.pre56, ptrtoint (i32* @a to i32)
+    %.pre56 = load i32, ptr @d, align 4
+    %.pre57 = load i32, ptr @e, align 4
+    %cmp.us29 = icmp slt i32 %.pre56, ptrtoint (ptr @a to i32)
     %conv1.us30 = zext i1 %cmp.us29 to i32
     br label %j.us27
 
@@ -56,7 +56,7 @@
     br i1 %4, label %if.end.us38, label %j.us27
 
   if.end.us38:                                      ; preds = %j.us27
-    store i32 %and.us31, i32* @e, align 4
+    store i32 %and.us31, ptr @e, align 4
     tail call void asm sideeffect "", ""()
     ret void
 
@@ -67,43 +67,43 @@
     br i1 %6, label %if.end, label %j
 
   if.end:                                           ; preds = %j
-    store i32 %and, i32* @e, align 4
+    store i32 %and, ptr @e, align 4
     tail call void asm sideeffect "", ""()
     ret void
 
   if.end.us.us.us.1:                                ; preds = %if.end.us.us.us
     tail call void asm sideeffect "", ""()
-    %7 = load i32, i32* @d, align 4
+    %7 = load i32, ptr @d, align 4
     %tobool4.us.us.us.2 = icmp eq i32 %7, 0
     br i1 %tobool4.us.us.us.2, label %if.end.us.us.us.2, label %j.us.us
 
   if.end.us.us.us.2:                                ; preds = %if.end.us.us.us.1
     tail call void asm sideeffect "", ""()
-    %8 = load i32, i32* @d, align 4
+    %8 = load i32, ptr @d, align 4
     %tobool4.us.us.us.3 = icmp eq i32 %8, 0
     br i1 %tobool4.us.us.us.3, label %if.end.us.us.us.3, label %j.us.us
 
   if.end.us.us.us.3:                                ; preds = %if.end.us.us.us.2
     tail call void asm sideeffect "", ""()
-    %9 = load i32, i32* @d, align 4
+    %9 = load i32, ptr @d, align 4
     %tobool4.us.us.us.4 = icmp eq i32 %9, 0
     br i1 %tobool4.us.us.us.4, label %if.end.us.us.us.4, label %j.us.us
 
   if.end.us.us.us.4:                                ; preds = %if.end.us.us.us.3
     tail call void asm sideeffect "", ""()
-    %10 = load i32, i32* @d, align 4
+    %10 = load i32, ptr @d, align 4
     %tobool4.us.us.us.5 = icmp eq i32 %10, 0
     br i1 %tobool4.us.us.us.5, label %if.end.us.us.us.5, label %j.us.us
 
   if.end.us.us.us.5:                                ; preds = %if.end.us.us.us.4
     tail call void asm sideeffect "", ""()
-    %11 = load i32, i32* @d, align 4
+    %11 = load i32, ptr @d, align 4
     %tobool4.us.us.us.6 = icmp eq i32 %11, 0
     br i1 %tobool4.us.us.us.6, label %if.end.us.us.us.6, label %j.us.us
 
   if.end.us.us.us.6:                                ; preds = %if.end.us.us.us.5
     tail call void asm sideeffect "", ""()
-    %12 = load i32, i32* @d, align 4
+    %12 = load i32, ptr @d, align 4
     %tobool4.us.us.us.7 = icmp eq i32 %12, 0
     br i1 %tobool4.us.us.us.7, label %if.end.us.us.us.7, label %j.us.us
 

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/remove-elem-moves.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/remove-elem-moves.mir
index ebf57e704fa27..26336836c370e 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/remove-elem-moves.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/remove-elem-moves.mir
@@ -2,7 +2,7 @@
 # RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -run-pass=arm-low-overhead-loops --verify-machineinstrs %s -o - | FileCheck %s
 
 --- |
-  define dso_local arm_aapcs_vfpcc void @remove_mov_lr_chain(float* nocapture readonly %pSrc, float* nocapture %pDst, i32 %blockSize) #0 {
+  define dso_local arm_aapcs_vfpcc void @remove_mov_lr_chain(ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32 %blockSize) #0 {
   entry:
     %cmp5 = icmp eq i32 %blockSize, 0
     br i1 %cmp5, label %while.end, label %while.body.preheader
@@ -12,10 +12,10 @@
     br i1 %min.iters.check, label %while.body.preheader19, label %vector.memcheck
 
   vector.memcheck:                                  ; preds = %while.body.preheader
-    %scevgep = getelementptr float, float* %pDst, i32 %blockSize
-    %scevgep12 = getelementptr float, float* %pSrc, i32 %blockSize
-    %bound0 = icmp ugt float* %scevgep12, %pDst
-    %bound1 = icmp ugt float* %scevgep, %pSrc
+    %scevgep = getelementptr float, ptr %pDst, i32 %blockSize
+    %scevgep12 = getelementptr float, ptr %pSrc, i32 %blockSize
+    %bound0 = icmp ugt ptr %scevgep12, %pDst
+    %bound1 = icmp ugt ptr %scevgep, %pSrc
     %found.conflict = and i1 %bound0, %bound1
     %0 = lshr i32 %blockSize, 2
     %1 = shl nuw i32 %0, 2
@@ -27,26 +27,24 @@
   vector.ph:                                        ; preds = %vector.memcheck
     %n.vec = and i32 %blockSize, -4
     %ind.end = sub i32 %blockSize, %n.vec
-    %ind.end15 = getelementptr float, float* %pSrc, i32 %n.vec
-    %ind.end17 = getelementptr float, float* %pDst, i32 %n.vec
-    %scevgep9 = getelementptr float, float* %pDst, i32 -4
-    %scevgep14 = getelementptr float, float* %pSrc, i32 -4
+    %ind.end15 = getelementptr float, ptr %pSrc, i32 %n.vec
+    %ind.end17 = getelementptr float, ptr %pDst, i32 %n.vec
+    %scevgep9 = getelementptr float, ptr %pDst, i32 -4
+    %scevgep14 = getelementptr float, ptr %pSrc, i32 -4
     %start1 = call i32 @llvm.start.loop.iterations.i32(i32 %4)
     br label %vector.body
 
   vector.body:                                      ; preds = %vector.body, %vector.ph
-    %lsr.iv15 = phi float* [ %scevgep16, %vector.body ], [ %scevgep14, %vector.ph ]
-    %lsr.iv10 = phi float* [ %scevgep11, %vector.body ], [ %scevgep9, %vector.ph ]
+    %lsr.iv15 = phi ptr [ %scevgep16, %vector.body ], [ %scevgep14, %vector.ph ]
+    %lsr.iv10 = phi ptr [ %scevgep11, %vector.body ], [ %scevgep9, %vector.ph ]
     %5 = phi i32 [ %start1, %vector.ph ], [ %7, %vector.body ]
-    %lsr.iv1517 = bitcast float* %lsr.iv15 to <4 x float>*
-    %lsr.iv1012 = bitcast float* %lsr.iv10 to <4 x float>*
-    %scevgep18 = getelementptr <4 x float>, <4 x float>* %lsr.iv1517, i32 1
-    %wide.load = load <4 x float>, <4 x float>* %scevgep18, align 4
+    %scevgep18 = getelementptr <4 x float>, ptr %lsr.iv15, i32 1
+    %wide.load = load <4 x float>, ptr %scevgep18, align 4
     %6 = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> %wide.load)
-    %scevgep13 = getelementptr <4 x float>, <4 x float>* %lsr.iv1012, i32 1
-    store <4 x float> %6, <4 x float>* %scevgep13, align 4
-    %scevgep11 = getelementptr float, float* %lsr.iv10, i32 4
-    %scevgep16 = getelementptr float, float* %lsr.iv15, i32 4
+    %scevgep13 = getelementptr <4 x float>, ptr %lsr.iv10, i32 1
+    store <4 x float> %6, ptr %scevgep13, align 4
+    %scevgep11 = getelementptr float, ptr %lsr.iv10, i32 4
+    %scevgep16 = getelementptr float, ptr %lsr.iv15, i32 4
     %7 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %5, i32 1)
     %8 = icmp ne i32 %7, 0
     br i1 %8, label %vector.body, label %middle.block
@@ -57,24 +55,24 @@
 
   while.body.preheader19:                           ; preds = %middle.block, %vector.memcheck, %while.body.preheader
     %blkCnt.08.ph = phi i32 [ %blockSize, %vector.memcheck ], [ %blockSize, %while.body.preheader ], [ %ind.end, %middle.block ]
-    %pSrc.addr.07.ph = phi float* [ %pSrc, %vector.memcheck ], [ %pSrc, %while.body.preheader ], [ %ind.end15, %middle.block ]
-    %pDst.addr.06.ph = phi float* [ %pDst, %vector.memcheck ], [ %pDst, %while.body.preheader ], [ %ind.end17, %middle.block ]
-    %scevgep1 = getelementptr float, float* %pSrc.addr.07.ph, i32 -1
-    %scevgep4 = getelementptr float, float* %pDst.addr.06.ph, i32 -1
+    %pSrc.addr.07.ph = phi ptr [ %pSrc, %vector.memcheck ], [ %pSrc, %while.body.preheader ], [ %ind.end15, %middle.block ]
+    %pDst.addr.06.ph = phi ptr [ %pDst, %vector.memcheck ], [ %pDst, %while.body.preheader ], [ %ind.end17, %middle.block ]
+    %scevgep1 = getelementptr float, ptr %pSrc.addr.07.ph, i32 -1
+    %scevgep4 = getelementptr float, ptr %pDst.addr.06.ph, i32 -1
     %start2 = call i32 @llvm.start.loop.iterations.i32(i32 %blkCnt.08.ph)
     br label %while.body
 
   while.body:                                       ; preds = %while.body, %while.body.preheader19
-    %lsr.iv5 = phi float* [ %scevgep6, %while.body ], [ %scevgep4, %while.body.preheader19 ]
-    %lsr.iv = phi float* [ %scevgep2, %while.body ], [ %scevgep1, %while.body.preheader19 ]
+    %lsr.iv5 = phi ptr [ %scevgep6, %while.body ], [ %scevgep4, %while.body.preheader19 ]
+    %lsr.iv = phi ptr [ %scevgep2, %while.body ], [ %scevgep1, %while.body.preheader19 ]
     %9 = phi i32 [ %start2, %while.body.preheader19 ], [ %12, %while.body ]
-    %scevgep3 = getelementptr float, float* %lsr.iv, i32 1
-    %scevgep7 = getelementptr float, float* %lsr.iv5, i32 1
-    %10 = load float, float* %scevgep3, align 4
+    %scevgep3 = getelementptr float, ptr %lsr.iv, i32 1
+    %scevgep7 = getelementptr float, ptr %lsr.iv5, i32 1
+    %10 = load float, ptr %scevgep3, align 4
     %11 = tail call fast float @llvm.fabs.f32(float %10)
-    store float %11, float* %scevgep7, align 4
-    %scevgep2 = getelementptr float, float* %lsr.iv, i32 1
-    %scevgep6 = getelementptr float, float* %lsr.iv5, i32 1
+    store float %11, ptr %scevgep7, align 4
+    %scevgep2 = getelementptr float, ptr %lsr.iv, i32 1
+    %scevgep6 = getelementptr float, ptr %lsr.iv5, i32 1
     %12 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %9, i32 1)
     %13 = icmp ne i32 %12, 0
     br i1 %13, label %while.body, label %while.end

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-after-call.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-after-call.mir
index 81514a02577e8..1fe361d478a3e 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-after-call.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-after-call.mir
@@ -20,7 +20,7 @@
   while.body:                                       ; preds = %while.body, %while.body.preheader
     %res.07 = phi i32 [ %add, %while.body ], [ 0, %while.body.preheader ]
     %0 = phi i32 [ %start, %while.body.preheader ], [ %1, %while.body ]
-    %call = tail call i32 bitcast (i32 (...)* @bar to i32 ()*)()
+    %call = tail call i32 @bar()
     %add = add nsw i32 %call, %res.07
     %1 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1)
     %2 = icmp ne i32 %1, 0

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-after-write.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-after-write.mir
index d74d77f3158d2..78d52eb4106d9 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-after-write.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-after-write.mir
@@ -36,7 +36,7 @@
   declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #1
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
   
   attributes #0 = { "target-features"="+mve.fp" }
   attributes #1 = { noduplicate nounwind }

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-non-loop.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-non-loop.mir
index d8a1dcc3bba49..1c1d21daf0375 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-non-loop.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-non-loop.mir
@@ -17,7 +17,7 @@
   target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv8.1m.main"
   
-  define void @non_loop(i16* nocapture %a, i16* nocapture readonly %b, i32 %N) {
+  define void @non_loop(ptr nocapture %a, ptr nocapture readonly %b, i32 %N) {
   entry:
     %cmp = icmp ugt i32 %N, 2
     br i1 %cmp, label %not.preheader, label %while.body.preheader
@@ -27,22 +27,22 @@
     br i1 %test, label %while.body.preheader, label %while.end
   
   while.body.preheader:                             ; preds = %not.preheader, %entry
-    %scevgep = getelementptr i16, i16* %a, i32 -1
-    %scevgep3 = getelementptr i16, i16* %b, i32 -1
+    %scevgep = getelementptr i16, ptr %a, i32 -1
+    %scevgep3 = getelementptr i16, ptr %b, i32 -1
     br label %while.body
   
   while.body:                                       ; preds = %while.body, %while.body.preheader
-    %lsr.iv4 = phi i16* [ %scevgep3, %while.body.preheader ], [ %scevgep5, %while.body ]
-    %lsr.iv = phi i16* [ %scevgep, %while.body.preheader ], [ %scevgep1, %while.body ]
+    %lsr.iv4 = phi ptr [ %scevgep3, %while.body.preheader ], [ %scevgep5, %while.body ]
+    %lsr.iv = phi ptr [ %scevgep, %while.body.preheader ], [ %scevgep1, %while.body ]
     %count = phi i32 [ %count.next, %while.body ], [ %N, %while.body.preheader ]
-    %scevgep7 = getelementptr i16, i16* %lsr.iv, i32 1
-    %scevgep4 = getelementptr i16, i16* %lsr.iv4, i32 1
-    %load = load i16, i16* %scevgep4, align 2
-    store i16 %load, i16* %scevgep7, align 2
+    %scevgep7 = getelementptr i16, ptr %lsr.iv, i32 1
+    %scevgep4 = getelementptr i16, ptr %lsr.iv4, i32 1
+    %load = load i16, ptr %scevgep4, align 2
+    store i16 %load, ptr %scevgep7, align 2
     %count.next = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %count, i32 1)
     %cmp1 = icmp ne i32 %count.next, 0
-    %scevgep1 = getelementptr i16, i16* %lsr.iv, i32 1
-    %scevgep5 = getelementptr i16, i16* %lsr.iv4, i32 1
+    %scevgep1 = getelementptr i16, ptr %lsr.iv, i32 1
+    %scevgep5 = getelementptr i16, ptr %lsr.iv4, i32 1
     br i1 %cmp1, label %while.body, label %while.end
   
   while.end:                                        ; preds = %while.body, %not.preheader
@@ -51,7 +51,7 @@
   
   declare i1 @llvm.test.set.loop.iterations.i32(i32) #0
   declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #0
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
   
   attributes #0 = { noduplicate nounwind }
   attributes #1 = { nounwind }

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-while.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-while.mir
index a6458ce1a530f..5dd75d94d319b 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-while.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revert-while.mir
@@ -5,29 +5,29 @@
   target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv8.1m.main"
 
-  define void @ne_trip_count(i1 zeroext %t1, i32* nocapture %a, i32* nocapture readonly %b, i32 %N) #0 {
+  define void @ne_trip_count(i1 zeroext %t1, ptr nocapture %a, ptr nocapture readonly %b, i32 %N) #0 {
   entry:
     %0 = call i1 @llvm.test.set.loop.iterations.i32(i32 %N)
     br i1 %0, label %do.body.preheader, label %if.end
 
   do.body.preheader:                                ; preds = %entry
-    %scevgep2 = getelementptr i32, i32* %a, i32 -1
-    %scevgep5 = getelementptr i32, i32* %b, i32 -1
+    %scevgep2 = getelementptr i32, ptr %a, i32 -1
+    %scevgep5 = getelementptr i32, ptr %b, i32 -1
     br label %do.body
 
   do.body:                                          ; preds = %do.body, %do.body.preheader
-    %lsr.iv6 = phi i32* [ %scevgep5, %do.body.preheader ], [ %scevgep7, %do.body ]
-    %lsr.iv = phi i32* [ %scevgep2, %do.body.preheader ], [ %scevgep3, %do.body ]
+    %lsr.iv6 = phi ptr [ %scevgep5, %do.body.preheader ], [ %scevgep7, %do.body ]
+    %lsr.iv = phi ptr [ %scevgep2, %do.body.preheader ], [ %scevgep3, %do.body ]
     %1 = phi i32 [ %2, %do.body ], [ %N, %do.body.preheader ]
-    %scevgep = getelementptr i32, i32* %lsr.iv6, i32 1
-    %scevgep1 = getelementptr i32, i32* %lsr.iv, i32 1
+    %scevgep = getelementptr i32, ptr %lsr.iv6, i32 1
+    %scevgep1 = getelementptr i32, ptr %lsr.iv, i32 1
     %size = call i32 @llvm.arm.space(i32 4096, i32 undef)
-    %tmp = load i32, i32* %scevgep, align 4
-    store i32 %tmp, i32* %scevgep1, align 4
+    %tmp = load i32, ptr %scevgep, align 4
+    store i32 %tmp, ptr %scevgep1, align 4
     %2 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %1, i32 1)
     %3 = icmp ne i32 %2, 0
-    %scevgep3 = getelementptr i32, i32* %lsr.iv, i32 1
-    %scevgep7 = getelementptr i32, i32* %lsr.iv6, i32 1
+    %scevgep3 = getelementptr i32, ptr %lsr.iv, i32 1
+    %scevgep7 = getelementptr i32, ptr %lsr.iv6, i32 1
     br i1 %3, label %do.body, label %if.end
 
   if.end:                                           ; preds = %do.body, %entry

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revertcallearly.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revertcallearly.mir
index dff79b15ef8dd..2f9019ddeca8d 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revertcallearly.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/revertcallearly.mir
@@ -7,7 +7,7 @@
 
   define i32 @e() optsize {
   entry:
-    %.pr = load i32, i32* @d, align 4
+    %.pr = load i32, ptr @d, align 4
     %cmp13 = icmp sgt i32 %.pr, -1
     br i1 %cmp13, label %for.cond1.preheader.preheader, label %for.end9
 
@@ -18,20 +18,20 @@
 
   for.cond1.preheader:                              ; preds = %for.cond1.preheader.preheader, %for.cond1.preheader
     %2 = phi i32 [ %1, %for.cond1.preheader.preheader ], [ %3, %for.cond1.preheader ]
-    call void @llvm.memset.p0i8.i32(i8* nonnull align 4 dereferenceable(24) bitcast ([1 x i32]* @c to i8*), i8 0, i32 24, i1 false)
+    call void @llvm.memset.p0.i32(ptr nonnull align 4 dereferenceable(24) @c, i8 0, i32 24, i1 false)
     %3 = call i32 @llvm.loop.decrement.reg.i32(i32 %2, i32 1)
     %4 = icmp ne i32 %3, 0
     br i1 %4, label %for.cond1.preheader, label %for.cond.for.end9_crit_edge
 
   for.cond.for.end9_crit_edge:                      ; preds = %for.cond1.preheader
-    store i32 -1, i32* @d, align 4
+    store i32 -1, ptr @d, align 4
     br label %for.end9
 
   for.end9:                                         ; preds = %for.cond.for.end9_crit_edge, %entry
     ret i32 undef
   }
 
-  declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1 immarg)
+  declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg)
   declare i32 @llvm.start.loop.iterations.i32(i32)
   declare i32 @llvm.loop.decrement.reg.i32(i32, i32)
 

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-def-no-mov.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-def-no-mov.mir
index 065e0e20ce377..f7bac9107f55a 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-def-no-mov.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-def-no-mov.mir
@@ -7,10 +7,10 @@
   target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv8.1m.main"
 
-  define i32 @do_copy(i32 %n, i32* nocapture %p, i32* nocapture readonly %q) {
+  define i32 @do_copy(i32 %n, ptr nocapture %p, ptr nocapture readonly %q) {
   entry:
-    %scevgep = getelementptr i32, i32* %q, i32 -1
-    %scevgep3 = getelementptr i32, i32* %p, i32 -1
+    %scevgep = getelementptr i32, ptr %q, i32 -1
+    %scevgep3 = getelementptr i32, ptr %p, i32 -1
     %start = call i32 @llvm.start.loop.iterations.i32(i32 %n)
     br label %preheader
 
@@ -18,15 +18,15 @@
     br label %while.body
 
   while.body:                                       ; preds = %while.body, %entry
-    %lsr.iv4 = phi i32* [ %scevgep5, %while.body ], [ %scevgep3, %preheader ]
-    %lsr.iv = phi i32* [ %scevgep1, %while.body ], [ %scevgep, %preheader ]
+    %lsr.iv4 = phi ptr [ %scevgep5, %while.body ], [ %scevgep3, %preheader ]
+    %lsr.iv = phi ptr [ %scevgep1, %while.body ], [ %scevgep, %preheader ]
     %0 = phi i32 [ %start, %preheader ], [ %2, %while.body ]
-    %scevgep6 = getelementptr i32, i32* %lsr.iv, i32 1
-    %scevgep2 = getelementptr i32, i32* %lsr.iv4, i32 1
-    %1 = load i32, i32* %scevgep6, align 4
-    store i32 %1, i32* %scevgep2, align 4
-    %scevgep1 = getelementptr i32, i32* %lsr.iv, i32 1
-    %scevgep5 = getelementptr i32, i32* %lsr.iv4, i32 1
+    %scevgep6 = getelementptr i32, ptr %lsr.iv, i32 1
+    %scevgep2 = getelementptr i32, ptr %lsr.iv4, i32 1
+    %1 = load i32, ptr %scevgep6, align 4
+    store i32 %1, ptr %scevgep2, align 4
+    %scevgep1 = getelementptr i32, ptr %lsr.iv, i32 1
+    %scevgep5 = getelementptr i32, ptr %lsr.iv4, i32 1
     %2 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1)
     %3 = icmp ne i32 %2, 0
     br i1 %3, label %while.body, label %while.end

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-retaining.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-retaining.mir
index f16e4b8bb8027..4e4923a3ceadf 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-retaining.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/safe-retaining.mir
@@ -2,7 +2,7 @@
 # RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s
 
 --- |
-  define arm_aapcs_vfpcc void @test_vqrshruntq_n_s32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c, i32 %elts, i32 %iters) {
+  define arm_aapcs_vfpcc void @test_vqrshruntq_n_s32(ptr %a, ptr %b, ptr %c, i32 %elts, i32 %iters) {
   entry:
     %cmp = icmp slt i32 %elts, 1
     br i1 %cmp, label %exit, label %loop.ph
@@ -14,20 +14,20 @@
   loop.body:                                        ; preds = %loop.body, %loop.ph
     %lsr.iv = phi i32 [ %lsr.iv.next, %loop.body ], [ %start, %loop.ph ]
     %count = phi i32 [ %elts, %loop.ph ], [ %elts.rem, %loop.body ]
-    %addr.a = phi <4 x i32>* [ %a, %loop.ph ], [ %addr.a.next, %loop.body ]
-    %addr.b = phi <4 x i32>* [ %b, %loop.ph ], [ %addr.b.next, %loop.body ]
-    %addr.c = phi <4 x i32>* [ %c, %loop.ph ], [ %addr.c.next, %loop.body ]
+    %addr.a = phi ptr [ %a, %loop.ph ], [ %addr.a.next, %loop.body ]
+    %addr.b = phi ptr [ %b, %loop.ph ], [ %addr.b.next, %loop.body ]
+    %addr.c = phi ptr [ %c, %loop.ph ], [ %addr.c.next, %loop.body ]
     %pred = call <4 x i1> @llvm.arm.mve.vctp32(i32 %count)
     %elts.rem = sub i32 %count, 4
-    %masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.a, i32 4, <4 x i1> %pred, <4 x i32> undef)
-    %masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.b, i32 4, <4 x i1> %pred, <4 x i32> undef)
+    %masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %addr.a, i32 4, <4 x i1> %pred, <4 x i32> undef)
+    %masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %addr.b, i32 4, <4 x i1> %pred, <4 x i32> undef)
     %bitcast.a = bitcast <4 x i32> %masked.load.a to <8 x i16>
     %shrn = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> %bitcast.a, <4 x i32> %masked.load.b, i32 3, i32 1, i32 0, i32 1, i32 0, i32 1)
     %bitcast = bitcast <8 x i16> %shrn to <4 x i32>
-    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %bitcast, <4 x i32>* %addr.c, i32 4, <4 x i1> %pred)
-    %addr.a.next = getelementptr <4 x i32>, <4 x i32>* %addr.a, i32 1
-    %addr.b.next = getelementptr <4 x i32>, <4 x i32>* %addr.b, i32 1
-    %addr.c.next = getelementptr <4 x i32>, <4 x i32>* %addr.c, i32 1
+    call void @llvm.masked.store.v4i32.p0(<4 x i32> %bitcast, ptr %addr.c, i32 4, <4 x i1> %pred)
+    %addr.a.next = getelementptr <4 x i32>, ptr %addr.a, i32 1
+    %addr.b.next = getelementptr <4 x i32>, ptr %addr.b, i32 1
+    %addr.c.next = getelementptr <4 x i32>, ptr %addr.c, i32 1
     %loop.dec = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv, i32 1)
     %end = icmp ne i32 %loop.dec, 0
     %lsr.iv.next = add i32 %lsr.iv, -1
@@ -37,7 +37,7 @@
     ret void
   }
 
-  define arm_aapcs_vfpcc void @test_vqrshruntq_n_s16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c, i32 %elts, i32 %iters) {
+  define arm_aapcs_vfpcc void @test_vqrshruntq_n_s16(ptr %a, ptr %b, ptr %c, i32 %elts, i32 %iters) {
   entry:
     %cmp = icmp slt i32 %elts, 1
     br i1 %cmp, label %exit, label %loop.ph
@@ -49,20 +49,20 @@
   loop.body:                                        ; preds = %loop.body, %loop.ph
     %lsr.iv = phi i32 [ %lsr.iv.next, %loop.body ], [ %start, %loop.ph ]
     %count = phi i32 [ %elts, %loop.ph ], [ %elts.rem, %loop.body ]
-    %addr.a = phi <8 x i16>* [ %a, %loop.ph ], [ %addr.a.next, %loop.body ]
-    %addr.b = phi <8 x i16>* [ %b, %loop.ph ], [ %addr.b.next, %loop.body ]
-    %addr.c = phi <8 x i16>* [ %c, %loop.ph ], [ %addr.c.next, %loop.body ]
+    %addr.a = phi ptr [ %a, %loop.ph ], [ %addr.a.next, %loop.body ]
+    %addr.b = phi ptr [ %b, %loop.ph ], [ %addr.b.next, %loop.body ]
+    %addr.c = phi ptr [ %c, %loop.ph ], [ %addr.c.next, %loop.body ]
     %pred = call <8 x i1> @llvm.arm.mve.vctp16(i32 %count)
     %elts.rem = sub i32 %count, 8
-    %masked.load.a = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %addr.a, i32 2, <8 x i1> %pred, <8 x i16> undef)
-    %masked.load.b = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %addr.b, i32 2, <8 x i1> %pred, <8 x i16> undef)
+    %masked.load.a = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %addr.a, i32 2, <8 x i1> %pred, <8 x i16> undef)
+    %masked.load.b = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %addr.b, i32 2, <8 x i1> %pred, <8 x i16> undef)
     %bitcast.a = bitcast <8 x i16> %masked.load.a to <16 x i8>
     %shrn = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> %bitcast.a, <8 x i16> %masked.load.b, i32 1, i32 1, i32 0, i32 1, i32 0, i32 1)
     %bitcast = bitcast <16 x i8> %shrn to <8 x i16>
-    call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %bitcast, <8 x i16>* %addr.c, i32 2, <8 x i1> %pred)
-    %addr.a.next = getelementptr <8 x i16>, <8 x i16>* %addr.b, i32 1
-    %addr.b.next = getelementptr <8 x i16>, <8 x i16>* %addr.b, i32 1
-    %addr.c.next = getelementptr <8 x i16>, <8 x i16>* %addr.c, i32 1
+    call void @llvm.masked.store.v8i16.p0(<8 x i16> %bitcast, ptr %addr.c, i32 2, <8 x i1> %pred)
+    %addr.a.next = getelementptr <8 x i16>, ptr %addr.b, i32 1
+    %addr.b.next = getelementptr <8 x i16>, ptr %addr.b, i32 1
+    %addr.c.next = getelementptr <8 x i16>, ptr %addr.c, i32 1
     %loop.dec = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv, i32 1)
     %end = icmp ne i32 %loop.dec, 0
     %lsr.iv.next = add i32 %lsr.iv, -1
@@ -75,12 +75,12 @@
   declare i32 @llvm.start.loop.iterations.i32(i32)
   declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
   declare <4 x i1> @llvm.arm.mve.vctp32(i32)
-  declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
-  declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
+  declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
+  declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)
   declare <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16>, <4 x i32>, i32, i32, i32, i32, i32, i32)
   declare <8 x i1> @llvm.arm.mve.vctp16(i32)
-  declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>)
-  declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>)
+  declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32 immarg, <8 x i1>, <8 x i16>)
+  declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32 immarg, <8 x i1>)
   declare <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8>, <8 x i16>, i32, i32, i32, i32, i32, i32)
 
 ...

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/size-limit.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/size-limit.mir
index b069ed9927e68..c87c0bb1e2fdd 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/size-limit.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/size-limit.mir
@@ -7,15 +7,15 @@
   target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv8.1m.main"
 
-  define dso_local arm_aapcscc void @size_limit(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) {
+  define dso_local arm_aapcscc void @size_limit(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) {
   entry:
     %cmp8 = icmp eq i32 %N, 0
     br i1 %cmp8, label %for.cond.cleanup, label %for.body.preheader
 
   for.body.preheader:                               ; preds = %entry
-    %scevgep = getelementptr i32, i32* %a, i32 -1
-    %scevgep4 = getelementptr i32, i32* %c, i32 -1
-    %scevgep8 = getelementptr i32, i32* %b, i32 -1
+    %scevgep = getelementptr i32, ptr %a, i32 -1
+    %scevgep4 = getelementptr i32, ptr %c, i32 -1
+    %scevgep8 = getelementptr i32, ptr %b, i32 -1
     %start = call i32 @llvm.start.loop.iterations.i32(i32 %N)
     br label %for.body
 
@@ -23,21 +23,21 @@
     ret void
 
   for.body:                                         ; preds = %for.body, %for.body.preheader
-    %lsr.iv9 = phi i32* [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
-    %lsr.iv5 = phi i32* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-    %lsr.iv1 = phi i32* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+    %lsr.iv9 = phi ptr [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
+    %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+    %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
     %0 = phi i32 [ %start, %for.body.preheader ], [ %3, %for.body ]
     %size = call i32 @llvm.arm.space(i32 4070, i32 undef)
-    %scevgep3 = getelementptr i32, i32* %lsr.iv9, i32 1
-    %1 = load i32, i32* %scevgep3, align 4
-    %scevgep7 = getelementptr i32, i32* %lsr.iv5, i32 1
-    %2 = load i32, i32* %scevgep7, align 4
+    %scevgep3 = getelementptr i32, ptr %lsr.iv9, i32 1
+    %1 = load i32, ptr %scevgep3, align 4
+    %scevgep7 = getelementptr i32, ptr %lsr.iv5, i32 1
+    %2 = load i32, ptr %scevgep7, align 4
     %mul = mul nsw i32 %2, %1
-    %scevgep11 = getelementptr i32, i32* %lsr.iv1, i32 1
-    store i32 %mul, i32* %scevgep11, align 4
-    %scevgep2 = getelementptr i32, i32* %lsr.iv1, i32 1
-    %scevgep6 = getelementptr i32, i32* %lsr.iv5, i32 1
-    %scevgep10 = getelementptr i32, i32* %lsr.iv9, i32 1
+    %scevgep11 = getelementptr i32, ptr %lsr.iv1, i32 1
+    store i32 %mul, ptr %scevgep11, align 4
+    %scevgep2 = getelementptr i32, ptr %lsr.iv1, i32 1
+    %scevgep6 = getelementptr i32, ptr %lsr.iv5, i32 1
+    %scevgep10 = getelementptr i32, ptr %lsr.iv9, i32 1
     %3 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1)
     %4 = icmp ne i32 %3, 0
     br i1 %4, label %for.body, label %for.cond.cleanup
@@ -53,7 +53,7 @@
   declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #1
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #0
+  declare void @llvm.stackprotector(ptr, ptr) #0
 
   attributes #0 = { nounwind }
   attributes #1 = { noduplicate nounwind }

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/spillingmove.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/spillingmove.mir
index c376816d6706b..1bd1d6b99e422 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/spillingmove.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/spillingmove.mir
@@ -3,13 +3,13 @@
 
 --- |
   %struct.arm_2d_size_t = type { i16, i16 }
-  define void @none(i16* noalias nocapture %phwTargetBase, i16 signext %iTargetStride, %struct.arm_2d_size_t* noalias nocapture readonly %ptCopySize, i16 zeroext %hwColour, i32 %chRatio) {
+  define void @none(ptr noalias nocapture %phwTargetBase, i16 signext %iTargetStride, ptr noalias nocapture readonly %ptCopySize, i16 zeroext %hwColour, i32 %chRatio) {
     unreachable
   }
-  define void @copyin(i16* noalias nocapture %phwTargetBase, i16 signext %iTargetStride, %struct.arm_2d_size_t* noalias nocapture readonly %ptCopySize, i16 zeroext %hwColour, i32 %chRatio) {
+  define void @copyin(ptr noalias nocapture %phwTargetBase, i16 signext %iTargetStride, ptr noalias nocapture readonly %ptCopySize, i16 zeroext %hwColour, i32 %chRatio) {
     unreachable
   }
-  define void @copyout(i16* noalias nocapture %phwTargetBase, i16 signext %iTargetStride, %struct.arm_2d_size_t* noalias nocapture readonly %ptCopySize, i16 zeroext %hwColour, i32 %chRatio) {
+  define void @copyout(ptr noalias nocapture %phwTargetBase, i16 signext %iTargetStride, ptr noalias nocapture readonly %ptCopySize, i16 zeroext %hwColour, i32 %chRatio) {
     unreachable
   }
 ...

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/switch.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/switch.mir
index 54baaa12fbdf1..9b369cb38fe1a 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/switch.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/switch.mir
@@ -11,7 +11,7 @@
   target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv8.1m.main"
   
-  define dso_local arm_aapcscc i32 @search(i8* nocapture readonly %c, i32 %N) {
+  define dso_local arm_aapcscc i32 @search(ptr nocapture readonly %c, i32 %N) {
   entry:
     %cmp11 = icmp eq i32 %N, 0
     br i1 %cmp11, label %for.cond.cleanup, label %for.body.preheader
@@ -27,11 +27,11 @@
     ret i32 %sub
   
   for.body:                                         ; preds = %for.inc, %for.body.preheader
-    %lsr.iv1 = phi i8* [ %c, %for.body.preheader ], [ %scevgep, %for.inc ]
+    %lsr.iv1 = phi ptr [ %c, %for.body.preheader ], [ %scevgep, %for.inc ]
     %spaces.013 = phi i32 [ %spaces.1, %for.inc ], [ 0, %for.body.preheader ]
     %found.012 = phi i32 [ %found.1, %for.inc ], [ 0, %for.body.preheader ]
     %0 = phi i32 [ %start, %for.body.preheader ], [ %3, %for.inc ]
-    %1 = load i8, i8* %lsr.iv1, align 1
+    %1 = load i8, ptr %lsr.iv1, align 1
     %2 = zext i8 %1 to i32
     switch i32 %2, label %for.inc [
       i32 108, label %sw.bb
@@ -51,7 +51,7 @@
   for.inc:                                          ; preds = %sw.bb1, %sw.bb, %for.body
     %found.1 = phi i32 [ %found.012, %for.body ], [ %found.012, %sw.bb1 ], [ %inc, %sw.bb ]
     %spaces.1 = phi i32 [ %spaces.013, %for.body ], [ %inc2, %sw.bb1 ], [ %spaces.013, %sw.bb ]
-    %scevgep = getelementptr i8, i8* %lsr.iv1, i32 1
+    %scevgep = getelementptr i8, ptr %lsr.iv1, i32 1
     %3 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1)
     %4 = icmp ne i32 %3, 0
     br i1 %4, label %for.body, label %for.cond.cleanup
@@ -64,7 +64,7 @@
   declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #0
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
   
   attributes #0 = { noduplicate nounwind }
   attributes #1 = { nounwind }

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-def.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-def.mir
index 863d1f9e03242..14a64e9946fd4 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-def.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-def.mir
@@ -3,28 +3,28 @@
 # Check that subs isn't used during the revert because there's a def after LoopDec.
 
 --- |
-  define i32 @do_copy(i32 %n, i32* nocapture %p, i32* nocapture readonly %q) {
+  define i32 @do_copy(i32 %n, ptr nocapture %p, ptr nocapture readonly %q) {
   entry:
-    %scevgep = getelementptr i32, i32* %q, i32 -1
-    %scevgep3 = getelementptr i32, i32* %p, i32 -1
+    %scevgep = getelementptr i32, ptr %q, i32 -1
+    %scevgep3 = getelementptr i32, ptr %p, i32 -1
     %start = call i32 @llvm.start.loop.iterations.i32(i32 %n)
     %limit = lshr i32 %n, 1
     br label %while.body
 
   while.body:                                       ; preds = %while.body, %entry
-    %lsr.iv4 = phi i32* [ %scevgep5, %while.body ], [ %scevgep3, %entry ]
-    %lsr.iv = phi i32* [ %scevgep1, %while.body ], [ %scevgep, %entry ]
+    %lsr.iv4 = phi ptr [ %scevgep5, %while.body ], [ %scevgep3, %entry ]
+    %lsr.iv = phi ptr [ %scevgep1, %while.body ], [ %scevgep, %entry ]
     %tmp = phi i32 [ %start, %entry ], [ %tmp2, %while.body ]
-    %scevgep7 = getelementptr i32, i32* %lsr.iv, i32 1
-    %scevgep4 = getelementptr i32, i32* %lsr.iv4, i32 1
-    %tmp1 = load i32, i32* %scevgep7, align 4
+    %scevgep7 = getelementptr i32, ptr %lsr.iv, i32 1
+    %scevgep4 = getelementptr i32, ptr %lsr.iv4, i32 1
+    %tmp1 = load i32, ptr %scevgep7, align 4
     %tmp2 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp, i32 1)
     %half = lshr i32 %tmp1, 1
     %cmp = icmp ult i32 %tmp, %limit
     %res = select i1 %cmp, i32 %tmp1, i32 %half
-    store i32 %res, i32* %scevgep4, align 4
-    %scevgep1 = getelementptr i32, i32* %lsr.iv, i32 1
-    %scevgep5 = getelementptr i32, i32* %lsr.iv4, i32 1
+    store i32 %res, ptr %scevgep4, align 4
+    %scevgep1 = getelementptr i32, ptr %lsr.iv, i32 1
+    %scevgep5 = getelementptr i32, ptr %lsr.iv4, i32 1
     %tmp3 = icmp ne i32 %tmp2, 0
     br i1 %tmp3, label %while.body, label %while.end
 
@@ -39,7 +39,7 @@
   declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #0
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
 
   attributes #0 = { noduplicate nounwind }
   attributes #1 = { nounwind }

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-use.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-use.mir
index b873c52a75062..d64e975b77491 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-use.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-use.mir
@@ -3,28 +3,28 @@
 # Check that subs isn't used during the revert because there's a cpsr use after it.
 
 --- |
-  define i32 @do_copy(i32 %n, i32* nocapture %p, i32* nocapture readonly %q) {
+  define i32 @do_copy(i32 %n, ptr nocapture %p, ptr nocapture readonly %q) {
   entry:
-    %scevgep = getelementptr i32, i32* %q, i32 -1
-    %scevgep3 = getelementptr i32, i32* %p, i32 -1
+    %scevgep = getelementptr i32, ptr %q, i32 -1
+    %scevgep3 = getelementptr i32, ptr %p, i32 -1
     %start = call i32 @llvm.start.loop.iterations.i32(i32 %n)
     %limit = lshr i32 %n, 1
     br label %while.body
 
   while.body:                                       ; preds = %while.body, %entry
-    %lsr.iv4 = phi i32* [ %scevgep5, %while.body ], [ %scevgep3, %entry ]
-    %lsr.iv = phi i32* [ %scevgep1, %while.body ], [ %scevgep, %entry ]
+    %lsr.iv4 = phi ptr [ %scevgep5, %while.body ], [ %scevgep3, %entry ]
+    %lsr.iv = phi ptr [ %scevgep1, %while.body ], [ %scevgep, %entry ]
     %tmp = phi i32 [ %start, %entry ], [ %tmp2, %while.body ]
-    %scevgep7 = getelementptr i32, i32* %lsr.iv, i32 1
-    %scevgep4 = getelementptr i32, i32* %lsr.iv4, i32 1
-    %tmp1 = load i32, i32* %scevgep7, align 4
+    %scevgep7 = getelementptr i32, ptr %lsr.iv, i32 1
+    %scevgep4 = getelementptr i32, ptr %lsr.iv4, i32 1
+    %tmp1 = load i32, ptr %scevgep7, align 4
     %tmp2 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %tmp, i32 1)
     %half = lshr i32 %tmp1, 1
     %cmp = icmp ult i32 %tmp, %limit
     %res = select i1 %cmp, i32 %tmp1, i32 %half
-    store i32 %res, i32* %scevgep4, align 4
-    %scevgep1 = getelementptr i32, i32* %lsr.iv, i32 1
-    %scevgep5 = getelementptr i32, i32* %lsr.iv4, i32 1
+    store i32 %res, ptr %scevgep4, align 4
+    %scevgep1 = getelementptr i32, ptr %lsr.iv, i32 1
+    %scevgep5 = getelementptr i32, ptr %lsr.iv4, i32 1
     %tmp3 = icmp ne i32 %tmp2, 0
     br i1 %tmp3, label %while.body, label %while.end
 
@@ -39,7 +39,7 @@
   declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #0
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
 
   attributes #0 = { noduplicate nounwind }
   attributes #1 = { nounwind }

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-retaining.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-retaining.mir
index 0e66773b77ddc..6642c1ad97797 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-retaining.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-retaining.mir
@@ -2,7 +2,7 @@
 # RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s
 
 --- |
-  define arm_aapcs_vfpcc void @test_vmvn(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c, i32 %elts, i32 %iters) #0 {
+  define arm_aapcs_vfpcc void @test_vmvn(ptr %a, ptr %b, ptr %c, i32 %elts, i32 %iters) #0 {
   entry:
     %cmp = icmp slt i32 %elts, 1
     br i1 %cmp, label %exit, label %loop.ph
@@ -14,21 +14,21 @@
   loop.body:                                        ; preds = %loop.body, %loop.ph
     %lsr.iv = phi i32 [ %lsr.iv.next, %loop.body ], [ %start, %loop.ph ]
     %count = phi i32 [ %elts, %loop.ph ], [ %elts.rem, %loop.body ]
-    %addr.a = phi <4 x i32>* [ %a, %loop.ph ], [ %addr.a.next, %loop.body ]
-    %addr.b = phi <4 x i32>* [ %b, %loop.ph ], [ %addr.b.next, %loop.body ]
-    %addr.c = phi <4 x i32>* [ %c, %loop.ph ], [ %addr.c.next, %loop.body ]
+    %addr.a = phi ptr [ %a, %loop.ph ], [ %addr.a.next, %loop.body ]
+    %addr.b = phi ptr [ %b, %loop.ph ], [ %addr.b.next, %loop.body ]
+    %addr.c = phi ptr [ %c, %loop.ph ], [ %addr.c.next, %loop.body ]
     %pred = call <4 x i1> @llvm.arm.mve.vctp32(i32 %count)
     %elts.rem = sub i32 %count, 4
-    %masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.a, i32 4, <4 x i1> %pred, <4 x i32> undef)
-    %masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.b, i32 4, <4 x i1> %pred, <4 x i32> undef)
+    %masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %addr.a, i32 4, <4 x i1> %pred, <4 x i32> undef)
+    %masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %addr.b, i32 4, <4 x i1> %pred, <4 x i32> undef)
     %not = xor <4 x i32> %masked.load.b, <i32 -1, i32 -1, i32 -1, i32 -1>
     %bitcast.a = bitcast <4 x i32> %masked.load.a to <8 x i16>
     %shrn = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> %bitcast.a, <4 x i32> %not, i32 15, i32 1, i32 0, i32 0, i32 0, i32 0)
     %bitcast = bitcast <8 x i16> %shrn to <4 x i32>
-    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %bitcast, <4 x i32>* %addr.c, i32 4, <4 x i1> %pred)
-    %addr.a.next = getelementptr <4 x i32>, <4 x i32>* %addr.a, i32 1
-    %addr.b.next = getelementptr <4 x i32>, <4 x i32>* %addr.b, i32 1
-    %addr.c.next = getelementptr <4 x i32>, <4 x i32>* %addr.c, i32 1
+    call void @llvm.masked.store.v4i32.p0(<4 x i32> %bitcast, ptr %addr.c, i32 4, <4 x i1> %pred)
+    %addr.a.next = getelementptr <4 x i32>, ptr %addr.a, i32 1
+    %addr.b.next = getelementptr <4 x i32>, ptr %addr.b, i32 1
+    %addr.c.next = getelementptr <4 x i32>, ptr %addr.c, i32 1
     %loop.dec = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv, i32 1)
     %end = icmp ne i32 %loop.dec, 0
     %lsr.iv.next = add i32 %lsr.iv, -1
@@ -38,7 +38,7 @@
     ret void
   }
 
-  define arm_aapcs_vfpcc void @test_vorn(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c, i32 %elts, i32 %iters) #0 {
+  define arm_aapcs_vfpcc void @test_vorn(ptr %a, ptr %b, ptr %c, i32 %elts, i32 %iters) #0 {
   entry:
     %cmp = icmp slt i32 %elts, 1
     br i1 %cmp, label %exit, label %loop.ph
@@ -50,22 +50,22 @@
   loop.body:                                        ; preds = %loop.body, %loop.ph
     %lsr.iv = phi i32 [ %lsr.iv.next, %loop.body ], [ %start, %loop.ph ]
     %count = phi i32 [ %elts, %loop.ph ], [ %elts.rem, %loop.body ]
-    %addr.a = phi <4 x i32>* [ %a, %loop.ph ], [ %addr.a.next, %loop.body ]
-    %addr.b = phi <4 x i32>* [ %b, %loop.ph ], [ %addr.b.next, %loop.body ]
-    %addr.c = phi <4 x i32>* [ %c, %loop.ph ], [ %addr.c.next, %loop.body ]
+    %addr.a = phi ptr [ %a, %loop.ph ], [ %addr.a.next, %loop.body ]
+    %addr.b = phi ptr [ %b, %loop.ph ], [ %addr.b.next, %loop.body ]
+    %addr.c = phi ptr [ %c, %loop.ph ], [ %addr.c.next, %loop.body ]
     %pred = call <4 x i1> @llvm.arm.mve.vctp32(i32 %count)
     %elts.rem = sub i32 %count, 4
-    %masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.a, i32 4, <4 x i1> %pred, <4 x i32> undef)
-    %masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %addr.b, i32 4, <4 x i1> %pred, <4 x i32> undef)
+    %masked.load.a = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %addr.a, i32 4, <4 x i1> %pred, <4 x i32> undef)
+    %masked.load.b = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %addr.b, i32 4, <4 x i1> %pred, <4 x i32> undef)
     %not = xor <4 x i32> %masked.load.b, <i32 -1, i32 -1, i32 -1, i32 -1>
     %or = or <4 x i32> %not, %masked.load.a
     %bitcast.a = bitcast <4 x i32> %masked.load.a to <8 x i16>
     %shrn = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> %bitcast.a, <4 x i32> %or, i32 3, i32 1, i32 0, i32 1, i32 0, i32 1)
     %bitcast = bitcast <8 x i16> %shrn to <4 x i32>
-    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %bitcast, <4 x i32>* %addr.c, i32 4, <4 x i1> %pred)
-    %addr.a.next = getelementptr <4 x i32>, <4 x i32>* %addr.a, i32 1
-    %addr.b.next = getelementptr <4 x i32>, <4 x i32>* %addr.b, i32 1
-    %addr.c.next = getelementptr <4 x i32>, <4 x i32>* %addr.c, i32 1
+    call void @llvm.masked.store.v4i32.p0(<4 x i32> %bitcast, ptr %addr.c, i32 4, <4 x i1> %pred)
+    %addr.a.next = getelementptr <4 x i32>, ptr %addr.a, i32 1
+    %addr.b.next = getelementptr <4 x i32>, ptr %addr.b, i32 1
+    %addr.c.next = getelementptr <4 x i32>, ptr %addr.c, i32 1
     %loop.dec = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv, i32 1)
     %end = icmp ne i32 %loop.dec, 0
     %lsr.iv.next = add i32 %lsr.iv, -1
@@ -78,8 +78,8 @@
   declare i32 @llvm.start.loop.iterations.i32(i32)
   declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
   declare <4 x i1> @llvm.arm.mve.vctp32(i32)
-  declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
-  declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
+  declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
+  declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)
   declare <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16>, <4 x i32>, i32, i32, i32, i32, i32, i32)
 
 ...

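The same rule extends to overloaded intrinsic names: the mangling suffix
drops the pointee type and keeps only the address space. Summarizing the
declarations from the hunk above:

  ; before: suffix p0v4i32 = pointer to <4 x i32> in address space 0
  declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)

  ; after: suffix p0 = opaque pointer in address space 0
  declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
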
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-use-after.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-use-after.mir
index 3a85ab5af293b..2c8caa162db7f 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-use-after.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-use-after.mir
@@ -5,10 +5,10 @@
   target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv8.1m.main"
 
-  define i32 @do_copy(i32 %n, i32* nocapture %p, i32* nocapture readonly %q) {
+  define i32 @do_copy(i32 %n, ptr nocapture %p, ptr nocapture readonly %q) {
   entry:
-    %scevgep = getelementptr i32, i32* %q, i32 -1
-    %scevgep3 = getelementptr i32, i32* %p, i32 -1
+    %scevgep = getelementptr i32, ptr %q, i32 -1
+    %scevgep3 = getelementptr i32, ptr %p, i32 -1
     %start = call i32 @llvm.start.loop.iterations.i32(i32 %n)
     br label %preheader
 
@@ -16,15 +16,15 @@
     br label %while.body
 
   while.body:                                       ; preds = %while.body, %entry
-    %lsr.iv4 = phi i32* [ %scevgep5, %while.body ], [ %scevgep3, %preheader ]
-    %lsr.iv = phi i32* [ %scevgep1, %while.body ], [ %scevgep, %preheader ]
+    %lsr.iv4 = phi ptr [ %scevgep5, %while.body ], [ %scevgep3, %preheader ]
+    %lsr.iv = phi ptr [ %scevgep1, %while.body ], [ %scevgep, %preheader ]
     %0 = phi i32 [ %start, %preheader ], [ %2, %while.body ]
-    %scevgep6 = getelementptr i32, i32* %lsr.iv, i32 1
-    %scevgep2 = getelementptr i32, i32* %lsr.iv4, i32 1
-    %1 = load i32, i32* %scevgep6, align 4
-    store i32 %1, i32* %scevgep2, align 4
-    %scevgep1 = getelementptr i32, i32* %lsr.iv, i32 1
-    %scevgep5 = getelementptr i32, i32* %lsr.iv4, i32 1
+    %scevgep6 = getelementptr i32, ptr %lsr.iv, i32 1
+    %scevgep2 = getelementptr i32, ptr %lsr.iv4, i32 1
+    %1 = load i32, ptr %scevgep6, align 4
+    store i32 %1, ptr %scevgep2, align 4
+    %scevgep1 = getelementptr i32, ptr %lsr.iv, i32 1
+    %scevgep5 = getelementptr i32, ptr %lsr.iv4, i32 1
     %2 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1)
     %3 = icmp ne i32 %2, 0
     br i1 %3, label %while.body, label %while.end

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-blocks.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-blocks.mir
index a3d28cfdd1d65..84fd81098cd98 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-blocks.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-blocks.mir
@@ -5,7 +5,7 @@
   @_ZL3arr = internal global [10 x i32] [i32 1, i32 2, i32 3, i32 5, i32 5, i32 5, i32 -2, i32 0, i32 -8, i32 -1], align 4
   @.str = private unnamed_addr constant [5 x i8] c"%d, \00", align 1
 
-  define arm_aapcs_vfpcc void @vpt_block(i32* nocapture %A, i32 %n, i32 %x) {
+  define arm_aapcs_vfpcc void @vpt_block(ptr nocapture %A, i32 %n, i32 %x) {
   entry:
     %cmp9 = icmp sgt i32 %n, 0
     %0 = add i32 %n, 3
@@ -22,13 +22,12 @@
     br label %vector.body
 
   vector.body:                                      ; preds = %vector.body, %vector.ph
-    %lsr.iv1 = phi i32* [ %scevgep, %vector.body ], [ %A, %vector.ph ]
+    %lsr.iv1 = phi ptr [ %scevgep, %vector.body ], [ %A, %vector.ph ]
     %6 = phi i32 [ %start, %vector.ph ], [ %18, %vector.body ]
     %7 = phi i32 [ %n, %vector.ph ], [ %9, %vector.body ]
-    %lsr.iv12 = bitcast i32* %lsr.iv1 to <4 x i32>*
     %8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %7)
     %9 = sub i32 %7, 4
-    %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv12, i32 4, <4 x i1> %8, <4 x i32> undef)
+    %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv1, i32 4, <4 x i1> %8, <4 x i32> undef)
     %10 = insertelement <4 x i32> undef, i32 %x, i32 0
     %11 = shufflevector <4 x i32> %10, <4 x i32> undef, <4 x i32> zeroinitializer
     %12 = icmp slt <4 x i32> %wide.masked.load, %11
@@ -37,8 +36,8 @@
     %15 = icmp sgt <4 x i32> %wide.masked.load, %14
     %16 = and <4 x i1> %12, %15
     %17 = and <4 x i1> %16, %8
-    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> zeroinitializer, <4 x i32>* %lsr.iv12, i32 4, <4 x i1> %17)
-    %scevgep = getelementptr i32, i32* %lsr.iv1, i32 4
+    call void @llvm.masked.store.v4i32.p0(<4 x i32> zeroinitializer, ptr %lsr.iv1, i32 4, <4 x i1> %17)
+    %scevgep = getelementptr i32, ptr %lsr.iv1, i32 4
     %18 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %6, i32 1)
     %19 = icmp ne i32 %18, 0
     br i1 %19, label %vector.body, label %for.cond.cleanup
@@ -47,7 +46,7 @@
     ret void
   }
 
-  define arm_aapcs_vfpcc void @different_vcpt_reaching_def(i32* nocapture %A, i32 %n, i32 %x) {
+  define arm_aapcs_vfpcc void @different_vcpt_reaching_def(ptr nocapture %A, i32 %n, i32 %x) {
     ; Intentionally left blank - see MIR sequence below.
     entry:
       unreachable
@@ -59,7 +58,7 @@
       unreachable
   }
 
-  define arm_aapcs_vfpcc void @different_vcpt_operand(i32* nocapture %A, i32 %n, i32 %x) {
+  define arm_aapcs_vfpcc void @different_vcpt_operand(ptr nocapture %A, i32 %n, i32 %x) {
     ; Intentionally left blank - see MIR sequence below.
     entry:
       unreachable
@@ -71,7 +70,7 @@
       unreachable
   }
 
-  define arm_aapcs_vfpcc void @else_vcpt(i32* nocapture %data, i32 %N, i32 %T) {
+  define arm_aapcs_vfpcc void @else_vcpt(ptr nocapture %data, i32 %N, i32 %T) {
   entry:
     %cmp9 = icmp sgt i32 %N, 0
     %0 = add i32 %N, 3
@@ -88,13 +87,12 @@
     br label %vector.body
 
   vector.body:                                      ; preds = %vector.body, %vector.ph
-    %lsr.iv1 = phi i32* [ %scevgep, %vector.body ], [ %data, %vector.ph ]
+    %lsr.iv1 = phi ptr [ %scevgep, %vector.body ], [ %data, %vector.ph ]
     %6 = phi i32 [ %start, %vector.ph ], [ %18, %vector.body ]
     %7 = phi i32 [ %N, %vector.ph ], [ %9, %vector.body ]
-    %lsr.iv12 = bitcast i32* %lsr.iv1 to <4 x i32>*
     %8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %7)
     %9 = sub i32 %7, 4
-    %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv12, i32 4, <4 x i1> %8, <4 x i32> undef)
+    %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %lsr.iv1, i32 4, <4 x i1> %8, <4 x i32> undef)
     %10 = insertelement <4 x i32> undef, i32 %T, i32 0
     %11 = shufflevector <4 x i32> %10, <4 x i32> undef, <4 x i32> zeroinitializer
     %12 = icmp slt <4 x i32> %wide.masked.load, %11
@@ -103,8 +101,8 @@
     %15 = icmp sgt <4 x i32> %wide.masked.load, %14
     %16 = or <4 x i1> %12, %15
     %17 = and <4 x i1> %16, %8
-    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> zeroinitializer, <4 x i32>* %lsr.iv12, i32 4, <4 x i1> %17)
-    %scevgep = getelementptr i32, i32* %lsr.iv1, i32 4
+    call void @llvm.masked.store.v4i32.p0(<4 x i32> zeroinitializer, ptr %lsr.iv1, i32 4, <4 x i1> %17)
+    %scevgep = getelementptr i32, ptr %lsr.iv1, i32 4
     %18 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %6, i32 1)
     %19 = icmp ne i32 %18, 0
     br i1 %19, label %vector.body, label %for.cond.cleanup
@@ -113,7 +111,7 @@
     ret void
   }
 
-  define arm_aapcs_vfpcc void @loop_invariant_vpt_operands(i32* nocapture %A, i32 %n, i32 %x) {
+  define arm_aapcs_vfpcc void @loop_invariant_vpt_operands(ptr nocapture %A, i32 %n, i32 %x) {
     ; Intentionally left blank - see MIR sequence below.
     entry:
       unreachable
@@ -125,7 +123,7 @@
       unreachable
   }
 
-  define arm_aapcs_vfpcc void @vctp_before_vpt(i32* nocapture %A, i32 %n, i32 %x) {
+  define arm_aapcs_vfpcc void @vctp_before_vpt(ptr nocapture %A, i32 %n, i32 %x) {
     ; Intentionally left blank - see MIR sequence below.
     entry:
       unreachable
@@ -137,7 +135,7 @@
       unreachable
   }
 
-  define arm_aapcs_vfpcc void @vpt_load_vctp_store(i32* nocapture %A, i32 %n, i32 %x) {
+  define arm_aapcs_vfpcc void @vpt_load_vctp_store(ptr nocapture %A, i32 %n, i32 %x) {
     ; Intentionally left blank - see MIR sequence below.
     entry:
       unreachable
@@ -159,8 +157,8 @@
     unreachable
   }
 
-  declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
-  declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
+  declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
+  declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)
   declare i32 @llvm.start.loop.iterations.i32(i32)
   declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
   declare <4 x i1> @llvm.arm.mve.vctp32(i32)

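One case that is not a line-for-line substitution: pointer-to-pointer
bitcasts are no-ops under opaque pointers, so they are deleted rather than
rewritten. In the hunks above,

  %lsr.iv12 = bitcast i32* %lsr.iv1 to <4 x i32>*

is simply removed and the masked load/store take ptr %lsr.iv1 directly,
which is why those hunks shrink by a line instead of substituting in place.
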
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-negative-offset.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-negative-offset.mir
index 2690ca170f1a4..a6ae8bc75a99e 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-negative-offset.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-negative-offset.mir
@@ -6,7 +6,7 @@
   target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv8.1m.main"
   
-  define void @size_limit(i32* nocapture %a, i32* nocapture readonly %b, i32* nocapture readonly %c, i32 %N) #0 {
+  define void @size_limit(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) #0 {
   entry:
     br label %while
   
@@ -14,26 +14,26 @@
     ret void
   
   for.body.preheader:                               ; preds = %while
-    %scevgep = getelementptr i32, i32* %a, i32 -1
-    %scevgep4 = getelementptr i32, i32* %c, i32 -1
-    %scevgep8 = getelementptr i32, i32* %b, i32 -1
+    %scevgep = getelementptr i32, ptr %a, i32 -1
+    %scevgep4 = getelementptr i32, ptr %c, i32 -1
+    %scevgep8 = getelementptr i32, ptr %b, i32 -1
     br label %for.body
   
   for.body:                                         ; preds = %for.body, %for.body.preheader
-    %lsr.iv9 = phi i32* [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
-    %lsr.iv5 = phi i32* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-    %lsr.iv1 = phi i32* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+    %lsr.iv9 = phi ptr [ %scevgep8, %for.body.preheader ], [ %scevgep10, %for.body ]
+    %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+    %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
     %0 = phi i32 [ %N, %for.body.preheader ], [ %3, %for.body ]
-    %scevgep11 = getelementptr i32, i32* %lsr.iv9, i32 1
-    %1 = load i32, i32* %scevgep11, align 4
-    %scevgep7 = getelementptr i32, i32* %lsr.iv5, i32 1
-    %2 = load i32, i32* %scevgep7, align 4
+    %scevgep11 = getelementptr i32, ptr %lsr.iv9, i32 1
+    %1 = load i32, ptr %scevgep11, align 4
+    %scevgep7 = getelementptr i32, ptr %lsr.iv5, i32 1
+    %2 = load i32, ptr %scevgep7, align 4
     %mul = mul nsw i32 %2, %1
-    %scevgep3 = getelementptr i32, i32* %lsr.iv1, i32 1
-    store i32 %mul, i32* %scevgep3, align 4
-    %scevgep2 = getelementptr i32, i32* %lsr.iv1, i32 1
-    %scevgep6 = getelementptr i32, i32* %lsr.iv5, i32 1
-    %scevgep10 = getelementptr i32, i32* %lsr.iv9, i32 1
+    %scevgep3 = getelementptr i32, ptr %lsr.iv1, i32 1
+    store i32 %mul, ptr %scevgep3, align 4
+    %scevgep2 = getelementptr i32, ptr %lsr.iv1, i32 1
+    %scevgep6 = getelementptr i32, ptr %lsr.iv5, i32 1
+    %scevgep10 = getelementptr i32, ptr %lsr.iv9, i32 1
     %3 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1)
     %4 = icmp ne i32 %3, 0
     br i1 %4, label %for.body, label %for.cond.cleanup

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while.mir
index 590917607a731..d91cd958347c7 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while.mir
@@ -7,28 +7,28 @@
   target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv8.1m.main"
 
-  define dso_local arm_aapcscc void @copy(i16* nocapture %a, i16* nocapture readonly %b, i32 %N) {
+  define dso_local arm_aapcscc void @copy(ptr nocapture %a, ptr nocapture readonly %b, i32 %N) {
   entry:
     %0 = call i1 @llvm.test.set.loop.iterations.i32(i32 %N)
     br i1 %0, label %while.body.preheader, label %while.end
 
   while.body.preheader:                             ; preds = %entry
-    %scevgep = getelementptr i16, i16* %a, i32 -1
-    %scevgep3 = getelementptr i16, i16* %b, i32 -1
+    %scevgep = getelementptr i16, ptr %a, i32 -1
+    %scevgep3 = getelementptr i16, ptr %b, i32 -1
     br label %while.body
 
   while.body:                                       ; preds = %while.body, %while.body.preheader
-    %lsr.iv4 = phi i16* [ %scevgep3, %while.body.preheader ], [ %scevgep5, %while.body ]
-    %lsr.iv = phi i16* [ %scevgep, %while.body.preheader ], [ %scevgep1, %while.body ]
+    %lsr.iv4 = phi ptr [ %scevgep3, %while.body.preheader ], [ %scevgep5, %while.body ]
+    %lsr.iv = phi ptr [ %scevgep, %while.body.preheader ], [ %scevgep1, %while.body ]
     %1 = phi i32 [ %3, %while.body ], [ %N, %while.body.preheader ]
-    %scevgep7 = getelementptr i16, i16* %lsr.iv, i32 1
-    %scevgep4 = getelementptr i16, i16* %lsr.iv4, i32 1
-    %2 = load i16, i16* %scevgep4, align 2
-    store i16 %2, i16* %scevgep7, align 2
+    %scevgep7 = getelementptr i16, ptr %lsr.iv, i32 1
+    %scevgep4 = getelementptr i16, ptr %lsr.iv4, i32 1
+    %2 = load i16, ptr %scevgep4, align 2
+    store i16 %2, ptr %scevgep7, align 2
     %3 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %1, i32 1)
     %4 = icmp ne i32 %3, 0
-    %scevgep1 = getelementptr i16, i16* %lsr.iv, i32 1
-    %scevgep5 = getelementptr i16, i16* %lsr.iv4, i32 1
+    %scevgep1 = getelementptr i16, ptr %lsr.iv, i32 1
+    %scevgep5 = getelementptr i16, ptr %lsr.iv4, i32 1
     br i1 %4, label %while.body, label %while.end
 
   while.end:                                        ; preds = %while.body, %entry

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wls-search-killed.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wls-search-killed.mir
index 754abfa9436ba..f04da82f57abf 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wls-search-killed.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wls-search-killed.mir
@@ -5,7 +5,7 @@
   target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv8.1m.main-none-eabi"
 
-  define i32 @test(i64* nocapture %x, i32 %n, i64 %a, i32 %m) {
+  define i32 @test(ptr nocapture %x, i32 %n, i64 %a, i32 %m) {
   entry:
     %cmp.not = icmp eq i32 %n, 0
     %0 = call { i32, i1 } @llvm.test.start.loop.iterations.i32(i32 %n)
@@ -16,14 +16,14 @@
   if.then:                                          ; preds = %entry
     %conv = sext i32 %m to i64
     %div = sdiv i64 %a, %conv
-    %scevgep = getelementptr i64, i64* %x, i32 %m
+    %scevgep = getelementptr i64, ptr %x, i32 %m
     br label %do.body
 
   do.body:                                          ; preds = %do.body, %if.then
-    %lsr.iv = phi i64* [ %scevgep1, %do.body ], [ %scevgep, %if.then ]
+    %lsr.iv = phi ptr [ %scevgep1, %do.body ], [ %scevgep, %if.then ]
     %3 = phi i32 [ %2, %if.then ], [ %4, %do.body ]
-    store i64 %div, i64* %lsr.iv, align 8
-    %scevgep1 = getelementptr i64, i64* %lsr.iv, i32 1
+    store i64 %div, ptr %lsr.iv, align 8
+    %scevgep1 = getelementptr i64, ptr %lsr.iv, i32 1
     %4 = call i32 @llvm.loop.decrement.reg.i32(i32 %3, i32 1)
     %5 = icmp ne i32 %4, 0
     br i1 %5, label %do.body, label %if.end

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wls-search-pred.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wls-search-pred.mir
index 20ec60d622619..d87f014dc8a84 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wls-search-pred.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wls-search-pred.mir
@@ -5,7 +5,7 @@
   target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv8.1m.main-none-unknown-eabihf"
 
-  define void @test_memset_preheader(i8* %x, i8* %y, i32 %n) {
+  define void @test_memset_preheader(ptr %x, ptr %y, i32 %n) {
   entry:
     %cmp6 = icmp ne i32 %n, 0
     %0 = call { i32, i1 } @llvm.test.start.loop.iterations.i32(i32 %n)
@@ -14,17 +14,17 @@
     br i1 %1, label %prehead, label %for.cond.cleanup
 
   prehead:                                          ; preds = %entry
-    call void @llvm.memset.p0i8.i32(i8* align 1 %x, i8 0, i32 %n, i1 false)
+    call void @llvm.memset.p0.i32(ptr align 1 %x, i8 0, i32 %n, i1 false)
     br label %for.body
 
   for.body:                                         ; preds = %for.body, %prehead
-    %x.addr.08 = phi i8* [ %add.ptr, %for.body ], [ %x, %prehead ]
-    %y.addr.07 = phi i8* [ %add.ptr1, %for.body ], [ %y, %prehead ]
+    %x.addr.08 = phi ptr [ %add.ptr, %for.body ], [ %x, %prehead ]
+    %y.addr.07 = phi ptr [ %add.ptr1, %for.body ], [ %y, %prehead ]
     %3 = phi i32 [ %2, %prehead ], [ %4, %for.body ]
-    %add.ptr = getelementptr inbounds i8, i8* %x.addr.08, i32 1
-    %add.ptr1 = getelementptr inbounds i8, i8* %y.addr.07, i32 1
-    %l = load i8, i8* %x.addr.08, align 1
-    store i8 %l, i8* %y.addr.07, align 1
+    %add.ptr = getelementptr inbounds i8, ptr %x.addr.08, i32 1
+    %add.ptr1 = getelementptr inbounds i8, ptr %y.addr.07, i32 1
+    %l = load i8, ptr %x.addr.08, align 1
+    store i8 %l, ptr %y.addr.07, align 1
     %4 = call i32 @llvm.loop.decrement.reg.i32(i32 %3, i32 1)
     %5 = icmp ne i32 %4, 0
     br i1 %5, label %for.body, label %for.cond.cleanup
@@ -33,7 +33,7 @@
     ret void
   }
 
-  declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1 immarg)
+  declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg)
   declare { i32, i1 } @llvm.test.start.loop.iterations.i32(i32)
   declare i32 @llvm.loop.decrement.reg.i32(i32, i32)
 

diff --git a/llvm/test/CodeGen/Thumb2/bti-const-island.mir b/llvm/test/CodeGen/Thumb2/bti-const-island.mir
index a861e1e3deb4a..3aa73895545b7 100644
--- a/llvm/test/CodeGen/Thumb2/bti-const-island.mir
+++ b/llvm/test/CodeGen/Thumb2/bti-const-island.mir
@@ -10,9 +10,9 @@
 # bb.1.bb42.i.
 
 --- |
-  declare noalias i8* @calloc(i32, i32)
+  declare noalias ptr @calloc(i32, i32)
 
-  define internal i32 @test(i32 %argc, i8** nocapture %argv) {
+  define internal i32 @test(i32 %argc, ptr nocapture %argv) {
   entry:
     br label %bb42.i
 
@@ -21,15 +21,15 @@
     br label %bb42.i
 
   bb35.i:
-    %1 = call noalias i8* @calloc(i32 20, i32 1)
+    %1 = call noalias ptr @calloc(i32 20, i32 1)
     unreachable
 
   bb37.i:
-    %2 = call noalias i8* @calloc(i32 14, i32 1)
+    %2 = call noalias ptr @calloc(i32 14, i32 1)
     unreachable
 
   bb39.i:
-    %3 = call noalias i8* @calloc(i32 17, i32 1)
+    %3 = call noalias ptr @calloc(i32 17, i32 1)
     unreachable
 
   bb42.i:

diff --git a/llvm/test/CodeGen/Thumb2/constant-islands-cbz.mir b/llvm/test/CodeGen/Thumb2/constant-islands-cbz.mir
index 082c00858b8ec..a72b57c8a87bb 100644
--- a/llvm/test/CodeGen/Thumb2/constant-islands-cbz.mir
+++ b/llvm/test/CodeGen/Thumb2/constant-islands-cbz.mir
@@ -2,14 +2,14 @@
 # RUN: llc -mtriple=thumbv7m-none-eabi -run-pass=arm-cp-islands -o - %s | FileCheck %s
 
 --- |
-  define i32* @test_simple(i32* %x, i32 %y) { ret i32* %x }
-  define i32* @test_notfirst(i32* %x, i32 %y) { ret i32* %x }
-  define i32* @test_redefined(i32* %x, i32 %y) { ret i32* %x }
-  define i32* @test_notredefined(i32* %x, i32 %y) { ret i32* %x }
-  define i32* @test_notcmp(i32* %x, i32 %y) { ret i32* %x }
-  define i32* @test_killflag_1(i32* %x, i32 %y) { ret i32* %x }
-  define i32* @test_killflag_2(i32* %x, i32 %y) { ret i32* %x }
-  define i32* @test_cpsr(i32* %x, i32 %y) { ret i32* %x }
+  define ptr @test_simple(ptr %x, i32 %y) { ret ptr %x }
+  define ptr @test_notfirst(ptr %x, i32 %y) { ret ptr %x }
+  define ptr @test_redefined(ptr %x, i32 %y) { ret ptr %x }
+  define ptr @test_notredefined(ptr %x, i32 %y) { ret ptr %x }
+  define ptr @test_notcmp(ptr %x, i32 %y) { ret ptr %x }
+  define ptr @test_killflag_1(ptr %x, i32 %y) { ret ptr %x }
+  define ptr @test_killflag_2(ptr %x, i32 %y) { ret ptr %x }
+  define ptr @test_cpsr(ptr %x, i32 %y) { ret ptr %x }
 
   declare dso_local i32 @c(i32 %x)
 ...

diff --git a/llvm/test/CodeGen/Thumb2/frame-index-addrmode-t2i8s4.mir b/llvm/test/CodeGen/Thumb2/frame-index-addrmode-t2i8s4.mir
index 6fc2e2ee03f8f..acab7e7002611 100644
--- a/llvm/test/CodeGen/Thumb2/frame-index-addrmode-t2i8s4.mir
+++ b/llvm/test/CodeGen/Thumb2/frame-index-addrmode-t2i8s4.mir
@@ -5,10 +5,10 @@
   define dso_local i64 @f() #0 {
   entry:
     %a = alloca [10 x i64], align 8
-    %arrayidx = getelementptr inbounds [10 x i64], [10 x i64]* %a, i32 0, i32 1
-    store volatile i64 1, i64* %arrayidx, align 8
-    %arrayidx1 = getelementptr inbounds [10 x i64], [10 x i64]* %a, i32 0, i32 1
-    %0 = load volatile i64, i64* %arrayidx1, align 8
+    %arrayidx = getelementptr inbounds [10 x i64], ptr %a, i32 0, i32 1
+    store volatile i64 1, ptr %arrayidx, align 8
+    %arrayidx1 = getelementptr inbounds [10 x i64], ptr %a, i32 0, i32 1
+    %0 = load volatile i64, ptr %arrayidx1, align 8
     ret i64 %0
   }
 

diff --git a/llvm/test/CodeGen/Thumb2/high-reg-spill.mir b/llvm/test/CodeGen/Thumb2/high-reg-spill.mir
index 1cfb4b5cdb9bb..1cf58da63fe83 100644
--- a/llvm/test/CodeGen/Thumb2/high-reg-spill.mir
+++ b/llvm/test/CodeGen/Thumb2/high-reg-spill.mir
@@ -20,7 +20,7 @@
   define dso_local void @constraint_h() {
   entry:
     %i = alloca i32, align 4
-    %0 = load i32, i32* %i, align 4
+    %0 = load i32, ptr %i, align 4
     call void asm sideeffect "@ $0", "h,~{r12}"(i32 %0)
     ret void
   }

diff --git a/llvm/test/CodeGen/Thumb2/ifcvt-dead-predicate.mir b/llvm/test/CodeGen/Thumb2/ifcvt-dead-predicate.mir
index afba351695db9..18b0fb6dc5526 100644
--- a/llvm/test/CodeGen/Thumb2/ifcvt-dead-predicate.mir
+++ b/llvm/test/CodeGen/Thumb2/ifcvt-dead-predicate.mir
@@ -8,15 +8,15 @@
   target triple = "thumbv7-unknown-linux-android16"
 
   ; Function Attrs: minsize nounwind optsize ssp uwtable
-  define hidden zeroext i1 @branch_entry(i32* %command_set, i8* %requested_filename, i8** %filename_to_use) local_unnamed_addr #0 {
+  define hidden zeroext i1 @branch_entry(ptr %command_set, ptr %requested_filename, ptr %filename_to_use) local_unnamed_addr #0 {
   entry:
-    %0 = load i32, i32* %command_set, align 4
+    %0 = load i32, ptr %command_set, align 4
     %and.i.i = and i32 %0, 128
     %tobool.i.i.not = icmp eq i32 %and.i.i, 0
     br i1 %tobool.i.i.not, label %land.end, label %land.rhs
 
   land.rhs:                                         ; preds = %entry
-    %call1 = tail call zeroext i1 @branch_target(i8* %requested_filename, i8** %filename_to_use)
+    %call1 = tail call zeroext i1 @branch_target(ptr %requested_filename, ptr %filename_to_use)
     ret i1 %call1
 
   land.end:                                         ; preds = %entry
@@ -24,7 +24,7 @@
   }
 
   ; Function Attrs: minsize optsize
-  declare zeroext i1 @branch_target(i8*, i8**) local_unnamed_addr #1
+  declare zeroext i1 @branch_target(ptr, ptr) local_unnamed_addr #1
 
   attributes #0 = { minsize nounwind optsize ssp uwtable }
   attributes #1 = { minsize optsize }

diff --git a/llvm/test/CodeGen/Thumb2/m4-sched-ldr.mir b/llvm/test/CodeGen/Thumb2/m4-sched-ldr.mir
index 8bb3ec94785d3..8101f96196233 100644
--- a/llvm/test/CodeGen/Thumb2/m4-sched-ldr.mir
+++ b/llvm/test/CodeGen/Thumb2/m4-sched-ldr.mir
@@ -10,11 +10,11 @@
   target triple = "thumbv7em-arm-none-eabi"
 
   ; Function Attrs: norecurse nounwind optsize readonly
-  define dso_local i32 @test(i32* nocapture readonly %a, i32* nocapture readonly %b) local_unnamed_addr #0 {
+  define dso_local i32 @test(ptr nocapture readonly %a, ptr nocapture readonly %b) local_unnamed_addr #0 {
   entry:
-    %0 = load i32, i32* %a, align 4
+    %0 = load i32, ptr %a, align 4
     %add = add nsw i32 %0, 10
-    %1 = load i32, i32* %b, align 4
+    %1 = load i32, ptr %b, align 4
     %add1 = add nsw i32 %1, 20
     %mul = mul nsw i32 %add1, %add
     ret i32 %mul

diff --git a/llvm/test/CodeGen/Thumb2/mve-postinc-distribute.mir b/llvm/test/CodeGen/Thumb2/mve-postinc-distribute.mir
index 623e28a1af1b2..2975cf81efae3 100644
--- a/llvm/test/CodeGen/Thumb2/mve-postinc-distribute.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-postinc-distribute.mir
@@ -2,74 +2,74 @@
 # RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -run-pass arm-prera-ldst-opt %s -o - -verify-machineinstrs | FileCheck %s
 
 --- |
-  define i32* @MVE_VLDRWU32(i32* %x) { unreachable }
-  define i32* @MVE_VLDRHU16(i32* %x) { unreachable }
-  define i32* @MVE_VLDRBU8(i32* %x) { unreachable }
-  define i32* @MVE_VLDRBS32(i32* %x) { unreachable }
-  define i32* @MVE_VLDRBU32(i32* %x) { unreachable }
-  define i32* @MVE_VLDRHS32(i32* %x) { unreachable }
-  define i32* @MVE_VLDRHU32(i32* %x) { unreachable }
-  define i32* @MVE_VLDRBS16(i32* %x) { unreachable }
-  define i32* @MVE_VLDRBU16(i32* %x) { unreachable }
-  define i32* @MVE_VSTRWU32(i32* %x, <4 x i32> %y) { unreachable }
-  define i32* @MVE_VSTRHU16(i32* %x, <4 x i32> %y) { unreachable }
-  define i32* @MVE_VSTRBU8(i32* %x, <4 x i32> %y) { unreachable }
-  define i32* @MVE_VSTRH32(i32* %x, <4 x i32> %y) { unreachable }
-  define i32* @MVE_VSTRB32(i32* %x, <4 x i32> %y) { unreachable }
-  define i32* @MVE_VSTRB16(i32* %x, <4 x i32> %y) { unreachable }
-
-  define i32* @ld0ld4(i32* %x) { unreachable }
-  define i32* @ld4ld0(i32* %x) { unreachable }
-  define i32* @ld0ld4ld0(i32* %x) { unreachable }
-  define i32* @ld4ld0ld4(i32* %x) { unreachable }
-  define i32* @addload(i32* %x) { unreachable }
-  define i32* @sub(i32* %x) { unreachable }
-  define i32* @otherUse(i32* %x) { unreachable }
-  define i32* @postincUse(i32* %x) { unreachable }
-  define i32* @badScale(i32* %x) { unreachable }
-  define i32* @badRange(i32* %x) { unreachable }
-
-  define i32* @addUseOK(i32* %x) { unreachable }
-  define i32* @addUseDom(i32* %x) { unreachable }
-  define i32* @addUseKilled(i32* %x) { unreachable }
-
-  define i32* @MVE_VLDRWU32_post(i32* %x) { unreachable }
-  define i32* @MVE_VLDRHU16_post(i32* %x) { unreachable }
-  define i32* @MVE_VLDRBU8_post(i32* %x) { unreachable }
-  define i32* @MVE_VLDRBS32_post(i32* %x) { unreachable }
-  define i32* @MVE_VLDRBU32_post(i32* %x) { unreachable }
-  define i32* @MVE_VLDRHS32_post(i32* %x) { unreachable }
-  define i32* @MVE_VLDRHU32_post(i32* %x) { unreachable }
-  define i32* @MVE_VLDRBS16_post(i32* %x) { unreachable }
-  define i32* @MVE_VLDRBU16_post(i32* %x) { unreachable }
-  define i32* @MVE_VSTRWU32_post(i32* %x, <4 x i32> %y) { unreachable }
-  define i32* @MVE_VSTRHU16_post(i32* %x, <4 x i32> %y) { unreachable }
-  define i32* @MVE_VSTRBU8_post(i32* %x, <4 x i32> %y) { unreachable }
-  define i32* @MVE_VSTRH32_post(i32* %x, <4 x i32> %y) { unreachable }
-  define i32* @MVE_VSTRB32_post(i32* %x, <4 x i32> %y) { unreachable }
-  define i32* @MVE_VSTRB16_post(i32* %x, <4 x i32> %y) { unreachable }
-  define i32* @MVE_VLDRWU32_pre(i32* %x) { unreachable }
-  define i32* @MVE_VLDRHU16_pre(i32* %x) { unreachable }
-  define i32* @MVE_VLDRBU8_pre(i32* %x) { unreachable }
-  define i32* @MVE_VLDRBS32_pre(i32* %x) { unreachable }
-  define i32* @MVE_VLDRBU32_pre(i32* %x) { unreachable }
-  define i32* @MVE_VLDRHS32_pre(i32* %x) { unreachable }
-  define i32* @MVE_VLDRHU32_pre(i32* %x) { unreachable }
-  define i32* @MVE_VLDRBS16_pre(i32* %x) { unreachable }
-  define i32* @MVE_VLDRBU16_pre(i32* %x) { unreachable }
-  define i32* @MVE_VSTRWU32_pre(i32* %x, <4 x i32> %y) { unreachable }
-  define i32* @MVE_VSTRHU16_pre(i32* %x, <4 x i32> %y) { unreachable }
-  define i32* @MVE_VSTRBU8_pre(i32* %x, <4 x i32> %y) { unreachable }
-  define i32* @MVE_VSTRH32_pre(i32* %x, <4 x i32> %y) { unreachable }
-  define i32* @MVE_VSTRB32_pre(i32* %x, <4 x i32> %y) { unreachable }
-  define i32* @MVE_VSTRB16_pre(i32* %x, <4 x i32> %y) { unreachable }
-
-  define i32* @multiple2(i32* %x) { unreachable }
-  define i32* @multiple3(i32* %x) { unreachable }
-  define i32* @multiple4(i32* %x) { unreachable }
-  define i32* @badScale2(i32* %x) { unreachable }
-  define i32* @badRange2(i32* %x) { unreachable }
-  define i32* @regtype(i32* %x) { unreachable }
+  define ptr @MVE_VLDRWU32(ptr %x) { unreachable }
+  define ptr @MVE_VLDRHU16(ptr %x) { unreachable }
+  define ptr @MVE_VLDRBU8(ptr %x) { unreachable }
+  define ptr @MVE_VLDRBS32(ptr %x) { unreachable }
+  define ptr @MVE_VLDRBU32(ptr %x) { unreachable }
+  define ptr @MVE_VLDRHS32(ptr %x) { unreachable }
+  define ptr @MVE_VLDRHU32(ptr %x) { unreachable }
+  define ptr @MVE_VLDRBS16(ptr %x) { unreachable }
+  define ptr @MVE_VLDRBU16(ptr %x) { unreachable }
+  define ptr @MVE_VSTRWU32(ptr %x, <4 x i32> %y) { unreachable }
+  define ptr @MVE_VSTRHU16(ptr %x, <4 x i32> %y) { unreachable }
+  define ptr @MVE_VSTRBU8(ptr %x, <4 x i32> %y) { unreachable }
+  define ptr @MVE_VSTRH32(ptr %x, <4 x i32> %y) { unreachable }
+  define ptr @MVE_VSTRB32(ptr %x, <4 x i32> %y) { unreachable }
+  define ptr @MVE_VSTRB16(ptr %x, <4 x i32> %y) { unreachable }
+
+  define ptr @ld0ld4(ptr %x) { unreachable }
+  define ptr @ld4ld0(ptr %x) { unreachable }
+  define ptr @ld0ld4ld0(ptr %x) { unreachable }
+  define ptr @ld4ld0ld4(ptr %x) { unreachable }
+  define ptr @addload(ptr %x) { unreachable }
+  define ptr @sub(ptr %x) { unreachable }
+  define ptr @otherUse(ptr %x) { unreachable }
+  define ptr @postincUse(ptr %x) { unreachable }
+  define ptr @badScale(ptr %x) { unreachable }
+  define ptr @badRange(ptr %x) { unreachable }
+
+  define ptr @addUseOK(ptr %x) { unreachable }
+  define ptr @addUseDom(ptr %x) { unreachable }
+  define ptr @addUseKilled(ptr %x) { unreachable }
+
+  define ptr @MVE_VLDRWU32_post(ptr %x) { unreachable }
+  define ptr @MVE_VLDRHU16_post(ptr %x) { unreachable }
+  define ptr @MVE_VLDRBU8_post(ptr %x) { unreachable }
+  define ptr @MVE_VLDRBS32_post(ptr %x) { unreachable }
+  define ptr @MVE_VLDRBU32_post(ptr %x) { unreachable }
+  define ptr @MVE_VLDRHS32_post(ptr %x) { unreachable }
+  define ptr @MVE_VLDRHU32_post(ptr %x) { unreachable }
+  define ptr @MVE_VLDRBS16_post(ptr %x) { unreachable }
+  define ptr @MVE_VLDRBU16_post(ptr %x) { unreachable }
+  define ptr @MVE_VSTRWU32_post(ptr %x, <4 x i32> %y) { unreachable }
+  define ptr @MVE_VSTRHU16_post(ptr %x, <4 x i32> %y) { unreachable }
+  define ptr @MVE_VSTRBU8_post(ptr %x, <4 x i32> %y) { unreachable }
+  define ptr @MVE_VSTRH32_post(ptr %x, <4 x i32> %y) { unreachable }
+  define ptr @MVE_VSTRB32_post(ptr %x, <4 x i32> %y) { unreachable }
+  define ptr @MVE_VSTRB16_post(ptr %x, <4 x i32> %y) { unreachable }
+  define ptr @MVE_VLDRWU32_pre(ptr %x) { unreachable }
+  define ptr @MVE_VLDRHU16_pre(ptr %x) { unreachable }
+  define ptr @MVE_VLDRBU8_pre(ptr %x) { unreachable }
+  define ptr @MVE_VLDRBS32_pre(ptr %x) { unreachable }
+  define ptr @MVE_VLDRBU32_pre(ptr %x) { unreachable }
+  define ptr @MVE_VLDRHS32_pre(ptr %x) { unreachable }
+  define ptr @MVE_VLDRHU32_pre(ptr %x) { unreachable }
+  define ptr @MVE_VLDRBS16_pre(ptr %x) { unreachable }
+  define ptr @MVE_VLDRBU16_pre(ptr %x) { unreachable }
+  define ptr @MVE_VSTRWU32_pre(ptr %x, <4 x i32> %y) { unreachable }
+  define ptr @MVE_VSTRHU16_pre(ptr %x, <4 x i32> %y) { unreachable }
+  define ptr @MVE_VSTRBU8_pre(ptr %x, <4 x i32> %y) { unreachable }
+  define ptr @MVE_VSTRH32_pre(ptr %x, <4 x i32> %y) { unreachable }
+  define ptr @MVE_VSTRB32_pre(ptr %x, <4 x i32> %y) { unreachable }
+  define ptr @MVE_VSTRB16_pre(ptr %x, <4 x i32> %y) { unreachable }
+
+  define ptr @multiple2(ptr %x) { unreachable }
+  define ptr @multiple3(ptr %x) { unreachable }
+  define ptr @multiple4(ptr %x) { unreachable }
+  define ptr @badScale2(ptr %x) { unreachable }
+  define ptr @badRange2(ptr %x) { unreachable }
+  define ptr @regtype(ptr %x) { unreachable }
 
 ...
 ---

diff --git a/llvm/test/CodeGen/Thumb2/mve-tp-loop.mir b/llvm/test/CodeGen/Thumb2/mve-tp-loop.mir
index 30e80f3033817..685df84f2aa26 100644
--- a/llvm/test/CodeGen/Thumb2/mve-tp-loop.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-tp-loop.mir
@@ -5,48 +5,43 @@
   target triple = "arm-arm-none-eabi"
 
   ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-  declare void @llvm.memcpy.p0i8.p0i8.i32(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i32, i1 immarg)
+  declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg)
   ; Function Attrs: argmemonly nofree nosync nounwind willreturn writeonly
-  declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1 immarg)
+  declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg)
 
-  define void @test1(i32* noalias %X, i32* noalias readonly %Y, i32 %n) {
+  define void @test1(ptr noalias %X, ptr noalias readonly %Y, i32 %n) {
   entry:
-    %0 = bitcast i32* %X to i8*
-    %1 = bitcast i32* %Y to i8*
-    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 %n, i1 false)
+    call void @llvm.memcpy.p0.p0.i32(ptr align 4 %X, ptr align 4 %Y, i32 %n, i1 false)
     ret void
   }
 
-  define void @test2(i32* noalias %X, i32* noalias readonly %Y, i32 %n) {
+  define void @test2(ptr noalias %X, ptr noalias readonly %Y, i32 %n) {
   entry:
     %cmp6 = icmp sgt i32 %n, 0
     br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
 
   for.body.preheader:                               ; preds = %entry
-    %X.bits = bitcast i32* %X to i8*
-    %Y.bits = bitcast i32* %Y to i8*
-    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %X.bits, i8* align 4 %Y.bits, i32 %n, i1 false)
+    call void @llvm.memcpy.p0.p0.i32(ptr align 4 %X, ptr align 4 %Y, i32 %n, i1 false)
     br label %for.cond.cleanup
 
   for.cond.cleanup:                                 ; preds = %for.body.preheader, %entry
     ret void
   }
 
-  define void @test3(i32* nocapture %X, i8 zeroext %c, i32 %n) {
+  define void @test3(ptr nocapture %X, i8 zeroext %c, i32 %n) {
   entry:
-    %0 = bitcast i32* %X to i8*
-    tail call void @llvm.memset.p0i8.i32(i8* align 4 %0, i8 %c, i32 %n, i1 false)
+    tail call void @llvm.memset.p0.i32(ptr align 4 %X, i8 %c, i32 %n, i1 false)
     ret void
   }
 
 
-  define void @test4(i8* nocapture %X, i8 zeroext %c, i32 %n) {
+  define void @test4(ptr nocapture %X, i8 zeroext %c, i32 %n) {
   entry:
     %cmp4 = icmp sgt i32 %n, 0
     br i1 %cmp4, label %for.body.preheader, label %for.cond.cleanup
 
   for.body.preheader:                               ; preds = %entry
-    call void @llvm.memset.p0i8.i32(i8* align 1 %X, i8 %c, i32 %n, i1 false)
+    call void @llvm.memset.p0.i32(ptr align 1 %X, i8 %c, i32 %n, i1 false)
     br label %for.cond.cleanup
 
   for.cond.cleanup:                                 ; preds = %for.body.preheader, %entry

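Likewise, when a bitcast only fed a call argument, the cast is dropped and
the original pointer is passed directly. From the test1 hunk above:

  ; before
  %0 = bitcast i32* %X to i8*
  %1 = bitcast i32* %Y to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 %n, i1 false)

  ; after
  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %X, ptr align 4 %Y, i32 %n, i1 false)
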
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-block-fold-vcmp.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-block-fold-vcmp.mir
index 3fa96947ea948..e583df903d3ed 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-block-fold-vcmp.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-block-fold-vcmp.mir
@@ -4,19 +4,19 @@
 --- |
   target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv8.1m.main-arm-unknown-eabihf"
-  define dso_local <4 x i32> @foo(<4 x i32>* %src, <4 x i32>* %src2, <4 x i32>* %src3, <4 x i32>* %dest, <4 x i32>* %dest2, <4 x i32>* %dest3, <4 x float> %a1) local_unnamed_addr #0 {
+  define dso_local <4 x i32> @foo(ptr %src, ptr %src2, ptr %src3, ptr %dest, ptr %dest2, ptr %dest3, <4 x float> %a1) local_unnamed_addr #0 {
   entry:
     %c = fcmp one <4 x float> %a1, zeroinitializer
-    %w = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %src, i32 4, <4 x i1> %c, <4 x i32> undef)
-    tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %w, <4 x i32>* %dest, i32 4, <4 x i1> %c)
-    %w2 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %src2, i32 4, <4 x i1> %c, <4 x i32> undef)
-    tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %w2, <4 x i32>* %dest2, i32 4, <4 x i1> %c)
-    %w3 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %src3, i32 4, <4 x i1> %c, <4 x i32> undef)
-    tail call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %w3, <4 x i32>* %dest3, i32 4, <4 x i1> %c)
+    %w = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %src, i32 4, <4 x i1> %c, <4 x i32> undef)
+    tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %w, ptr %dest, i32 4, <4 x i1> %c)
+    %w2 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %src2, i32 4, <4 x i1> %c, <4 x i32> undef)
+    tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %w2, ptr %dest2, i32 4, <4 x i1> %c)
+    %w3 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %src3, i32 4, <4 x i1> %c, <4 x i32> undef)
+    tail call void @llvm.masked.store.v4i32.p0(<4 x i32> %w3, ptr %dest3, i32 4, <4 x i1> %c)
     ret <4 x i32> %w3
   }
-  declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>) #2
-  declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>) #3
+  declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>) #2
+  declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>) #3
 
   attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+fp-armv8d16sp,+fp16,+fpregs,+fullfp16,+hwdiv,+lob,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2sp,+vfp3d16sp,+vfp4d16sp" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { nounwind readnone }

diff --git a/llvm/test/CodeGen/Thumb2/mve-wls-block-placement.mir b/llvm/test/CodeGen/Thumb2/mve-wls-block-placement.mir
index 9b39d469fff86..744b1b8fe22dd 100644
--- a/llvm/test/CodeGen/Thumb2/mve-wls-block-placement.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-wls-block-placement.mir
@@ -3,39 +3,39 @@
 --- |
 
   ; Checks that Predecessor gets moved (to before the LoopExit) if it contains a backward WLS.
-  define void @backwards_branch(i32 %N, i32* nocapture %a, i32* nocapture readonly %b)   {
+  define void @backwards_branch(i32 %N, ptr nocapture %a, ptr nocapture readonly %b)   {
   entry:
     unreachable
   }
 
   ; Checks that Predecessor (containing a backwards WLS) does not get moved to before the loopExit if it is the entry block.
-  define void @backwards_branch_entry_block(i32 %N, i32* nocapture %a, i32* nocapture readonly %b)   {
+  define void @backwards_branch_entry_block(i32 %N, ptr nocapture %a, ptr nocapture readonly %b)   {
   entry:
     unreachable
   }
 
   ; Checks that Predecessor (to which a forward WLS exists) is not moved if moving it would cause the WLS to become backwards branching.
-  define void @backwards_branch_backwards_wls(i32 %N, i32 %M, i32* nocapture %a, i32* nocapture %b, i32* nocapture %c)   {
+  define void @backwards_branch_backwards_wls(i32 %N, i32 %M, ptr nocapture %a, ptr nocapture %b, ptr nocapture %c)   {
   entry:
     unreachable
   }
 
   ; Checks that a MachineFunction is unaffected if it doesn't contain any WLS (pseudo) instruction.
-  define void @no_predecessor(i32 %N, i32 %M, i32* nocapture %a, i32* nocapture %b, i32* nocapture %c)   {
+  define void @no_predecessor(i32 %N, i32 %M, ptr nocapture %a, ptr nocapture %b, ptr nocapture %c)   {
   entry:
     unreachable
   }
 
   ; Within a nested (Both the WLS and loopExit are at depth=3 here) loop, checks that Predecessor
  ; gets moved (in backward direction) if there exists a backwards WLS from it to the LoopExit.
-  define void @nested_loops(i32 %n, i32 %m, i32 %l, i8* noalias %X, i8* noalias %Y)   {
+  define void @nested_loops(i32 %n, i32 %m, i32 %l, ptr noalias %X, ptr noalias %Y)   {
   entry:
     unreachable
   }
 
   ; Checks that Predecessor (to which a forward WLS exists) is moved if moving it would NOT cause the WLS
   ; to become backwards branching.
-  define void @backwards_branch_forwards_wls(i32 %N, i32 %M, i32* nocapture %a, i32* nocapture %b, i32* nocapture %c) {
+  define void @backwards_branch_forwards_wls(i32 %N, i32 %M, ptr nocapture %a, ptr nocapture %b, ptr nocapture %c) {
   entry:
     unreachable
   }

diff --git a/llvm/test/CodeGen/Thumb2/phi_prevent_copy.mir b/llvm/test/CodeGen/Thumb2/phi_prevent_copy.mir
index 1ca7f92fdc48e..201972fae8cb0 100644
--- a/llvm/test/CodeGen/Thumb2/phi_prevent_copy.mir
+++ b/llvm/test/CodeGen/Thumb2/phi_prevent_copy.mir
@@ -6,13 +6,13 @@
   target triple = "arm-arm-none-eabi"
 
   ; Function Attrs: nofree norecurse nounwind
-  define void @test(i8* noalias nocapture %X, i8* noalias nocapture readonly %Y, i32 %n) {
+  define void @test(ptr noalias nocapture %X, ptr noalias nocapture readonly %Y, i32 %n) {
   entry:
     %cmp6 = icmp sgt i32 %n, 0
     br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
 
   for.body.preheader:                               ; preds = %entry
-    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %X, i8* align 4 %Y, i32 %n, i1 false)
+    call void @llvm.memcpy.p0.p0.i32(ptr align 4 %X, ptr align 4 %Y, i32 %n, i1 false)
     br label %for.cond.cleanup
 
   for.cond.cleanup:                                 ; preds = %for.body.preheader, %entry
@@ -20,7 +20,7 @@
   }
 
   ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-  declare void @llvm.memcpy.p0i8.p0i8.i32(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i32, i1 immarg)
+  declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg)
 
 ...
 ---
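
The memcpy rename in the hunk above follows from intrinsic name mangling: overloaded pointer parameters used to be mangled with their pointee type (p0i8), while with opaque pointers only the address space remains (p0). A minimal standalone sketch of the new form (hypothetical @copy wrapper, not part of the patch):

  declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg)

  define void @copy(ptr %dst, ptr %src, i32 %n) {
    ; was: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dst, i8* align 4 %src, i32 %n, i1 false)
    call void @llvm.memcpy.p0.p0.i32(ptr align 4 %dst, ptr align 4 %src, i32 %n, i1 false)
    ret void
  }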

diff  --git a/llvm/test/CodeGen/Thumb2/pipeliner-inlineasm.mir b/llvm/test/CodeGen/Thumb2/pipeliner-inlineasm.mir
index 59758c1d3747c..522120559c8b0 100644
--- a/llvm/test/CodeGen/Thumb2/pipeliner-inlineasm.mir
+++ b/llvm/test/CodeGen/Thumb2/pipeliner-inlineasm.mir
@@ -2,30 +2,30 @@
 # RUN: llc -mtriple=thumbv7m-none-eabi -mcpu=cortex-m7 -run-pass=pipeliner --pipeliner-force-issue-width=10 -o - %s | FileCheck %s --check-prefix=CHECK
 
 --- |
-  define hidden float @dot(float* nocapture noundef readonly %a, float* nocapture noundef readonly %b, i32 noundef %sz) local_unnamed_addr #0 {
+  define hidden float @dot(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %sz) local_unnamed_addr #0 {
   entry:
     %cmp8 = icmp sgt i32 %sz, 0
     br i1 %cmp8, label %for.body.preheader, label %for.end
 
   for.body.preheader:                               ; preds = %entry
-    %scevgep = getelementptr float, float* %b, i32 -1
-    %scevgep4 = getelementptr float, float* %a, i32 -1
+    %scevgep = getelementptr float, ptr %b, i32 -1
+    %scevgep4 = getelementptr float, ptr %a, i32 -1
     br label %for.body
 
   for.body:                                         ; preds = %for.body.preheader, %for.body
-    %lsr.iv5 = phi float* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-    %lsr.iv1 = phi float* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+    %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+    %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
     %lsr.iv = phi i32 [ %sz, %for.body.preheader ], [ %lsr.iv.next, %for.body ]
     %sum.010 = phi float [ %add, %for.body ], [ 0.000000e+00, %for.body.preheader ]
-    %scevgep7 = getelementptr float, float* %lsr.iv5, i32 1
-    %0 = load float, float* %scevgep7, align 4
-    %scevgep3 = getelementptr float, float* %lsr.iv1, i32 1
-    %1 = load float, float* %scevgep3, align 4
+    %scevgep7 = getelementptr float, ptr %lsr.iv5, i32 1
+    %0 = load float, ptr %scevgep7, align 4
+    %scevgep3 = getelementptr float, ptr %lsr.iv1, i32 1
+    %1 = load float, ptr %scevgep3, align 4
     %mul = fmul fast float %1, %0
     %add = fadd fast float %mul, %sum.010
     %lsr.iv.next = add i32 %lsr.iv, -1
-    %scevgep2 = getelementptr float, float* %lsr.iv1, i32 1
-    %scevgep6 = getelementptr float, float* %lsr.iv5, i32 1
+    %scevgep2 = getelementptr float, ptr %lsr.iv1, i32 1
+    %scevgep6 = getelementptr float, ptr %lsr.iv5, i32 1
     %exitcond.not = icmp ne i32 %lsr.iv.next, 0
     br i1 %exitcond.not, label %for.body, label %for.end, !llvm.loop !0
 

diff  --git a/llvm/test/CodeGen/Thumb2/postinc-distribute.mir b/llvm/test/CodeGen/Thumb2/postinc-distribute.mir
index 19fb089509dc4..c54d2bb4209ab 100644
--- a/llvm/test/CodeGen/Thumb2/postinc-distribute.mir
+++ b/llvm/test/CodeGen/Thumb2/postinc-distribute.mir
@@ -2,31 +2,31 @@
 # RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -run-pass arm-prera-ldst-opt %s -o - -verify-machineinstrs | FileCheck %s
 
 --- |
-  define i32* @t2LDRi12(i32* %x, i32 %y) { unreachable }
-  define i32* @t2LDRHi12(i32* %x, i32 %y) { unreachable }
-  define i32* @t2LDRSHi12(i32* %x, i32 %y) { unreachable }
-  define i32* @t2LDRBi12(i32* %x, i32 %y) { unreachable }
-  define i32* @t2LDRSBi12(i32* %x, i32 %y) { unreachable }
-  define i32* @t2STRi12(i32* %x, i32 %y) { unreachable }
-  define i32* @t2STRHi12(i32* %x, i32 %y) { unreachable }
-  define i32* @t2STRBi12(i32* %x, i32 %y) { unreachable }
-
-  define i32* @storedadd(i32* %x, i32 %y) { unreachable }
-  define i32* @minsize2(i32* %x, i32 %y) minsize optsize { unreachable }
-  define i32* @minsize3(i32* %x, i32 %y) minsize optsize { unreachable }
-
-  define i32* @t2LDRi12_posoff(i32* %x, i32 %y) { unreachable }
-  define i32* @t2LDRHi12_posoff(i32* %x, i32 %y) { unreachable }
-  define i32* @t2LDRBi12_posoff(i32* %x, i32 %y) { unreachable }
-  define i32* @t2STRi12_posoff(i32* %x, i32 %y) { unreachable }
-  define i32* @t2STRHi12_posoff(i32* %x, i32 %y) { unreachable }
-  define i32* @t2STRBi12_posoff(i32* %x, i32 %y) { unreachable }
-  define i32* @t2LDRi12_negoff(i32* %x, i32 %y) { unreachable }
-  define i32* @t2LDRHi12_negoff(i32* %x, i32 %y) { unreachable }
-  define i32* @t2LDRBi12_negoff(i32* %x, i32 %y) { unreachable }
-  define i32* @t2STRi12_negoff(i32* %x, i32 %y) { unreachable }
-  define i32* @t2STRHi12_negoff(i32* %x, i32 %y) { unreachable }
-  define i32* @t2STRBi12_negoff(i32* %x, i32 %y) { unreachable }
+  define ptr @t2LDRi12(ptr %x, i32 %y) { unreachable }
+  define ptr @t2LDRHi12(ptr %x, i32 %y) { unreachable }
+  define ptr @t2LDRSHi12(ptr %x, i32 %y) { unreachable }
+  define ptr @t2LDRBi12(ptr %x, i32 %y) { unreachable }
+  define ptr @t2LDRSBi12(ptr %x, i32 %y) { unreachable }
+  define ptr @t2STRi12(ptr %x, i32 %y) { unreachable }
+  define ptr @t2STRHi12(ptr %x, i32 %y) { unreachable }
+  define ptr @t2STRBi12(ptr %x, i32 %y) { unreachable }
+
+  define ptr @storedadd(ptr %x, i32 %y) { unreachable }
+  define ptr @minsize2(ptr %x, i32 %y) minsize optsize { unreachable }
+  define ptr @minsize3(ptr %x, i32 %y) minsize optsize { unreachable }
+
+  define ptr @t2LDRi12_posoff(ptr %x, i32 %y) { unreachable }
+  define ptr @t2LDRHi12_posoff(ptr %x, i32 %y) { unreachable }
+  define ptr @t2LDRBi12_posoff(ptr %x, i32 %y) { unreachable }
+  define ptr @t2STRi12_posoff(ptr %x, i32 %y) { unreachable }
+  define ptr @t2STRHi12_posoff(ptr %x, i32 %y) { unreachable }
+  define ptr @t2STRBi12_posoff(ptr %x, i32 %y) { unreachable }
+  define ptr @t2LDRi12_negoff(ptr %x, i32 %y) { unreachable }
+  define ptr @t2LDRHi12_negoff(ptr %x, i32 %y) { unreachable }
+  define ptr @t2LDRBi12_negoff(ptr %x, i32 %y) { unreachable }
+  define ptr @t2STRi12_negoff(ptr %x, i32 %y) { unreachable }
+  define ptr @t2STRHi12_negoff(ptr %x, i32 %y) { unreachable }
+  define ptr @t2STRBi12_negoff(ptr %x, i32 %y) { unreachable }
 
 ...
 ---

diff  --git a/llvm/test/CodeGen/Thumb2/store-prepostinc.mir b/llvm/test/CodeGen/Thumb2/store-prepostinc.mir
index aa46dc607148b..18a47f7aee026 100644
--- a/llvm/test/CodeGen/Thumb2/store-prepostinc.mir
+++ b/llvm/test/CodeGen/Thumb2/store-prepostinc.mir
@@ -5,41 +5,41 @@
   target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv7m-none-unknown-eabi"
 
-  define i8* @STR_pre4(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_pre8(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_pre255(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_pre256(i8* %p, i32 %v) { unreachable }
-
-  define i8* @STRD_pre4(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_pre8(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_pre255(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_pre256(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_pre1020(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_pre1024(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_prem4(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_prem8(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_prem255(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_prem256(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_prem1020(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_prem1024(i8* %p, i32 %v) { unreachable }
-
-  define i8* @STR_post4(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_post8(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_post255(i8* %p, i32 %v) { unreachable }
-  define i8* @STR_post256(i8* %p, i32 %v) { unreachable }
-
-  define i8* @STRD_post4(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_post8(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_post255(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_post256(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_post1020(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_post1024(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_postm4(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_postm8(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_postm255(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_postm256(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_postm1020(i8* %p, i32 %v) { unreachable }
-  define i8* @STRD_postm1024(i8* %p, i32 %v) { unreachable }
+  define ptr @STR_pre4(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_pre8(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_pre255(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_pre256(ptr %p, i32 %v) { unreachable }
+
+  define ptr @STRD_pre4(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_pre8(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_pre255(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_pre256(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_pre1020(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_pre1024(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_prem4(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_prem8(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_prem255(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_prem256(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_prem1020(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_prem1024(ptr %p, i32 %v) { unreachable }
+
+  define ptr @STR_post4(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_post8(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_post255(ptr %p, i32 %v) { unreachable }
+  define ptr @STR_post256(ptr %p, i32 %v) { unreachable }
+
+  define ptr @STRD_post4(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_post8(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_post255(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_post256(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_post1020(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_post1024(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_postm4(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_postm8(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_postm255(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_postm256(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_postm1020(ptr %p, i32 %v) { unreachable }
+  define ptr @STRD_postm1024(ptr %p, i32 %v) { unreachable }
 
 ...
 ---

diff  --git a/llvm/test/CodeGen/Thumb2/swp-exitbranchdir.mir b/llvm/test/CodeGen/Thumb2/swp-exitbranchdir.mir
index fd8aa9d5d00c4..ba1004515fd54 100644
--- a/llvm/test/CodeGen/Thumb2/swp-exitbranchdir.mir
+++ b/llvm/test/CodeGen/Thumb2/swp-exitbranchdir.mir
@@ -2,30 +2,30 @@
 # RUN: llc -mtriple=thumbv7m-none-eabi -mcpu=cortex-m7 -run-pass=pipeliner --pipeliner-force-issue-width=10 -o - %s | FileCheck %s --check-prefix=CHECK
 
 --- |
-  define hidden float @dot(float* nocapture noundef readonly %a, float* nocapture noundef readonly %b, i32 noundef %sz) local_unnamed_addr #0 {
+  define hidden float @dot(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %sz) local_unnamed_addr #0 {
   entry:
     %cmp8 = icmp sgt i32 %sz, 0
     br i1 %cmp8, label %for.body.preheader, label %for.end
 
   for.body.preheader:                               ; preds = %entry
-    %scevgep = getelementptr float, float* %b, i32 -1
-    %scevgep4 = getelementptr float, float* %a, i32 -1
+    %scevgep = getelementptr float, ptr %b, i32 -1
+    %scevgep4 = getelementptr float, ptr %a, i32 -1
     br label %for.body
 
   for.body:                                         ; preds = %for.body.preheader, %for.body
-    %lsr.iv5 = phi float* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-    %lsr.iv1 = phi float* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+    %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+    %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
     %lsr.iv = phi i32 [ %sz, %for.body.preheader ], [ %lsr.iv.next, %for.body ]
     %sum.010 = phi float [ %add, %for.body ], [ 0.000000e+00, %for.body.preheader ]
-    %scevgep7 = getelementptr float, float* %lsr.iv5, i32 1
-    %0 = load float, float* %scevgep7, align 4
-    %scevgep3 = getelementptr float, float* %lsr.iv1, i32 1
-    %1 = load float, float* %scevgep3, align 4
+    %scevgep7 = getelementptr float, ptr %lsr.iv5, i32 1
+    %0 = load float, ptr %scevgep7, align 4
+    %scevgep3 = getelementptr float, ptr %lsr.iv1, i32 1
+    %1 = load float, ptr %scevgep3, align 4
     %mul = fmul fast float %1, %0
     %add = fadd fast float %mul, %sum.010
     %lsr.iv.next = add i32 %lsr.iv, -1
-    %scevgep2 = getelementptr float, float* %lsr.iv1, i32 1
-    %scevgep6 = getelementptr float, float* %lsr.iv5, i32 1
+    %scevgep2 = getelementptr float, ptr %lsr.iv1, i32 1
+    %scevgep6 = getelementptr float, ptr %lsr.iv5, i32 1
     %exitcond.not = icmp ne i32 %lsr.iv.next, 0
     br i1 %exitcond.not, label %for.body, label %for.end, !llvm.loop !0
 

diff  --git a/llvm/test/CodeGen/Thumb2/swp-fixedii-le.mir b/llvm/test/CodeGen/Thumb2/swp-fixedii-le.mir
index 56a4426e7beb1..854c5b8249328 100644
--- a/llvm/test/CodeGen/Thumb2/swp-fixedii-le.mir
+++ b/llvm/test/CodeGen/Thumb2/swp-fixedii-le.mir
@@ -2,30 +2,30 @@
 # RUN: llc -mtriple=thumbv8.1m.main-none-eabi -mcpu=cortex-m85 -mattr=+use-mipipeliner -run-pass=pipeliner --pipeliner-force-issue-width=10 -o - %s | FileCheck %s --check-prefix=CHECK
 
 --- |
-  define hidden float @dot(float* nocapture noundef readonly %a, float* nocapture noundef readonly %b, i32 noundef %sz) local_unnamed_addr #0 {
+  define hidden float @dot(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %sz) local_unnamed_addr #0 {
   entry:
     %cmp8 = icmp sgt i32 %sz, 0
     br i1 %cmp8, label %for.body.preheader, label %for.end
 
   for.body.preheader:                               ; preds = %entry
-    %scevgep = getelementptr float, float* %b, i32 -1
-    %scevgep4 = getelementptr float, float* %a, i32 -1
+    %scevgep = getelementptr float, ptr %b, i32 -1
+    %scevgep4 = getelementptr float, ptr %a, i32 -1
     br label %for.body
 
   for.body:                                         ; preds = %for.body.preheader, %for.body
-    %lsr.iv5 = phi float* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-    %lsr.iv1 = phi float* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+    %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+    %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
     %lsr.iv = phi i32 [ %sz, %for.body.preheader ], [ %lsr.iv.next, %for.body ]
     %sum.010 = phi float [ %add, %for.body ], [ 0.000000e+00, %for.body.preheader ]
-    %scevgep7 = getelementptr float, float* %lsr.iv5, i32 1
-    %0 = load float, float* %scevgep7, align 4
-    %scevgep3 = getelementptr float, float* %lsr.iv1, i32 1
-    %1 = load float, float* %scevgep3, align 4
+    %scevgep7 = getelementptr float, ptr %lsr.iv5, i32 1
+    %0 = load float, ptr %scevgep7, align 4
+    %scevgep3 = getelementptr float, ptr %lsr.iv1, i32 1
+    %1 = load float, ptr %scevgep3, align 4
     %mul = fmul fast float %1, %0
     %add = fadd fast float %mul, %sum.010
     %lsr.iv.next = add i32 %lsr.iv, -1
-    %scevgep2 = getelementptr float, float* %lsr.iv1, i32 1
-    %scevgep6 = getelementptr float, float* %lsr.iv5, i32 1
+    %scevgep2 = getelementptr float, ptr %lsr.iv1, i32 1
+    %scevgep6 = getelementptr float, ptr %lsr.iv5, i32 1
     %exitcond.not = icmp eq i32 %lsr.iv.next, 0
     br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
 

diff  --git a/llvm/test/CodeGen/Thumb2/swp-fixedii.mir b/llvm/test/CodeGen/Thumb2/swp-fixedii.mir
index e65abb789d800..dd02703c4b2a3 100644
--- a/llvm/test/CodeGen/Thumb2/swp-fixedii.mir
+++ b/llvm/test/CodeGen/Thumb2/swp-fixedii.mir
@@ -2,30 +2,30 @@
 # RUN: llc -mtriple=thumbv8.1m.main-none-eabi -mcpu=cortex-m85 -mattr=+use-mipipeliner -run-pass=pipeliner --pipeliner-force-issue-width=10 -o - %s | FileCheck %s --check-prefix=CHECK
 
 --- |
-  define hidden float @dot(float* nocapture noundef readonly %a, float* nocapture noundef readonly %b, i32 noundef %sz) local_unnamed_addr #0 {
+  define hidden float @dot(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %sz) local_unnamed_addr #0 {
   entry:
     %cmp8 = icmp sgt i32 %sz, 0
     br i1 %cmp8, label %for.body.preheader, label %for.end
 
   for.body.preheader:                               ; preds = %entry
-    %scevgep = getelementptr float, float* %b, i32 -1
-    %scevgep4 = getelementptr float, float* %a, i32 -1
+    %scevgep = getelementptr float, ptr %b, i32 -1
+    %scevgep4 = getelementptr float, ptr %a, i32 -1
     br label %for.body
 
   for.body:                                         ; preds = %for.body.preheader, %for.body
-    %lsr.iv5 = phi float* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-    %lsr.iv1 = phi float* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+    %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+    %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
     %lsr.iv = phi i32 [ %sz, %for.body.preheader ], [ %lsr.iv.next, %for.body ]
     %sum.010 = phi float [ %add, %for.body ], [ 0.000000e+00, %for.body.preheader ]
-    %scevgep7 = getelementptr float, float* %lsr.iv5, i32 1
-    %0 = load float, float* %scevgep7, align 4
-    %scevgep3 = getelementptr float, float* %lsr.iv1, i32 1
-    %1 = load float, float* %scevgep3, align 4
+    %scevgep7 = getelementptr float, ptr %lsr.iv5, i32 1
+    %0 = load float, ptr %scevgep7, align 4
+    %scevgep3 = getelementptr float, ptr %lsr.iv1, i32 1
+    %1 = load float, ptr %scevgep3, align 4
     %mul = fmul fast float %1, %0
     %add = fadd fast float %mul, %sum.010
     %lsr.iv.next = add i32 %lsr.iv, -1
-    %scevgep2 = getelementptr float, float* %lsr.iv1, i32 1
-    %scevgep6 = getelementptr float, float* %lsr.iv5, i32 1
+    %scevgep2 = getelementptr float, ptr %lsr.iv1, i32 1
+    %scevgep6 = getelementptr float, ptr %lsr.iv5, i32 1
     %exitcond.not = icmp eq i32 %lsr.iv.next, 0
     br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
 

diff  --git a/llvm/test/CodeGen/Thumb2/swp-regpressure.mir b/llvm/test/CodeGen/Thumb2/swp-regpressure.mir
index f755fbfccf9db..2bcb0c92909e5 100644
--- a/llvm/test/CodeGen/Thumb2/swp-regpressure.mir
+++ b/llvm/test/CodeGen/Thumb2/swp-regpressure.mir
@@ -6,30 +6,30 @@
 # but without register pressure is not rejected.
 
 --- |
-  define hidden float @high_pressure(float* nocapture noundef readonly %a, float* nocapture noundef readonly %b, i32 noundef %sz) local_unnamed_addr #0 {
+  define hidden float @high_pressure(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %sz) local_unnamed_addr #0 {
   entry:
     %cmp8 = icmp sgt i32 %sz, 0
     br i1 %cmp8, label %for.body.preheader, label %for.end
 
   for.body.preheader:                               ; preds = %entry
-    %scevgep = getelementptr float, float* %b, i32 -1
-    %scevgep4 = getelementptr float, float* %a, i32 -1
+    %scevgep = getelementptr float, ptr %b, i32 -1
+    %scevgep4 = getelementptr float, ptr %a, i32 -1
     br label %for.body
 
   for.body:                                         ; preds = %for.body.preheader, %for.body
-    %lsr.iv5 = phi float* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-    %lsr.iv1 = phi float* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+    %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+    %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
     %lsr.iv = phi i32 [ %sz, %for.body.preheader ], [ %lsr.iv.next, %for.body ]
     %sum.010 = phi float [ %add, %for.body ], [ 0.000000e+00, %for.body.preheader ]
-    %scevgep7 = getelementptr float, float* %lsr.iv5, i32 1
-    %0 = load float, float* %scevgep7, align 4
-    %scevgep3 = getelementptr float, float* %lsr.iv1, i32 1
-    %1 = load float, float* %scevgep3, align 4
+    %scevgep7 = getelementptr float, ptr %lsr.iv5, i32 1
+    %0 = load float, ptr %scevgep7, align 4
+    %scevgep3 = getelementptr float, ptr %lsr.iv1, i32 1
+    %1 = load float, ptr %scevgep3, align 4
     %mul = fmul fast float %1, %0
     %add = fadd fast float %mul, %sum.010
     %lsr.iv.next = add i32 %lsr.iv, -1
-    %scevgep2 = getelementptr float, float* %lsr.iv1, i32 1
-    %scevgep6 = getelementptr float, float* %lsr.iv5, i32 1
+    %scevgep2 = getelementptr float, ptr %lsr.iv1, i32 1
+    %scevgep6 = getelementptr float, ptr %lsr.iv5, i32 1
     %exitcond.not = icmp eq i32 %lsr.iv.next, 0
     br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
 
@@ -44,30 +44,30 @@
   !3 = !{!"llvm.loop.pipeline.initiationinterval", i32 3}
 
 
-  define hidden float @low_pressure(float* nocapture noundef readonly %a, float* nocapture noundef readonly %b, i32 noundef %sz) local_unnamed_addr #0 {
+  define hidden float @low_pressure(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %sz) local_unnamed_addr #0 {
   entry:
     %cmp8 = icmp sgt i32 %sz, 0
     br i1 %cmp8, label %for.body.preheader, label %for.end
 
   for.body.preheader:                               ; preds = %entry
-    %scevgep = getelementptr float, float* %b, i32 -1
-    %scevgep4 = getelementptr float, float* %a, i32 -1
+    %scevgep = getelementptr float, ptr %b, i32 -1
+    %scevgep4 = getelementptr float, ptr %a, i32 -1
     br label %for.body
 
   for.body:                                         ; preds = %for.body.preheader, %for.body
-    %lsr.iv5 = phi float* [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
-    %lsr.iv1 = phi float* [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
+    %lsr.iv5 = phi ptr [ %scevgep4, %for.body.preheader ], [ %scevgep6, %for.body ]
+    %lsr.iv1 = phi ptr [ %scevgep, %for.body.preheader ], [ %scevgep2, %for.body ]
     %lsr.iv = phi i32 [ %sz, %for.body.preheader ], [ %lsr.iv.next, %for.body ]
     %sum.010 = phi float [ %add, %for.body ], [ 0.000000e+00, %for.body.preheader ]
-    %scevgep7 = getelementptr float, float* %lsr.iv5, i32 1
-    %0 = load float, float* %scevgep7, align 4
-    %scevgep3 = getelementptr float, float* %lsr.iv1, i32 1
-    %1 = load float, float* %scevgep3, align 4
+    %scevgep7 = getelementptr float, ptr %lsr.iv5, i32 1
+    %0 = load float, ptr %scevgep7, align 4
+    %scevgep3 = getelementptr float, ptr %lsr.iv1, i32 1
+    %1 = load float, ptr %scevgep3, align 4
     %mul = fmul fast float %1, %0
     %add = fadd fast float %mul, %sum.010
     %lsr.iv.next = add i32 %lsr.iv, -1
-    %scevgep2 = getelementptr float, float* %lsr.iv1, i32 1
-    %scevgep6 = getelementptr float, float* %lsr.iv5, i32 1
+    %scevgep2 = getelementptr float, ptr %lsr.iv1, i32 1
+    %scevgep6 = getelementptr float, ptr %lsr.iv5, i32 1
     %exitcond.not = icmp eq i32 %lsr.iv.next, 0
     br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !4
 

diff  --git a/llvm/test/CodeGen/Thumb2/tbb-removeadd.mir b/llvm/test/CodeGen/Thumb2/tbb-removeadd.mir
index 44fd45d8967ac..9fe555ddab745 100644
--- a/llvm/test/CodeGen/Thumb2/tbb-removeadd.mir
+++ b/llvm/test/CodeGen/Thumb2/tbb-removeadd.mir
@@ -6,7 +6,7 @@
   target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
   target triple = "thumbv8r-arm-none-eabi"
   
-  define void @Func(i32 %i, i32* nocapture %p) local_unnamed_addr {
+  define void @Func(i32 %i, ptr nocapture %p) local_unnamed_addr {
   entry:
     switch i32 %i, label %sw.epilog [
       i32 0, label %sw.bb
@@ -19,7 +19,7 @@
     br label %sw.epilog.sink.split
   
   sw.bb1:                                           ; preds = %entry
-    store i32 0, i32* %p, align 4
+    store i32 0, ptr %p, align 4
     br label %sw.epilog.sink.split
   
   sw.bb3:                                           ; preds = %entry
@@ -27,7 +27,7 @@
   
   sw.epilog.sink.split:                             ; preds = %sw.bb3, %sw.bb1, %sw.bb, %entry
     %.sink = phi i32 [ 2, %sw.bb3 ], [ 0, %sw.bb ], [ 1, %entry ], [ 1, %sw.bb1 ]
-    store i32 %.sink, i32* %p, align 4
+    store i32 %.sink, ptr %p, align 4
     br label %sw.epilog
   
   sw.epilog:                                        ; preds = %sw.epilog.sink.split, %entry

diff  --git a/llvm/test/CodeGen/WebAssembly/cfg-stackify-eh.mir b/llvm/test/CodeGen/WebAssembly/cfg-stackify-eh.mir
index 873273db45415..0386410d1b612 100644
--- a/llvm/test/CodeGen/WebAssembly/cfg-stackify-eh.mir
+++ b/llvm/test/CodeGen/WebAssembly/cfg-stackify-eh.mir
@@ -5,10 +5,10 @@
 
   declare i32 @__gxx_wasm_personality_v0(...)
   declare void @foo()
-  define void @rethrow_arg_test() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+  define void @rethrow_arg_test() personality ptr @__gxx_wasm_personality_v0 {
     ret void
   }
-  define i32 @fix_end_function_test() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+  define i32 @fix_end_function_test() personality ptr @__gxx_wasm_personality_v0 {
     ret i32 0
   }
 ...
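
The personality updates above show another recurring simplification: with a single ptr type there is no i8*, so the constant bitcast wrapping the personality function becomes a no-op and is dropped. A minimal sketch under that assumption (hypothetical @eh_func, not from the patch):

  declare i32 @__gxx_wasm_personality_v0(...)

  ; was: personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*)
  define void @eh_func() personality ptr @__gxx_wasm_personality_v0 {
    ret void
  }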

diff  --git a/llvm/test/CodeGen/WebAssembly/exception.mir b/llvm/test/CodeGen/WebAssembly/exception.mir
index a83fe0285c3d3..895e8d8864ea2 100644
--- a/llvm/test/CodeGen/WebAssembly/exception.mir
+++ b/llvm/test/CodeGen/WebAssembly/exception.mir
@@ -5,13 +5,13 @@
 
   declare i32 @__gxx_wasm_personality_v0(...)
   declare void @foo()
-  define void @eh_label_test() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+  define void @eh_label_test() personality ptr @__gxx_wasm_personality_v0 {
     ret void
   }
-  define void @unreachable_ehpad_test() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+  define void @unreachable_ehpad_test() personality ptr @__gxx_wasm_personality_v0 {
     ret void
   }
-  define void @empty_cleanuppad_test() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+  define void @empty_cleanuppad_test() personality ptr @__gxx_wasm_personality_v0 {
     ret void
   }
 ...

diff  --git a/llvm/test/CodeGen/WebAssembly/function-info.mir b/llvm/test/CodeGen/WebAssembly/function-info.mir
index 1569884f6d97b..2971d234c9b2d 100644
--- a/llvm/test/CodeGen/WebAssembly/function-info.mir
+++ b/llvm/test/CodeGen/WebAssembly/function-info.mir
@@ -8,7 +8,7 @@
   define void @function_property_test() {
     ret void
   }
-  define void @wasm_eh_info_test() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+  define void @wasm_eh_info_test() personality ptr @__gxx_wasm_personality_v0 {
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/X86/AMX/amx-fastconfig.mir b/llvm/test/CodeGen/X86/AMX/amx-fastconfig.mir
index 8da2b78d7944d..84fc47a3a9120 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-fastconfig.mir
+++ b/llvm/test/CodeGen/X86/AMX/amx-fastconfig.mir
@@ -14,15 +14,15 @@
     br i1 %tobool.not, label %if.else, label %if.then
 
   if.then:                                          ; preds = %entry
-    %0 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
-    %1 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
-    %2 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32)
+    %0 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, ptr @buf, i64 32)
+    %1 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, ptr @buf, i64 32)
+    %2 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, ptr @buf, i64 32)
     br label %if.end
 
   if.else:                                          ; preds = %entry
-    %3 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
-    %4 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
-    %5 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf2, i64 0, i64 0), i64 32)
+    %3 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 8, ptr @buf2, i64 32)
+    %4 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %col, ptr @buf2, i64 32)
+    %5 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, ptr @buf2, i64 32)
     br label %if.end
 
   if.end:                                           ; preds = %if.else, %if.then
@@ -30,13 +30,13 @@
     %b.sroa.1069.0.in = phi x86_amx [ %4, %if.else ], [ %1, %if.then ]
     %c.sroa.1044.0.in = phi x86_amx [ %5, %if.else ], [ %2, %if.then ]
     %6 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 %row, i16 %col, i16 8, x86_amx %c.sroa.1044.0.in, x86_amx %a.sroa.1094.0.in, x86_amx %b.sroa.1069.0.in)
-    tail call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0), i64 32, x86_amx %6)
+    tail call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, ptr @buf, i64 32, x86_amx %6)
     ret void
   }
 
-  declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64) #1
+  declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, ptr, i64) #1
   declare x86_amx @llvm.x86.tdpbssd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx) #1
-  declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx) #1
+  declare void @llvm.x86.tilestored64.internal(i16, i16, ptr, i64, x86_amx) #1
 
   attributes #0 = { "target-features"="+amx-int8,+avx512f" }
   attributes #1 = { nounwind "target-features"="+amx-int8,+avx512f" }
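
The AMX hunk above also drops the getelementptr constant expressions: a zero-index GEP to the first element of a global yields the global's own address, so with opaque pointers the whole expression folds to the bare global. A standalone sketch (hypothetical @first_elem, not from the patch):

  @buf = global [1024 x i8] zeroinitializer

  define ptr @first_elem() {
    ; was: ret i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @buf, i64 0, i64 0)
    ret ptr @buf
  }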

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-cmp.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-cmp.mir
index c2d0646d76e6f..32afcb2db4dad 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-cmp.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-cmp.mir
@@ -26,8 +26,8 @@
     ret i32 %res
   }
 
-  define i32 @test_cmp_p0(i32* %a, i32* %b) {
-    %r = icmp ult i32* %a, %b
+  define i32 @test_cmp_p0(ptr %a, ptr %b) {
+    %r = icmp ult ptr %a, %b
     %res = zext i1 %r to i32
     ret i32 %res
   }

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-ptr-add.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-ptr-add.mir
index 2a080117342dc..77ed64a29b896 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-ptr-add.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-ptr-add.mir
@@ -2,23 +2,23 @@
 # RUN: llc -mtriple=x86_64-linux-gnu -run-pass=legalizer %s -o - | FileCheck %s
 
 --- |
-  define void @test_gep_i8(i8* %addr) {
-    %arrayidx = getelementptr i32, i32* undef, i8 5
+  define void @test_gep_i8(ptr %addr) {
+    %arrayidx = getelementptr i32, ptr undef, i8 5
     ret void
   }
 
-  define void @test_gep_i16(i8* %addr) {
-    %arrayidx = getelementptr i32, i32* undef, i16 5
+  define void @test_gep_i16(ptr %addr) {
+    %arrayidx = getelementptr i32, ptr undef, i16 5
     ret void
   }
 
-  define void @test_gep_i32(i8* %addr) {
-    %arrayidx = getelementptr i32, i32* undef, i32 5
+  define void @test_gep_i32(ptr %addr) {
+    %arrayidx = getelementptr i32, ptr undef, i32 5
     ret void
   }
 
-  define void @test_gep_i64(i8* %addr) {
-    %arrayidx = getelementptr i32, i32* undef, i64 5
+  define void @test_gep_i64(ptr %addr) {
+    %arrayidx = getelementptr i32, ptr undef, i64 5
     ret void
   }
 ...

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
index 5b00e48453f9c..1d280e9e4bd11 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
@@ -14,13 +14,13 @@
     ret void
   }
 
-  define <8 x i32> @test_load_v8i32_noalign(<8 x i32>* %p1) {
-    %r = load <8 x i32>, <8 x i32>* %p1, align 1
+  define <8 x i32> @test_load_v8i32_noalign(ptr %p1) {
+    %r = load <8 x i32>, ptr %p1, align 1
     ret <8 x i32> %r
   }
 
-  define void @test_store_v8i32_noalign(<8 x i32> %val, <8 x i32>* %p1) {
-    store <8 x i32> %val, <8 x i32>* %p1, align 1
+  define void @test_store_v8i32_noalign(<8 x i32> %val, ptr %p1) {
+    store <8 x i32> %val, ptr %p1, align 1
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
index 6fd49d06203a2..2f8827c7ff906 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
@@ -15,13 +15,13 @@
     ret void
   }
 
-  define <16 x i32> @test_load_v16i32_noalign(<16 x i32>* %p1) {
-    %r = load <16 x i32>, <16 x i32>* %p1, align 1
+  define <16 x i32> @test_load_v16i32_noalign(ptr %p1) {
+    %r = load <16 x i32>, ptr %p1, align 1
     ret <16 x i32> %r
   }
 
-  define void @test_store_v16i32_noalign(<16 x i32> %val, <16 x i32>* %p1) {
-    store <16 x i32> %val, <16 x i32>* %p1, align 1
+  define void @test_store_v16i32_noalign(<16 x i32> %val, ptr %p1) {
+    store <16 x i32> %val, ptr %p1, align 1
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
index cbc3b57fc7746..c2dcf30359248 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
@@ -66,59 +66,59 @@
     ret <4 x float> %ret
   }
 
-  define i8 @test_load_i8(i8* %p1) {
-    %r = load i8, i8* %p1
+  define i8 @test_load_i8(ptr %p1) {
+    %r = load i8, ptr %p1
     ret i8 %r
   }
 
-  define i16 @test_load_i16(i16* %p1) {
-    %r = load i16, i16* %p1
+  define i16 @test_load_i16(ptr %p1) {
+    %r = load i16, ptr %p1
     ret i16 %r
   }
 
-  define i32 @test_load_i32(i32* %p1) {
-    %r = load i32, i32* %p1
+  define i32 @test_load_i32(ptr %p1) {
+    %r = load i32, ptr %p1
     ret i32 %r
   }
 
-  define i64 @test_load_i64(i64* %p1) {
-    %r = load i64, i64* %p1
+  define i64 @test_load_i64(ptr %p1) {
+    %r = load i64, ptr %p1
     ret i64 %r
   }
 
-  define float @test_load_float(float* %p1) {
-    %r = load float, float* %p1
+  define float @test_load_float(ptr %p1) {
+    %r = load float, ptr %p1
     ret float %r
   }
 
-  define double @test_load_double(double* %p1) {
-    %r = load double, double* %p1
+  define double @test_load_double(ptr %p1) {
+    %r = load double, ptr %p1
     ret double %r
   }
 
-  define <4 x i32> @test_load_v4i32(<4 x i32>* %p1) {
-    %r = load <4 x i32>, <4 x i32>* %p1, align 16
+  define <4 x i32> @test_load_v4i32(ptr %p1) {
+    %r = load <4 x i32>, ptr %p1, align 16
     ret <4 x i32> %r
   }
 
-  define i32* @test_store_i32(i32 %val, i32* %p1) {
-    store i32 %val, i32* %p1
-    ret i32* %p1
+  define ptr @test_store_i32(i32 %val, ptr %p1) {
+    store i32 %val, ptr %p1
+    ret ptr %p1
   }
 
-  define i64* @test_store_i64(i64 %val, i64* %p1) {
-    store i64 %val, i64* %p1
-    ret i64* %p1
+  define ptr @test_store_i64(i64 %val, ptr %p1) {
+    store i64 %val, ptr %p1
+    ret ptr %p1
   }
 
-  define float* @test_store_float(float %val, float* %p1) {
-    store float %val, float* %p1
-    ret float* %p1
+  define ptr @test_store_float(float %val, ptr %p1) {
+    store float %val, ptr %p1
+    ret ptr %p1
   }
 
-  define double* @test_store_double(double %val, double* %p1) {
-    store double %val, double* %p1
-    ret double* %p1
+  define ptr @test_store_double(double %val, ptr %p1) {
+    store double %val, ptr %p1
+    ret ptr %p1
   }
 
   define void @constInt_check() {
@@ -130,8 +130,8 @@
   }
 
   define void @test_gep() {
-    %p1 = getelementptr i32, i32* undef, i32 5
-    %p2 = getelementptr i32, i32* undef, i64 5
+    %p1 = getelementptr i32, ptr undef, i32 5
+    %p2 = getelementptr i32, ptr undef, i64 5
     ret void
   }
 
@@ -177,9 +177,9 @@
 
   @g_int = global i32 0, align 4
 
-  define i32* @test_global_ptrv() {
+  define ptr @test_global_ptrv() {
   entry:
-    ret i32* @g_int
+    ret ptr @g_int
   }
 
   define i8 @test_undef() {
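
As the load, store, and GEP hunks here illustrate, the conversion loses no information the instructions need: load and getelementptr already carry an explicit value or source element type, so only the pointer operand's type changes. A minimal sketch (hypothetical @load_fifth, not from the patch):

  define i32 @load_fifth(ptr %base) {
    ; the element type lives on the instruction, not on the pointer
    %p = getelementptr i32, ptr %base, i64 5 ; was: getelementptr i32, i32* %base, i64 5
    %v = load i32, ptr %p, align 4           ; was: load i32, i32* %p, align 4
    ret i32 %v
  }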

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/select-GV-32.mir b/llvm/test/CodeGen/X86/GlobalISel/select-GV-32.mir
index e28051057104a..4ba8606df5dfc 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-GV-32.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-GV-32.mir
@@ -7,13 +7,13 @@
 
   define dso_local void @test_global_ptrv() {
   entry:
-    store i32* @g_int, i32** undef
+    store ptr @g_int, ptr undef
     ret void
   }
 
   define dso_local i32 @test_global_valv() {
   entry:
-    %0 = load i32, i32* @g_int, align 4
+    %0 = load i32, ptr @g_int, align 4
     ret i32 %0
   }
 
@@ -36,12 +36,12 @@ registers:
   - { id: 1, class: gpr, preferred-register: '' }
 # X32:                     %0:gr32 = IMPLICIT_DEF
 # X32-NEXT:                %1:gr32 = LEA32r $noreg, 1, $noreg, @g_int, $noreg
-# X32-NEXT:                MOV32mr %0, 1, $noreg, 0, $noreg, %1 :: (store (p0) into `i32** undef`)
+# X32-NEXT:                MOV32mr %0, 1, $noreg, 0, $noreg, %1 :: (store (p0) into `ptr undef`)
 # X32-NEXT:                RET 0
 #
 # X32ABI:                  %0:low32_addr_access = IMPLICIT_DEF
 # X32ABI-NEXT:             %1:gr32 = LEA64_32r $noreg, 1, $noreg, @g_int, $noreg
-# X32ABI-NEXT:             MOV32mr %0, 1, $noreg, 0, $noreg, %1 :: (store (p0) into `i32** undef`)
+# X32ABI-NEXT:             MOV32mr %0, 1, $noreg, 0, $noreg, %1 :: (store (p0) into `ptr undef`)
 # X32ABI-NEXT:             RET 0
 body:             |
   bb.1.entry:
@@ -49,7 +49,7 @@ body:             |
 
     %0(p0) = IMPLICIT_DEF
     %1(p0) = G_GLOBAL_VALUE @g_int
-    G_STORE %1(p0), %0(p0) :: (store (p0) into `i32** undef`)
+    G_STORE %1(p0), %0(p0) :: (store (p0) into `ptr undef`)
     RET 0
 
 ...
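
Note that the MIR CHECK lines change alongside the embedded IR: a MachineMemOperand quotes the underlying IR value verbatim, so `i32** undef` in the check strings becomes `ptr undef` even though the G_STORE itself already used the typeless p0, as in the body above:

    %1(p0) = G_GLOBAL_VALUE @g_int
    G_STORE %1(p0), %0(p0) :: (store (p0) into `ptr undef`)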

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/select-GV-64.mir b/llvm/test/CodeGen/X86/GlobalISel/select-GV-64.mir
index aab28d08ce162..4a1f63c987955 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-GV-64.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-GV-64.mir
@@ -7,13 +7,13 @@
 
   define dso_local void @test_global_ptrv() {
   entry:
-    store i32* @g_int, i32** undef
+    store ptr @g_int, ptr undef
     ret void
   }
 
   define dso_local i32 @test_global_valv() {
   entry:
-    %0 = load i32, i32* @g_int, align 4
+    %0 = load i32, ptr @g_int, align 4
     ret i32 %0
   }
 
@@ -33,12 +33,12 @@ registers:
   - { id: 1, class: gpr, preferred-register: '' }
 # X64:                     %0:gr64 = IMPLICIT_DEF
 # X64-NEXT:                %1:gr64 = LEA64r $noreg, 1, $noreg, @g_int, $noreg
-# X64-NEXT:                MOV64mr %0, 1, $noreg, 0, $noreg, %1 :: (store (p0) into `i32** undef`)
+# X64-NEXT:                MOV64mr %0, 1, $noreg, 0, $noreg, %1 :: (store (p0) into `ptr undef`)
 # X64-NEXT:                RET 0
 #
 # X64_DARWIN_PIC:          %0:gr64 = IMPLICIT_DEF
 # X64_DARWIN_PIC-NEXT:     %1:gr64 = LEA64r $rip, 1, $noreg, @g_int, $noreg
-# X64_DARWIN_PIC-NEXT:     MOV64mr %0, 1, $noreg, 0, $noreg, %1 :: (store (p0) into `i32** undef`)
+# X64_DARWIN_PIC-NEXT:     MOV64mr %0, 1, $noreg, 0, $noreg, %1 :: (store (p0) into `ptr undef`)
 # X64_DARWIN_PIC-NEXT:     RET 0
 #
 body:             |
@@ -47,7 +47,7 @@ body:             |
 
     %0(p0) = IMPLICIT_DEF
     %1(p0) = G_GLOBAL_VALUE @g_int
-    G_STORE %1(p0), %0(p0) :: (store (p0) into `i32** undef`)
+    G_STORE %1(p0), %0(p0) :: (store (p0) into `ptr undef`)
     RET 0
 
 ...

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/select-constant.mir b/llvm/test/CodeGen/X86/GlobalISel/select-constant.mir
index 349e107a6379c..a037a736b7317 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-constant.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-constant.mir
@@ -30,8 +30,8 @@
     ret i64 -1
   }
 
-  define void @main(i32** %data) {
-    store i32* null, i32** %data, align 8
+  define void @main(ptr %data) {
+    store ptr null, ptr %data, align 8
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-unordered.mir b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-unordered.mir
index 9a06796429570..e173557791202 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-unordered.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-unordered.mir
@@ -5,97 +5,97 @@
 # RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX512VL
 
 --- |
-  define i8 @test_load_i8(i8* %p1) {
-    %r = load atomic i8, i8* %p1 unordered, align 1
+  define i8 @test_load_i8(ptr %p1) {
+    %r = load atomic i8, ptr %p1 unordered, align 1
     ret i8 %r
   }
 
-  define i16 @test_load_i16(i16* %p1) {
-    %r = load atomic i16, i16* %p1 unordered, align 2
+  define i16 @test_load_i16(ptr %p1) {
+    %r = load atomic i16, ptr %p1 unordered, align 2
     ret i16 %r
   }
 
-  define i32 @test_load_i32(i32* %p1) {
-    %r = load atomic i32, i32* %p1 unordered, align 4
+  define i32 @test_load_i32(ptr %p1) {
+    %r = load atomic i32, ptr %p1 unordered, align 4
     ret i32 %r
   }
 
-  define i64 @test_load_i64(i64* %p1) {
-    %r = load atomic i64, i64* %p1 unordered, align 8
+  define i64 @test_load_i64(ptr %p1) {
+    %r = load atomic i64, ptr %p1 unordered, align 8
     ret i64 %r
   }
 
-  define float @test_load_float(float* %p1) {
-    %r = load atomic float, float* %p1 unordered, align 4
+  define float @test_load_float(ptr %p1) {
+    %r = load atomic float, ptr %p1 unordered, align 4
     ret float %r
   }
 
-  define float @test_load_float_vecreg(float* %p1) {
-    %r = load atomic float, float* %p1 unordered, align 8
+  define float @test_load_float_vecreg(ptr %p1) {
+    %r = load atomic float, ptr %p1 unordered, align 8
     ret float %r
   }
 
-  define double @test_load_double(double* %p1) {
-    %r = load atomic double, double* %p1 unordered, align 8
+  define double @test_load_double(ptr %p1) {
+    %r = load atomic double, ptr %p1 unordered, align 8
     ret double %r
   }
 
-  define double @test_load_double_vecreg(double* %p1) {
-    %r = load atomic double, double* %p1 unordered, align 8
+  define double @test_load_double_vecreg(ptr %p1) {
+    %r = load atomic double, ptr %p1 unordered, align 8
     ret double %r
   }
 
-  define i32* @test_store_i32(i32 %val, i32* %p1) {
-    store atomic i32 %val, i32* %p1 unordered, align 4
-    ret i32* %p1
+  define ptr @test_store_i32(i32 %val, ptr %p1) {
+    store atomic i32 %val, ptr %p1 unordered, align 4
+    ret ptr %p1
   }
 
-  define i64* @test_store_i64(i64 %val, i64* %p1) {
-    store atomic i64 %val, i64* %p1 unordered, align 8
-    ret i64* %p1
+  define ptr @test_store_i64(i64 %val, ptr %p1) {
+    store atomic i64 %val, ptr %p1 unordered, align 8
+    ret ptr %p1
   }
 
-  define float* @test_store_float(float %val, float* %p1) {
-    store atomic float %val, float* %p1 unordered, align 4
-    ret float* %p1
+  define ptr @test_store_float(float %val, ptr %p1) {
+    store atomic float %val, ptr %p1 unordered, align 4
+    ret ptr %p1
   }
 
-  define float* @test_store_float_vec(float %val, float* %p1) {
-    store atomic float %val, float* %p1 unordered, align 4
-    ret float* %p1
+  define ptr @test_store_float_vec(float %val, ptr %p1) {
+    store atomic float %val, ptr %p1 unordered, align 4
+    ret ptr %p1
   }
 
-  define double* @test_store_double(double %val, double* %p1) {
-    store atomic double %val, double* %p1 unordered, align 8
-    ret double* %p1
+  define ptr @test_store_double(double %val, ptr %p1) {
+    store atomic double %val, ptr %p1 unordered, align 8
+    ret ptr %p1
   }
 
-  define double* @test_store_double_vec(double %val, double* %p1) {
-    store atomic double %val, double* %p1 unordered, align 8
-    ret double* %p1
+  define ptr @test_store_double_vec(double %val, ptr %p1) {
+    store atomic double %val, ptr %p1 unordered, align 8
+    ret ptr %p1
   }
 
-  define i32* @test_load_ptr(i32** %ptr1) {
-    %p = load atomic i32*, i32** %ptr1 unordered, align 8
-    ret i32* %p
+  define ptr @test_load_ptr(ptr %ptr1) {
+    %p = load atomic ptr, ptr %ptr1 unordered, align 8
+    ret ptr %p
   }
 
-  define void @test_store_ptr(i32** %ptr1, i32* %a) {
-    store atomic i32* %a, i32** %ptr1 unordered, align 8
+  define void @test_store_ptr(ptr %ptr1, ptr %a) {
+    store atomic ptr %a, ptr %ptr1 unordered, align 8
     ret void
   }
 
-  define i32 @test_gep_folding(i32* %arr, i32 %val) {
-    %arrayidx = getelementptr i32, i32* %arr, i32 5
-    store atomic i32 %val, i32* %arrayidx unordered, align 8
-    %r = load atomic i32, i32* %arrayidx unordered, align 8
+  define i32 @test_gep_folding(ptr %arr, i32 %val) {
+    %arrayidx = getelementptr i32, ptr %arr, i32 5
+    store atomic i32 %val, ptr %arrayidx unordered, align 8
+    %r = load atomic i32, ptr %arrayidx unordered, align 8
     ret i32 %r
   }
 
-  define i32 @test_gep_folding_largeGepIndex(i32* %arr, i32 %val) #0 {
-    %arrayidx = getelementptr i32, i32* %arr, i64 57179869180
-    store atomic i32 %val, i32* %arrayidx unordered, align 8
-    %r = load atomic i32, i32* %arrayidx unordered, align 8
+  define i32 @test_gep_folding_largeGepIndex(ptr %arr, i32 %val) #0 {
+    %arrayidx = getelementptr i32, ptr %arr, i64 57179869180
+    store atomic i32 %val, ptr %arrayidx unordered, align 8
+    %r = load atomic i32, ptr %arrayidx unordered, align 8
     ret i32 %r
   }
 ...

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
index ea7bf09b06674..00b797416fd94 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
@@ -2,43 +2,43 @@
 # RUN: llc -mtriple=i386-linux-gnu  -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL
 
 --- |
-  define i8 @test_load_i8(i8* %p1) {
-    %r = load i8, i8* %p1
+  define i8 @test_load_i8(ptr %p1) {
+    %r = load i8, ptr %p1
     ret i8 %r
   }
 
-  define i16 @test_load_i16(i16* %p1) {
-    %r = load i16, i16* %p1
+  define i16 @test_load_i16(ptr %p1) {
+    %r = load i16, ptr %p1
     ret i16 %r
   }
 
-  define i32 @test_load_i32(i32* %p1) {
-    %r = load i32, i32* %p1
+  define i32 @test_load_i32(ptr %p1) {
+    %r = load i32, ptr %p1
     ret i32 %r
   }
 
-  define i8* @test_store_i8(i8 %val, i8* %p1) {
-    store i8 %val, i8* %p1
-    ret i8* %p1
+  define ptr @test_store_i8(i8 %val, ptr %p1) {
+    store i8 %val, ptr %p1
+    ret ptr %p1
   }
 
-  define i16* @test_store_i16(i16 %val, i16* %p1) {
-    store i16 %val, i16* %p1
-    ret i16* %p1
+  define ptr @test_store_i16(i16 %val, ptr %p1) {
+    store i16 %val, ptr %p1
+    ret ptr %p1
   }
 
-  define i32* @test_store_i32(i32 %val, i32* %p1) {
-    store i32 %val, i32* %p1
-    ret i32* %p1
+  define ptr @test_store_i32(i32 %val, ptr %p1) {
+    store i32 %val, ptr %p1
+    ret ptr %p1
   }
 
-  define i32* @test_load_ptr(i32** %ptr1) {
-    %p = load i32*, i32** %ptr1
-    ret i32* %p
+  define ptr @test_load_ptr(ptr %ptr1) {
+    %p = load ptr, ptr %ptr1
+    ret ptr %p
   }
 
-  define void @test_store_ptr(i32** %ptr1, i32* %a) {
-    store i32* %a, i32** %ptr1
+  define void @test_store_ptr(ptr %ptr1, ptr %a) {
+    store ptr %a, ptr %ptr1
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
index 576bcadf163f0..e2c778b702f4d 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
@@ -5,97 +5,97 @@
 # RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX512VL
 
 --- |
-  define i8 @test_load_i8(i8* %p1) {
-    %r = load i8, i8* %p1
+  define i8 @test_load_i8(ptr %p1) {
+    %r = load i8, ptr %p1
     ret i8 %r
   }
 
-  define i16 @test_load_i16(i16* %p1) {
-    %r = load i16, i16* %p1
+  define i16 @test_load_i16(ptr %p1) {
+    %r = load i16, ptr %p1
     ret i16 %r
   }
 
-  define i32 @test_load_i32(i32* %p1) {
-    %r = load i32, i32* %p1
+  define i32 @test_load_i32(ptr %p1) {
+    %r = load i32, ptr %p1
     ret i32 %r
   }
 
-  define i64 @test_load_i64(i64* %p1) {
-    %r = load i64, i64* %p1
+  define i64 @test_load_i64(ptr %p1) {
+    %r = load i64, ptr %p1
     ret i64 %r
   }
 
-  define float @test_load_float(float* %p1) {
-    %r = load float, float* %p1
+  define float @test_load_float(ptr %p1) {
+    %r = load float, ptr %p1
     ret float %r
   }
 
-  define float @test_load_float_vecreg(float* %p1) {
-    %r = load float, float* %p1
+  define float @test_load_float_vecreg(ptr %p1) {
+    %r = load float, ptr %p1
     ret float %r
   }
 
-  define double @test_load_double(double* %p1) {
-    %r = load double, double* %p1
+  define double @test_load_double(ptr %p1) {
+    %r = load double, ptr %p1
     ret double %r
   }
 
-  define double @test_load_double_vecreg(double* %p1) {
-    %r = load double, double* %p1
+  define double @test_load_double_vecreg(ptr %p1) {
+    %r = load double, ptr %p1
     ret double %r
   }
 
-  define i32* @test_store_i32(i32 %val, i32* %p1) {
-    store i32 %val, i32* %p1
-    ret i32* %p1
+  define ptr @test_store_i32(i32 %val, ptr %p1) {
+    store i32 %val, ptr %p1
+    ret ptr %p1
   }
 
-  define i64* @test_store_i64(i64 %val, i64* %p1) {
-    store i64 %val, i64* %p1
-    ret i64* %p1
+  define ptr @test_store_i64(i64 %val, ptr %p1) {
+    store i64 %val, ptr %p1
+    ret ptr %p1
   }
 
-  define float* @test_store_float(float %val, float* %p1) {
-    store float %val, float* %p1
-    ret float* %p1
+  define ptr @test_store_float(float %val, ptr %p1) {
+    store float %val, ptr %p1
+    ret ptr %p1
   }
 
-  define float* @test_store_float_vec(float %val, float* %p1) {
-    store float %val, float* %p1
-    ret float* %p1
+  define ptr @test_store_float_vec(float %val, ptr %p1) {
+    store float %val, ptr %p1
+    ret ptr %p1
   }
 
-  define double* @test_store_double(double %val, double* %p1) {
-    store double %val, double* %p1
-    ret double* %p1
+  define ptr @test_store_double(double %val, ptr %p1) {
+    store double %val, ptr %p1
+    ret ptr %p1
   }
 
-  define double* @test_store_double_vec(double %val, double* %p1) {
-    store double %val, double* %p1
-    ret double* %p1
+  define ptr @test_store_double_vec(double %val, ptr %p1) {
+    store double %val, ptr %p1
+    ret ptr %p1
   }
 
-  define i32* @test_load_ptr(i32** %ptr1) {
-    %p = load i32*, i32** %ptr1
-    ret i32* %p
+  define ptr @test_load_ptr(ptr %ptr1) {
+    %p = load ptr, ptr %ptr1
+    ret ptr %p
   }
 
-  define void @test_store_ptr(i32** %ptr1, i32* %a) {
-    store i32* %a, i32** %ptr1
+  define void @test_store_ptr(ptr %ptr1, ptr %a) {
+    store ptr %a, ptr %ptr1
     ret void
   }
 
-  define i32 @test_gep_folding(i32* %arr, i32 %val) {
-    %arrayidx = getelementptr i32, i32* %arr, i32 5
-    store i32 %val, i32* %arrayidx
-    %r = load i32, i32* %arrayidx
+  define i32 @test_gep_folding(ptr %arr, i32 %val) {
+    %arrayidx = getelementptr i32, ptr %arr, i32 5
+    store i32 %val, ptr %arrayidx
+    %r = load i32, ptr %arrayidx
     ret i32 %r
   }
 
-  define i32 @test_gep_folding_largeGepIndex(i32* %arr, i32 %val) #0 {
-    %arrayidx = getelementptr i32, i32* %arr, i64 57179869180
-    store i32 %val, i32* %arrayidx
-    %r = load i32, i32* %arrayidx
+  define i32 @test_gep_folding_largeGepIndex(ptr %arr, i32 %val) #0 {
+    %arrayidx = getelementptr i32, ptr %arr, i64 57179869180
+    store i32 %val, ptr %arrayidx
+    %r = load i32, ptr %arrayidx
     ret i32 %r
   }
 ...

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/select-memop-v128.mir b/llvm/test/CodeGen/X86/GlobalISel/select-memop-v128.mir
index 8dafc3cbb7223..f1530940998ed 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-memop-v128.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-memop-v128.mir
@@ -4,24 +4,24 @@
 # RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=ALL,AVX512ALL,AVX512VL
 
 --- |
-  define <4 x i32> @test_load_v4i32_noalign(<4 x i32>* %p1) {
-    %r = load <4 x i32>, <4 x i32>* %p1, align 1
+  define <4 x i32> @test_load_v4i32_noalign(ptr %p1) {
+    %r = load <4 x i32>, ptr %p1, align 1
     ret <4 x i32> %r
   }
 
-  define <4 x i32> @test_load_v4i32_align(<4 x i32>* %p1) {
-    %r = load <4 x i32>, <4 x i32>* %p1, align 16
+  define <4 x i32> @test_load_v4i32_align(ptr %p1) {
+    %r = load <4 x i32>, ptr %p1, align 16
     ret <4 x i32> %r
   }
 
-  define <4 x i32>* @test_store_v4i32_align(<4 x i32> %val, <4 x i32>* %p1) {
-    store <4 x i32> %val, <4 x i32>* %p1, align 16
-    ret <4 x i32>* %p1
+  define ptr @test_store_v4i32_align(<4 x i32> %val, ptr %p1) {
+    store <4 x i32> %val, ptr %p1, align 16
+    ret ptr %p1
   }
 
-  define <4 x i32>* @test_store_v4i32_noalign(<4 x i32> %val, <4 x i32>* %p1) {
-    store <4 x i32> %val, <4 x i32>* %p1, align 1
-    ret <4 x i32>* %p1
+  define ptr @test_store_v4i32_noalign(<4 x i32> %val, ptr %p1) {
+    store <4 x i32> %val, ptr %p1, align 1
+    ret ptr %p1
   }
 
 ...

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/select-memop-v256.mir b/llvm/test/CodeGen/X86/GlobalISel/select-memop-v256.mir
index af839a5d247dd..f24a82899c62e 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-memop-v256.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-memop-v256.mir
@@ -4,23 +4,23 @@
 
 
 --- |
-  define <8 x i32> @test_load_v8i32_noalign(<8 x i32>* %p1) {
-    %r = load <8 x i32>, <8 x i32>* %p1, align 1
+  define <8 x i32> @test_load_v8i32_noalign(ptr %p1) {
+    %r = load <8 x i32>, ptr %p1, align 1
     ret <8 x i32> %r
   }
 
-  define <8 x i32> @test_load_v8i32_align(<8 x i32>* %p1) {
-    %r = load <8 x i32>, <8 x i32>* %p1, align 32
+  define <8 x i32> @test_load_v8i32_align(ptr %p1) {
+    %r = load <8 x i32>, ptr %p1, align 32
     ret <8 x i32> %r
   }
 
-  define void @test_store_v8i32_noalign(<8 x i32> %val, <8 x i32>* %p1) {
-    store <8 x i32> %val, <8 x i32>* %p1, align 1
+  define void @test_store_v8i32_noalign(<8 x i32> %val, ptr %p1) {
+    store <8 x i32> %val, ptr %p1, align 1
     ret void
   }
 
-  define void @test_store_v8i32_align(<8 x i32> %val, <8 x i32>* %p1) {
-    store <8 x i32> %val, <8 x i32>* %p1, align 32
+  define void @test_store_v8i32_align(<8 x i32> %val, ptr %p1) {
+    store <8 x i32> %val, ptr %p1, align 32
     ret void
   }
 

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/select-memop-v512.mir b/llvm/test/CodeGen/X86/GlobalISel/select-memop-v512.mir
index 6aea213b9e7e3..b09daa4940de8 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-memop-v512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-memop-v512.mir
@@ -1,23 +1,23 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX512F
 --- |
-  define <16 x i32> @test_load_v16i32_noalign(<16 x i32>* %p1) {
-    %r = load <16 x i32>, <16 x i32>* %p1, align 1
+  define <16 x i32> @test_load_v16i32_noalign(ptr %p1) {
+    %r = load <16 x i32>, ptr %p1, align 1
     ret <16 x i32> %r
   }
 
-  define <16 x i32> @test_load_v16i32_align(<16 x i32>* %p1) {
-    %r = load <16 x i32>, <16 x i32>* %p1, align 32
+  define <16 x i32> @test_load_v16i32_align(ptr %p1) {
+    %r = load <16 x i32>, ptr %p1, align 32
     ret <16 x i32> %r
   }
 
-  define void @test_store_v16i32_noalign(<16 x i32> %val, <16 x i32>* %p1) {
-    store <16 x i32> %val, <16 x i32>* %p1, align 1
+  define void @test_store_v16i32_noalign(<16 x i32> %val, ptr %p1) {
+    store <16 x i32> %val, ptr %p1, align 1
     ret void
   }
 
-  define void @test_store_v16i32_align(<16 x i32> %val, <16 x i32>* %p1) {
-    store <16 x i32> %val, <16 x i32>* %p1, align 32
+  define void @test_store_v16i32_align(<16 x i32> %val, ptr %p1) {
+    store <16 x i32> %val, ptr %p1, align 32
     ret void
   }
 

diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-ptr-add.mir b/llvm/test/CodeGen/X86/GlobalISel/select-ptr-add.mir
index b8214017a2dad..cbabc7cec2dfd 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-ptr-add.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-ptr-add.mir
@@ -2,9 +2,9 @@
 # RUN: llc -mtriple=x86_64-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
 
 --- |
-  define i32* @test_gep_i32(i32* %arr) {
-    %arrayidx = getelementptr i32, i32* %arr, i32 5
-    ret i32* %arrayidx
+  define ptr @test_gep_i32(ptr %arr) {
+    %arrayidx = getelementptr i32, ptr %arr, i32 5
+    ret ptr %arrayidx
   }
 ...
 ---

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x32-select-frameIndex.mir b/llvm/test/CodeGen/X86/GlobalISel/x32-select-frameIndex.mir
index b03f0fea0a668..55cbbb4e1730c 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x32-select-frameIndex.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x32-select-frameIndex.mir
@@ -2,9 +2,9 @@
 # RUN: llc -mtriple=x86_64-linux-gnux32 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
 
 --- |
-  define i32* @allocai32() {
+  define ptr @allocai32() {
     %ptr1 = alloca i32
-    ret i32* %ptr1
+    ret ptr %ptr1
   }
 
 ...

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir
index 35ab52291a6c1..61f9eb9a72874 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir
@@ -3,9 +3,9 @@
 
   @g_int = global i32 0, align 4
 
-  define i32* @test_global_ptrv() {
+  define ptr @test_global_ptrv() {
   entry:
-    ret i32* @g_int
+    ret ptr @g_int
   }
 ...
 ---

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir
index 5bc6beedbbe7f..6cfeb76bc4ad3 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir
@@ -3,10 +3,10 @@
 
 --- |
 
-  define i32* @inttoptr_p0_s32(i32 %val) {
+  define ptr @inttoptr_p0_s32(i32 %val) {
   entry:
-    %0 = inttoptr i32 %val to i32*
-    ret i32* %0
+    %0 = inttoptr i32 %val to ptr
+    ret ptr %0
   }
 
 ...

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir
index feecc2effa20a..5f0d46f8e913b 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir
@@ -3,27 +3,27 @@
 
 --- |
 
-  define i1 @ptrtoint_s1_p0(i64* %p) {
+  define i1 @ptrtoint_s1_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i1
+    %0 = ptrtoint ptr %p to i1
     ret i1 %0
   }
 
-  define i8 @ptrtoint_s8_p0(i64* %p) {
+  define i8 @ptrtoint_s8_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i8
+    %0 = ptrtoint ptr %p to i8
     ret i8 %0
   }
 
-  define i16 @ptrtoint_s16_p0(i64* %p) {
+  define i16 @ptrtoint_s16_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i16
+    %0 = ptrtoint ptr %p to i16
     ret i16 %0
   }
 
-  define i32 @ptrtoint_s32_p0(i64* %p) {
+  define i32 @ptrtoint_s32_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i32
+    %0 = ptrtoint ptr %p to i32
     ret i32 %0
   }
 

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-select-frameIndex.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-select-frameIndex.mir
index 80cbb6d1089a2..ea5cf25b7f71d 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-select-frameIndex.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-select-frameIndex.mir
@@ -2,9 +2,9 @@
 # RUN: llc -mtriple=i386-linux-gnu      -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
 
 --- |
-  define i32* @allocai32() {
+  define ptr @allocai32() {
     %ptr1 = alloca i32
-    ret i32* %ptr1
+    ret ptr %ptr1
   }
 
 ...

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir
index 32ff65102c1b9..a5273eb9dad2c 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir
@@ -3,10 +3,10 @@
 
 --- |
 
-  define i32* @inttoptr_p0_s32(i32 %val) {
+  define ptr @inttoptr_p0_s32(i32 %val) {
   entry:
-    %0 = inttoptr i32 %val to i32*
-    ret i32* %0
+    %0 = inttoptr i32 %val to ptr
+    ret ptr %0
   }
 
 ...

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir
index 8da74d0f49f9d..768120b2739d6 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir
@@ -3,27 +3,27 @@
 
 --- |
 
-  define i1 @ptrtoint_s1_p0(i64* %p) {
+  define i1 @ptrtoint_s1_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i1
+    %0 = ptrtoint ptr %p to i1
     ret i1 %0
   }
 
-  define i8 @ptrtoint_s8_p0(i64* %p) {
+  define i8 @ptrtoint_s8_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i8
+    %0 = ptrtoint ptr %p to i8
     ret i8 %0
   }
 
-  define i16 @ptrtoint_s16_p0(i64* %p) {
+  define i16 @ptrtoint_s16_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i16
+    %0 = ptrtoint ptr %p to i16
     ret i16 %0
   }
 
-  define i32 @ptrtoint_s32_p0(i64* %p) {
+  define i32 @ptrtoint_s32_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i32
+    %0 = ptrtoint ptr %p to i32
     ret i32 %0
   }
 

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir b/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir
index 5235ae04e2fc7..a2cf55dc2ba54 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir
@@ -3,9 +3,9 @@
 
   @g_int = global i32 0, align 4
 
-  define i32* @test_global_ptrv() {
+  define ptr @test_global_ptrv() {
   entry:
-    ret i32* @g_int
+    ret ptr @g_int
   }
 ...
 ---

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-inttoptr.mir b/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-inttoptr.mir
index 90aed846b95b1..d3f5018533f27 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-inttoptr.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-inttoptr.mir
@@ -3,10 +3,10 @@
 
 --- |
 
-  define i64* @inttoptr_p0_s64(i64 %val) {
+  define ptr @inttoptr_p0_s64(i64 %val) {
   entry:
-    %0 = inttoptr i64 %val to i64*
-    ret i64* %0
+    %0 = inttoptr i64 %val to ptr
+    ret ptr %0
   }
 
 ...

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-ptrtoint.mir b/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-ptrtoint.mir
index 3dc342e6ee0be..215db7f9302c2 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-ptrtoint.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-ptrtoint.mir
@@ -3,33 +3,33 @@
 
 --- |
 
-  define i1 @ptrtoint_s1_p0(i64* %p) {
+  define i1 @ptrtoint_s1_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i1
+    %0 = ptrtoint ptr %p to i1
     ret i1 %0
   }
 
-  define i8 @ptrtoint_s8_p0(i64* %p) {
+  define i8 @ptrtoint_s8_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i8
+    %0 = ptrtoint ptr %p to i8
     ret i8 %0
   }
 
-  define i16 @ptrtoint_s16_p0(i64* %p) {
+  define i16 @ptrtoint_s16_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i16
+    %0 = ptrtoint ptr %p to i16
     ret i16 %0
   }
 
-  define i32 @ptrtoint_s32_p0(i64* %p) {
+  define i32 @ptrtoint_s32_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i32
+    %0 = ptrtoint ptr %p to i32
     ret i32 %0
   }
 
-  define i64 @ptrtoint_s64_p0(i64* %p) {
+  define i64 @ptrtoint_s64_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i64
+    %0 = ptrtoint ptr %p to i64
     ret i64 %0
   }
 

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-frameIndex.mir b/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-frameIndex.mir
index fe0cf0307c35c..cdcf1dfa71c90 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-frameIndex.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-frameIndex.mir
@@ -2,9 +2,9 @@
 # RUN: llc -mtriple=x86_64-linux-gnu    -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
 
 --- |
-  define i32* @allocai32() {
+  define ptr @allocai32() {
     %ptr1 = alloca i32
-    ret i32* %ptr1
+    ret ptr %ptr1
   }
 
 ...

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-inttoptr.mir b/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-inttoptr.mir
index f9ce11e48f744..dd631284ce3e4 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-inttoptr.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-inttoptr.mir
@@ -3,10 +3,10 @@
 
 --- |
 
-  define i64* @inttoptr_p0_s64(i64 %val) {
+  define ptr @inttoptr_p0_s64(i64 %val) {
   entry:
-    %0 = inttoptr i64 %val to i64*
-    ret i64* %0
+    %0 = inttoptr i64 %val to ptr
+    ret ptr %0
   }
 
 ...

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-ptrtoint.mir b/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-ptrtoint.mir
index 4f330f6119fa5..328cf718b2353 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-ptrtoint.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-ptrtoint.mir
@@ -3,33 +3,33 @@
 
 --- |
 
-  define i1 @ptrtoint_s1_p0(i64* %p) {
+  define i1 @ptrtoint_s1_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i1
+    %0 = ptrtoint ptr %p to i1
     ret i1 %0
   }
 
-  define i8 @ptrtoint_s8_p0(i64* %p) {
+  define i8 @ptrtoint_s8_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i8
+    %0 = ptrtoint ptr %p to i8
     ret i8 %0
   }
 
-  define i16 @ptrtoint_s16_p0(i64* %p) {
+  define i16 @ptrtoint_s16_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i16
+    %0 = ptrtoint ptr %p to i16
     ret i16 %0
   }
 
-  define i32 @ptrtoint_s32_p0(i64* %p) {
+  define i32 @ptrtoint_s32_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i32
+    %0 = ptrtoint ptr %p to i32
     ret i32 %0
   }
 
-  define i64 @ptrtoint_s64_p0(i64* %p) {
+  define i64 @ptrtoint_s64_p0(ptr %p) {
   entry:
-    %0 = ptrtoint i64* %p to i64
+    %0 = ptrtoint ptr %p to i64
     ret i64 %0
   }
 

diff --git a/llvm/test/CodeGen/X86/StackColoring-dbg-invariance.mir b/llvm/test/CodeGen/X86/StackColoring-dbg-invariance.mir
index 0fdddeb7257e4..9391430cf3800 100644
--- a/llvm/test/CodeGen/X86/StackColoring-dbg-invariance.mir
+++ b/llvm/test/CodeGen/X86/StackColoring-dbg-invariance.mir
@@ -12,23 +12,23 @@
   target triple = "x86_64-apple-macosx10.8.0"
 
   ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-  declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #0
+  declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
 
   ; Function Attrs: argmemonly nofree nosync nounwind willreturn
-  declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #0
+  declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0
 
   define i32 @test_1(i32 %in) #1 !dbg !5 {
   entry:
-    %a1 = alloca [14 x i8*], align 8
-    %a4 = alloca [11 x i8*], align 8
-    call void @llvm.dbg.value(metadata [11 x i8*]* %a4, metadata !9, metadata !DIExpression()), !dbg !11
+    %a1 = alloca [14 x ptr], align 8
+    %a4 = alloca [11 x ptr], align 8
+    call void @llvm.dbg.value(metadata ptr %a4, metadata !9, metadata !DIExpression()), !dbg !11
     ret i32 0
   }
 
   define i32 @test_2(i32 %in) #1 !dbg !12 {
   entry:
-    %a1 = alloca [14 x i8*], align 8
-    %a4 = alloca [11 x i8*], align 8
+    %a1 = alloca [14 x ptr], align 8
+    %a4 = alloca [11 x ptr], align 8
     ret i32 0
   }
 

diff --git a/llvm/test/CodeGen/X86/adx-commute.mir b/llvm/test/CodeGen/X86/adx-commute.mir
index 1f74abeb64a4b..5683d0610de52 100644
--- a/llvm/test/CodeGen/X86/adx-commute.mir
+++ b/llvm/test/CodeGen/X86/adx-commute.mir
@@ -6,35 +6,35 @@
   source_filename = "test.ll"
   target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 
-  define void @adcx32_commute(i8 %cf, i32 %a, i32 %b, i32* %res) #0 {
+  define void @adcx32_commute(i8 %cf, i32 %a, i32 %b, ptr %res) #0 {
     %ret = call { i8, i32 } @llvm.x86.addcarry.32(i8 %cf, i32 %a, i32 %b)
     %1 = extractvalue { i8, i32 } %ret, 1
     %2 = mul i32 %a, %1
-    store i32 %2, i32* %res
+    store i32 %2, ptr %res
     ret void
   }
 
-  define void @adcx64_commute(i8 %cf, i64 %a, i64 %b, i64* %res) #0 {
+  define void @adcx64_commute(i8 %cf, i64 %a, i64 %b, ptr %res) #0 {
     %ret = call { i8, i64 } @llvm.x86.addcarry.64(i8 %cf, i64 %a, i64 %b)
     %1 = extractvalue { i8, i64 } %ret, 1
     %2 = mul i64 %a, %1
-    store i64 %2, i64* %res
+    store i64 %2, ptr %res
     ret void
   }
 
-  define void @adox32_commute(i8 %cf, i32 %a, i32 %b, i32* %res) #0 {
+  define void @adox32_commute(i8 %cf, i32 %a, i32 %b, ptr %res) #0 {
     %ret = call { i8, i32 } @llvm.x86.addcarry.32(i8 %cf, i32 %a, i32 %b)
     %1 = extractvalue { i8, i32 } %ret, 1
     %2 = mul i32 %a, %1
-    store i32 %2, i32* %res
+    store i32 %2, ptr %res
     ret void
   }
 
-  define void @adox64_commute(i8 %cf, i64 %a, i64 %b, i64* %res) #0 {
+  define void @adox64_commute(i8 %cf, i64 %a, i64 %b, ptr %res) #0 {
     %ret = call { i8, i64 } @llvm.x86.addcarry.64(i8 %cf, i64 %a, i64 %b)
     %1 = extractvalue { i8, i64 } %ret, 1
     %2 = mul i64 %a, %1
-    store i64 %2, i64* %res
+    store i64 %2, ptr %res
     ret void
   }
 
@@ -45,7 +45,7 @@
   declare { i8, i64 } @llvm.x86.addcarry.64(i8, i64, i64) #1
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
 
   attributes #0 = { "target-features"="+adx" }
   attributes #1 = { nounwind readnone "target-features"="+adx" }

diff --git a/llvm/test/CodeGen/X86/basic-block-sections-mir-parse.mir b/llvm/test/CodeGen/X86/basic-block-sections-mir-parse.mir
index ab284955e4955..967622a11cd2b 100644
--- a/llvm/test/CodeGen/X86/basic-block-sections-mir-parse.mir
+++ b/llvm/test/CodeGen/X86/basic-block-sections-mir-parse.mir
@@ -18,21 +18,21 @@
     %2 = alloca i32, align 4
     %3 = alloca i8, align 1
     %4 = zext i1 %0 to i8
-    store i8 %4, i8* %3, align 1
-    %5 = load i8, i8* %3, align 1
+    store i8 %4, ptr %3, align 1
+    %5 = load i8, ptr %3, align 1
     %6 = trunc i8 %5 to i1
     br i1 %6, label %7, label %8
   
   7:                                                ; preds = %1
-    store i32 1, i32* %2, align 4
+    store i32 1, ptr %2, align 4
     br label %9
   
   8:                                                ; preds = %1
-    store i32 0, i32* %2, align 4
+    store i32 0, ptr %2, align 4
     br label %9
   
   9:                                                ; preds = %8, %7
-    %10 = load i32, i32* %2, align 4
+    %10 = load i32, ptr %2, align 4
     ret i32 %10
   }
   

diff --git a/llvm/test/CodeGen/X86/block-placement.mir b/llvm/test/CodeGen/X86/block-placement.mir
index 81d58effbdc2c..3f69ca0a40ad3 100644
--- a/llvm/test/CodeGen/X86/block-placement.mir
+++ b/llvm/test/CodeGen/X86/block-placement.mir
@@ -5,31 +5,31 @@
   source_filename = "test.ll"
   target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
   
-  declare void @stub(i32*)
+  declare void @stub(ptr)
   
-  define i32 @f(i32* %ptr, i1 %cond) {
+  define i32 @f(ptr %ptr, i1 %cond) {
   entry:
     br i1 %cond, label %left, label %right
   
   left:                                             ; preds = %entry
-    %is_null = icmp eq i32* %ptr, null
+    %is_null = icmp eq ptr %ptr, null
     br i1 %is_null, label %null, label %not_null, !prof !0, !make.implicit !1
   
   not_null:                                         ; preds = %left
-    %val = load i32, i32* %ptr
+    %val = load i32, ptr %ptr
     ret i32 %val
   
   null:                                             ; preds = %left
-    call void @stub(i32* %ptr)
+    call void @stub(ptr %ptr)
     unreachable
   
   right:                                            ; preds = %entry
-    call void @stub(i32* null)
+    call void @stub(ptr null)
     unreachable
   }
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #0
+  declare void @llvm.stackprotector(ptr, ptr) #0
   
   attributes #0 = { nounwind }
   

diff --git a/llvm/test/CodeGen/X86/callbr-asm-kill.mir b/llvm/test/CodeGen/X86/callbr-asm-kill.mir
index bd279020b65e6..86c58c4715ed7 100644
--- a/llvm/test/CodeGen/X86/callbr-asm-kill.mir
+++ b/llvm/test/CodeGen/X86/callbr-asm-kill.mir
@@ -10,18 +10,18 @@
   target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
   target triple = "x86_64-unknown-linux-gnu"
 
-  declare void @foo(i8*)
+  declare void @foo(ptr)
 
   ; Function Attrs: nounwind
-  define void @test1(i8* %arg, i8** %mem) #0 {
+  define void @test1(ptr %arg, ptr %mem) #0 {
   entry:
     br label %loop
 
   loop:                                             ; preds = %loop, %entry
-    %a = phi i8* [ %arg, %entry ], [ %b, %loop ]
-    %b = load i8*, i8** %mem, align 8
-    call void @foo(i8* %a)
-    callbr void asm sideeffect "", "*m,!i"(i8* elementtype(i8) %b)
+    %a = phi ptr [ %arg, %entry ], [ %b, %loop ]
+    %b = load ptr, ptr %mem, align 8
+    call void @foo(ptr %a)
+    callbr void asm sideeffect "", "*m,!i"(ptr elementtype(i8) %b)
             to label %end [label %loop]
 
   end:                                              ; preds = %loop

diff --git a/llvm/test/CodeGen/X86/cf-opt-memops.mir b/llvm/test/CodeGen/X86/cf-opt-memops.mir
index 7b63cb2fdc984..44dead87d2e1c 100644
--- a/llvm/test/CodeGen/X86/cf-opt-memops.mir
+++ b/llvm/test/CodeGen/X86/cf-opt-memops.mir
@@ -17,14 +17,14 @@
   
   define dso_local void @initoutput() local_unnamed_addr {
   entry:
-    %call1 = tail call i32 (i8*, ...) @printf(i8* nonnull dereferenceable(1) getelementptr inbounds ([34 x i8], [34 x i8]* @.str.8, i64 0, i64 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.9, i64 0, i64 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.10, i64 0, i64 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.11, i64 0, i64 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.12, i64 0, i64 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.13, i64 0, i64 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.14, i64 0, i64 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.15, i64 0, i64 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.16, i64 0, i64 0))
+    %call1 = tail call i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str.8, ptr @.str.9, ptr @.str.10, ptr @.str.11, ptr @.str.12, ptr @.str.13, ptr @.str.14, ptr @.str.15, ptr @.str.16)
     ret void
   }
   
-  declare dso_local i32 @printf(i8* nocapture readonly, ...) local_unnamed_addr
+  declare dso_local i32 @printf(ptr nocapture readonly, ...) local_unnamed_addr
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #0
+  declare void @llvm.stackprotector(ptr, ptr) #0
   
   attributes #0 = { nounwind }
 

diff --git a/llvm/test/CodeGen/X86/codegen-prepare-replacephi.mir b/llvm/test/CodeGen/X86/codegen-prepare-replacephi.mir
index a8812b4055d7f..aceb344d8b76c 100644
--- a/llvm/test/CodeGen/X86/codegen-prepare-replacephi.mir
+++ b/llvm/test/CodeGen/X86/codegen-prepare-replacephi.mir
@@ -8,30 +8,28 @@
 --- |
   define void @f1() {
   entry:
-    %arrayidx = getelementptr inbounds [2 x i16], [2 x i16]* undef, i16 0, i16 2
-    %0 = bitcast i16* %arrayidx to i32*
-    %1 = bitcast [2 x i16]* undef to i32*
+    %arrayidx = getelementptr inbounds [2 x i16], ptr undef, i16 0, i16 2
     br label %for.cond
 
   for.cond:
-    %2 = phi i32* [ %0, %entry ], [ %7, %cleanup ]
-    %3 = phi i32* [ %0, %entry ], [ %9, %cleanup ]
+    %0 = phi ptr [ %arrayidx, %entry ], [ %5, %cleanup ]
+    %1 = phi ptr [ %arrayidx, %entry ], [ %7, %cleanup ]
     br label %for.body
 
   for.body:
-    %4 = phi i32* [ %3, %for.cond ], [ %9, %cleanup ]
-    %5 = phi i32* [ %2, %for.cond ], [ %9, %cleanup ]
-    %6 = phi i32* [ %2, %for.cond ], [ %9, %cleanup ]
+    %2 = phi ptr [ %1, %for.cond ], [ %7, %cleanup ]
+    %3 = phi ptr [ %0, %for.cond ], [ %7, %cleanup ]
+    %4 = phi ptr [ %0, %for.cond ], [ %7, %cleanup ]
     br i1 false, label %for.cond2, label %if.then
 
   if.then:
-    store i32 undef, i32* %4, align 1
+    store i32 undef, ptr %2, align 1
     unreachable
 
   for.cond2:
-    %7 = phi i32* [ %6, %for.body ], [ %7, %if.then5 ], [ %1, %for.cond2 ]
-    %8 = phi i32* [ %5, %for.body ], [ %8, %if.then5 ], [ %1, %for.cond2 ]
-    %9 = phi i32* [ %4, %for.body ], [ %8, %if.then5 ], [ %1, %for.cond2 ]
+    %5 = phi ptr [ %4, %for.body ], [ %5, %if.then5 ], [ undef, %for.cond2 ]
+    %6 = phi ptr [ %3, %for.body ], [ %6, %if.then5 ], [ undef, %for.cond2 ]
+    %7 = phi ptr [ %2, %for.body ], [ %6, %if.then5 ], [ undef, %for.cond2 ]
     br i1 undef, label %for.cond2, label %if.then5
 
   if.then5:

diff --git a/llvm/test/CodeGen/X86/codegen-prepare-replacephi2.mir b/llvm/test/CodeGen/X86/codegen-prepare-replacephi2.mir
index dba9ed5bbd35e..cdc47606a11e8 100644
--- a/llvm/test/CodeGen/X86/codegen-prepare-replacephi2.mir
+++ b/llvm/test/CodeGen/X86/codegen-prepare-replacephi2.mir
@@ -9,36 +9,34 @@
 
   define void @f1() {
   entry:
-    %arrayidx = getelementptr inbounds [2 x i16], [2 x i16]* undef, i16 0, i16 2
-    %0 = bitcast i16* %arrayidx to i32*
-    %1 = bitcast [2 x i16]* undef to i32*
+    %arrayidx = getelementptr inbounds [2 x i16], ptr undef, i16 0, i16 2
     br label %for.cond
 
   for.cond.loopexit:
     br label %for.cond
 
   for.cond:
-    %2 = phi i32* [ %0, %entry ], [ %12, %for.cond.loopexit ]
-    %3 = phi i32* [ %0, %entry ], [ %14, %for.cond.loopexit ]
+    %0 = phi ptr [ %arrayidx, %entry ], [ %10, %for.cond.loopexit ]
+    %1 = phi ptr [ %arrayidx, %entry ], [ %12, %for.cond.loopexit ]
     br label %for.body
 
   for.body:
-    %4 = phi i32* [ %3, %for.cond ], [ %14, %cleanup ]
-    %5 = phi i32* [ %2, %for.cond ], [ %13, %cleanup ]
-    %6 = phi i32* [ %2, %for.cond ], [ %12, %cleanup ]
+    %2 = phi ptr [ %1, %for.cond ], [ %12, %cleanup ]
+    %3 = phi ptr [ %0, %for.cond ], [ %11, %cleanup ]
+    %4 = phi ptr [ %0, %for.cond ], [ %10, %cleanup ]
     br i1 undef, label %for.cond2.preheader, label %if.then
 
   for.cond2.preheader:
     br label %for.cond2
 
   if.then:
-    store i32 undef, i32* %4, align 1
+    store i32 undef, ptr %2, align 1
     br label %cleanup
 
   for.cond2:
-    %7 = phi i32* [ %10, %for.inc ], [ %6, %for.cond2.preheader ]
-    %8 = phi i32* [ %11, %for.inc ], [ %5, %for.cond2.preheader ]
-    %9 = phi i32* [ %11, %for.inc ], [ %4, %for.cond2.preheader ]
+    %5 = phi ptr [ %8, %for.inc ], [ %4, %for.cond2.preheader ]
+    %6 = phi ptr [ %9, %for.inc ], [ %3, %for.cond2.preheader ]
+    %7 = phi ptr [ %9, %for.inc ], [ %2, %for.cond2.preheader ]
     br i1 undef, label %for.inc, label %if.then5
 
   if.then5:
@@ -48,17 +46,17 @@
     br label %for.inc
 
   for.inc:
-    %10 = phi i32* [ %7, %if.end ], [ %1, %for.cond2 ]
-    %11 = phi i32* [ %8, %if.end ], [ %1, %for.cond2 ]
+    %8 = phi ptr [ %5, %if.end ], [ undef, %for.cond2 ]
+    %9 = phi ptr [ %6, %if.end ], [ undef, %for.cond2 ]
     br label %for.cond2
 
   cleanup.loopexit:
     br label %cleanup
 
   cleanup:
-    %12 = phi i32* [ %6, %if.then ], [ %7, %cleanup.loopexit ]
-    %13 = phi i32* [ %5, %if.then ], [ %8, %cleanup.loopexit ]
-    %14 = phi i32* [ %4, %if.then ], [ %9, %cleanup.loopexit ]
+    %10 = phi ptr [ %4, %if.then ], [ %5, %cleanup.loopexit ]
+    %11 = phi ptr [ %3, %if.then ], [ %6, %cleanup.loopexit ]
+    %12 = phi ptr [ %2, %if.then ], [ %7, %cleanup.loopexit ]
     br i1 true, label %for.cond.loopexit, label %for.body
   }
 

diff --git a/llvm/test/CodeGen/X86/copy-eflags-liveinlists.mir b/llvm/test/CodeGen/X86/copy-eflags-liveinlists.mir
index 35ac60453c287..e50947a48112b 100644
--- a/llvm/test/CodeGen/X86/copy-eflags-liveinlists.mir
+++ b/llvm/test/CodeGen/X86/copy-eflags-liveinlists.mir
@@ -6,7 +6,7 @@
 # CHECK-NOT: liveins: $eflags
 
 --- |
-  define void @fun(i16 %arg, i64 %arg1, i8 %arg2, i8* %arg3, i32 %arg4) { ret void}
+  define void @fun(i16 %arg, i64 %arg1, i8 %arg2, ptr %arg3, i32 %arg4) { ret void}
 ...
 ---
 name:            fun

diff --git a/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding2.mir b/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding2.mir
index 84f205ad77458..0ad9222a0741a 100644
--- a/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding2.mir
+++ b/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding2.mir
@@ -46,7 +46,7 @@
     br i1 1, label %for.cond.cleanup, label %for.body.11
   
   for.body.11:
-    %d.0.d.0..12 = load volatile i8, i8* %d, align 1
+    %d.0.d.0..12 = load volatile i8, ptr %d, align 1
     call void @llvm.dbg.value(metadata i8 %d.0.d.0..12, metadata !16, metadata !DIExpression()), !dbg !19
     br label %for.cond.cleanup
   }

diff --git a/llvm/test/CodeGen/X86/domain-reassignment.mir b/llvm/test/CodeGen/X86/domain-reassignment.mir
index 75bfeec188ae7..f4e454e3fa497 100644
--- a/llvm/test/CodeGen/X86/domain-reassignment.mir
+++ b/llvm/test/CodeGen/X86/domain-reassignment.mir
@@ -6,7 +6,7 @@
   target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
   target triple = "x86_64-unknown-unknown"
 
-  define void @test_fcmp_storefloat(i1 %cond, float* %fptr, float %f1, float %f2, float %f3, float %f4, float %f5, float %f6) #0 {
+  define void @test_fcmp_storefloat(i1 %cond, ptr %fptr, float %f1, float %f2, float %f3, float %f4, float %f5, float %f6) #0 {
   entry:
     br i1 %cond, label %if, label %else
 
@@ -21,7 +21,7 @@
   exit:                                             ; preds = %else, %if
     %val = phi i1 [ %cmp1, %if ], [ %cmp2, %else ]
     %selected = select i1 %val, float %f1, float %f2
-    store float %selected, float* %fptr
+    store float %selected, ptr %fptr
     ret void
   }
 

diff --git a/llvm/test/CodeGen/X86/expand-call-rvmarker.mir b/llvm/test/CodeGen/X86/expand-call-rvmarker.mir
index 4f16265e1ea95..33069fc486a46 100644
--- a/llvm/test/CodeGen/X86/expand-call-rvmarker.mir
+++ b/llvm/test/CodeGen/X86/expand-call-rvmarker.mir
@@ -4,9 +4,9 @@
   target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
   target triple = "x86_64-apple-macosx11.0.0"
 
-  declare i8* @objc_retainAutoreleasedReturnValue(i8*)
-  declare i8* @objc_unsafeClaimAutoreleasedReturnValue(i8*)
-  declare i8* @fn()
+  declare ptr @objc_retainAutoreleasedReturnValue(ptr)
+  declare ptr @objc_unsafeClaimAutoreleasedReturnValue(ptr)
+  declare ptr @fn()
 
   define void @test_objc_retainAutoreleaseReturnedValue() {
     ret void

diff --git a/llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir b/llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir
index 7ab45fe7e23db..38610c92e9704 100644
--- a/llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir
+++ b/llvm/test/CodeGen/X86/fast-regalloc-live-out-debug-values.mir
@@ -6,59 +6,59 @@
   define dso_local i32 @foo(i32 %a) #0 !dbg !6 {
   entry:
     %a.addr = alloca i32, align 4
-    %saved_stack = alloca i8*, align 8
+    %saved_stack = alloca ptr, align 8
     %__vla_expr0 = alloca i64, align 8
     %i = alloca i32, align 4
-    store i32 %a, i32* %a.addr, align 4
-    call void @llvm.dbg.declare(metadata i32* %a.addr, metadata !11, metadata !DIExpression()), !dbg !12
-    %0 = load i32, i32* %a.addr, align 4, !dbg !13
+    store i32 %a, ptr %a.addr, align 4
+    call void @llvm.dbg.declare(metadata ptr %a.addr, metadata !11, metadata !DIExpression()), !dbg !12
+    %0 = load i32, ptr %a.addr, align 4, !dbg !13
     %1 = zext i32 %0 to i64, !dbg !14
-    %2 = call i8* @llvm.stacksave(), !dbg !14
-    store i8* %2, i8** %saved_stack, align 8, !dbg !14
+    %2 = call ptr @llvm.stacksave(), !dbg !14
+    store ptr %2, ptr %saved_stack, align 8, !dbg !14
     %vla = alloca i32, i64 %1, align 16, !dbg !14
-    store i64 %1, i64* %__vla_expr0, align 8, !dbg !14
-    call void @llvm.dbg.declare(metadata i64* %__vla_expr0, metadata !15, metadata !DIExpression()), !dbg !17
-    call void @llvm.dbg.declare(metadata i32* %vla, metadata !18, metadata !DIExpression()), !dbg !22
-    call void @llvm.dbg.declare(metadata i32* %i, metadata !23, metadata !DIExpression()), !dbg !25
-    store i32 0, i32* %i, align 4, !dbg !25
+    store i64 %1, ptr %__vla_expr0, align 8, !dbg !14
+    call void @llvm.dbg.declare(metadata ptr %__vla_expr0, metadata !15, metadata !DIExpression()), !dbg !17
+    call void @llvm.dbg.declare(metadata ptr %vla, metadata !18, metadata !DIExpression()), !dbg !22
+    call void @llvm.dbg.declare(metadata ptr %i, metadata !23, metadata !DIExpression()), !dbg !25
+    store i32 0, ptr %i, align 4, !dbg !25
     br label %for.cond, !dbg !26
 
   for.cond:                                         ; preds = %for.inc, %entry
-    %3 = load i32, i32* %i, align 4, !dbg !27
-    %4 = load i32, i32* %a.addr, align 4, !dbg !29
+    %3 = load i32, ptr %i, align 4, !dbg !27
+    %4 = load i32, ptr %a.addr, align 4, !dbg !29
     %cmp = icmp slt i32 %3, %4, !dbg !30
     br i1 %cmp, label %for.body, label %for.end, !dbg !31
 
   for.body:                                         ; preds = %for.cond
-    %5 = load i32, i32* %a.addr, align 4, !dbg !32
-    %6 = load i32, i32* %i, align 4, !dbg !33
+    %5 = load i32, ptr %a.addr, align 4, !dbg !32
+    %6 = load i32, ptr %i, align 4, !dbg !33
     %sub = sub nsw i32 %5, %6, !dbg !34
-    %7 = load i32, i32* %i, align 4, !dbg !35
+    %7 = load i32, ptr %i, align 4, !dbg !35
     %idxprom = sext i32 %7 to i64, !dbg !36
-    %arrayidx = getelementptr inbounds i32, i32* %vla, i64 %idxprom, !dbg !36
-    store i32 %sub, i32* %arrayidx, align 4, !dbg !37
+    %arrayidx = getelementptr inbounds i32, ptr %vla, i64 %idxprom, !dbg !36
+    store i32 %sub, ptr %arrayidx, align 4, !dbg !37
     br label %for.inc, !dbg !36
 
   for.inc:                                          ; preds = %for.body
-    %8 = load i32, i32* %i, align 4, !dbg !38
+    %8 = load i32, ptr %i, align 4, !dbg !38
     %inc = add nsw i32 %8, 1, !dbg !38
-    store i32 %inc, i32* %i, align 4, !dbg !38
+    store i32 %inc, ptr %i, align 4, !dbg !38
     br label %for.cond, !dbg !39, !llvm.loop !40
 
   for.end:                                          ; preds = %for.cond
-    %9 = load i32, i32* %a.addr, align 4, !dbg !42
+    %9 = load i32, ptr %a.addr, align 4, !dbg !42
     %sub1 = sub nsw i32 %9, 1, !dbg !43
     %idxprom2 = sext i32 %sub1 to i64, !dbg !44
-    %arrayidx3 = getelementptr inbounds i32, i32* %vla, i64 %idxprom2, !dbg !44
-    %10 = load i32, i32* %arrayidx3, align 4, !dbg !44
-    %11 = load i8*, i8** %saved_stack, align 8, !dbg !45
-    call void @llvm.stackrestore(i8* %11), !dbg !45
+    %arrayidx3 = getelementptr inbounds i32, ptr %vla, i64 %idxprom2, !dbg !44
+    %10 = load i32, ptr %arrayidx3, align 4, !dbg !44
+    %11 = load ptr, ptr %saved_stack, align 8, !dbg !45
+    call void @llvm.stackrestore(ptr %11), !dbg !45
     ret i32 %10, !dbg !45
   }
 
   declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
-  declare i8* @llvm.stacksave() #2
-  declare void @llvm.stackrestore(i8*) #2
+  declare ptr @llvm.stacksave() #2
+  declare void @llvm.stackrestore(ptr) #2
 
   attributes #0 = { noinline nounwind optnone uwtable }
   attributes #1 = { nounwind readnone speculatable willreturn }

diff --git a/llvm/test/CodeGen/X86/fixup-bw-inst.mir b/llvm/test/CodeGen/X86/fixup-bw-inst.mir
index 4e997c15152f0..301bdb033fb14 100644
--- a/llvm/test/CodeGen/X86/fixup-bw-inst.mir
+++ b/llvm/test/CodeGen/X86/fixup-bw-inst.mir
@@ -4,7 +4,7 @@
   define void @test1() { ret void }
   define void @test2() { ret void }
 
-  define i16 @test3(i16* readonly %p) {
+  define i16 @test3(ptr readonly %p) {
   ; Keep original IR to show how the situation like this might happen
   ; due to preceding CG passes.
   ;
@@ -14,11 +14,11 @@
   ; the movw into movzwl because EAX is not live before the load (which
   ; can be seen by the fact that implicit EAX flag is missing).
   entry:
-    %tobool = icmp eq i16* %p, null
+    %tobool = icmp eq ptr %p, null
     br i1 %tobool, label %if.end, label %if.then
 
   if.then:                                          ; preds = %entry
-    %0 = load i16, i16* %p, align 2
+    %0 = load i16, ptr %p, align 2
     br label %if.end
 
   if.end:                                           ; preds = %if.then, %entry

diff --git a/llvm/test/CodeGen/X86/heap-alloc-markers.mir b/llvm/test/CodeGen/X86/heap-alloc-markers.mir
index be4a0d02b6863..0bf83657cb06c 100644
--- a/llvm/test/CodeGen/X86/heap-alloc-markers.mir
+++ b/llvm/test/CodeGen/X86/heap-alloc-markers.mir
@@ -3,11 +3,11 @@
 # Test the emission of heap alloc site instruction labels.
 
 --- |
-  declare i8* @alloc(i32) nounwind
+  declare ptr @alloc(i32) nounwind
 
   define i32 @test(i32 %x) nounwind !dbg !6 {
   entry:
-    call i8* @alloc(i32 %x), !dbg !11, !heapallocsite !2
+    call ptr @alloc(i32 %x), !dbg !11, !heapallocsite !2
     ret i32 0, !dbg !12
   }
 

diff --git a/llvm/test/CodeGen/X86/implicit-null-checks.mir b/llvm/test/CodeGen/X86/implicit-null-checks.mir
index d5a5b256a5067..0077906b60181 100644
--- a/llvm/test/CodeGen/X86/implicit-null-checks.mir
+++ b/llvm/test/CodeGen/X86/implicit-null-checks.mir
@@ -5,7 +5,7 @@
   target triple = "x86_64-apple-macosx"
 
   ;; Positive test
-  define i32 @imp_null_check_with_bitwise_op_0(i32* %x, i32 %val) {
+  define i32 @imp_null_check_with_bitwise_op_0(ptr %x, i32 %val) {
   entry:
     br i1 undef, label %is_null, label %not_null, !make.implicit !0
 
@@ -24,7 +24,7 @@
 
   ;; Negative test.  The regalloc is such that we cannot hoist the
   ;; instruction materializing 2200000 into $eax
-  define i32 @imp_null_check_with_bitwise_op_1(i32* %x, i32 %val, i32* %ptr) {
+  define i32 @imp_null_check_with_bitwise_op_1(ptr %x, i32 %val, ptr %ptr) {
   entry:
     br i1 undef, label %is_null, label %not_null, !make.implicit !0
 
@@ -43,7 +43,7 @@
 
   ;; Negative test: IR is identical to
   ;; @imp_null_check_with_bitwise_op_0 but MIR differs.
-  define i32 @imp_null_check_with_bitwise_op_2(i32* %x, i32 %val) {
+  define i32 @imp_null_check_with_bitwise_op_2(ptr %x, i32 %val) {
   entry:
     br i1 undef, label %is_null, label %not_null, !make.implicit !0
 
@@ -62,7 +62,7 @@
 
   ;; Negative test: IR is identical to
   ;; @imp_null_check_with_bitwise_op_0 but MIR differs.
-  define i32 @imp_null_check_with_bitwise_op_3(i32* %x, i32 %val) {
+  define i32 @imp_null_check_with_bitwise_op_3(ptr %x, i32 %val) {
   entry:
     br i1 undef, label %is_null, label %not_null, !make.implicit !0
 
@@ -80,7 +80,7 @@
   }
 
   ;; Positive test
-  define i32 @imp_null_check_with_bitwise_op_4(i32* %x, i32 %val) {
+  define i32 @imp_null_check_with_bitwise_op_4(ptr %x, i32 %val) {
   entry:
     br i1 undef, label %is_null, label %not_null, !make.implicit !0
 
@@ -99,29 +99,29 @@
 
   declare void @f() readonly
 
-  define i32 @no_hoist_across_call(i32* %ptr) {
+  define i32 @no_hoist_across_call(ptr %ptr) {
   entry:
-    %is_null = icmp eq i32* %ptr, null
+    %is_null = icmp eq ptr %ptr, null
     br i1 %is_null, label %leave, label %stay, !make.implicit !0
 
   stay:
     call void @f()
-    %val = load i32, i32* %ptr
+    %val = load i32, ptr %ptr
     ret i32 %val
 
   leave:
     ret i32 0
   }
 
-  define i32 @dependency_live_in_hazard(i32* %ptr, i32** %ptr2, i32* %ptr3) #0 {
+  define i32 @dependency_live_in_hazard(ptr %ptr, ptr %ptr2, ptr %ptr3) #0 {
   entry:
-    %val = load i32*, i32** %ptr2
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %val = load ptr, ptr %ptr2
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:                                         ; preds = %entry
-    %addend = load i32, i32* %val
-    %result = load i32, i32* %ptr
+    %addend = load i32, ptr %val
+    %result = load i32, ptr %ptr
     %result.shr = lshr i32 %result, 4
     %result.and = and i32 %result.shr, 4095
     %result.add = add i32 %addend, %result.and
@@ -131,9 +131,9 @@
     ret i32 0
   }
 
-  define i32 @use_alternate_load_op(i32* %ptr, i32* %ptr2) {
+  define i32 @use_alternate_load_op(ptr %ptr, ptr %ptr2) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -143,26 +143,26 @@
     ret i32 0
   }
 
-  define i32 @imp_null_check_gep_load_with_use_dep(i32* %x, i32 %a) {
+  define i32 @imp_null_check_gep_load_with_use_dep(ptr %x, i32 %a) {
   entry:
-    %c = icmp eq i32* %x, null
+    %c = icmp eq ptr %x, null
     br i1 %c, label %is_null, label %not_null, !make.implicit !0
   
   is_null:                                          ; preds = %entry
     ret i32 42
   
   not_null:                                         ; preds = %entry
-    %x.loc = getelementptr i32, i32* %x, i32 1
-    %y = ptrtoint i32* %x.loc to i32
+    %x.loc = getelementptr i32, ptr %x, i32 1
+    %y = ptrtoint ptr %x.loc to i32
     %b = add i32 %a, %y
-    %t = load i32, i32* %x
+    %t = load i32, ptr %x
     %z = add i32 %t, %b
     ret i32 %z
   }
 
-  define i32 @imp_null_check_load_with_base_sep(i32* %x, i32 %a) {
+  define i32 @imp_null_check_load_with_base_sep(ptr %x, i32 %a) {
   entry:
-    %c = icmp eq i32* %x, null
+    %c = icmp eq ptr %x, null
     br i1 %c, label %is_null, label %not_null, !make.implicit !0
   
   is_null:                                          ; preds = %entry
@@ -172,9 +172,9 @@
     ret i32 undef
   }
 
-  define void @inc_store(i32* %ptr, i32 %val) {
+  define void @inc_store(ptr %ptr, i32 %val) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -184,9 +184,9 @@
     ret void
   }
 
-  define void @inc_store_plus_offset(i32* %ptr, i32 %val) {
+  define void @inc_store_plus_offset(ptr %ptr, i32 %val) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -196,9 +196,9 @@
     ret void
   }
 
-  define void @inc_store_with_dep(i32* %ptr, i32 %val) {
+  define void @inc_store_with_dep(ptr %ptr, i32 %val) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -208,9 +208,9 @@
     ret void
   }
 
-  define i32 @inc_store_with_dep_in_null(i32* %ptr, i32 %val) {
+  define i32 @inc_store_with_dep_in_null(ptr %ptr, i32 %val) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -220,9 +220,9 @@
     ret i32 undef
   }
 
-  define void @inc_store_with_volatile(i32* %ptr, i32 %val) {
+  define void @inc_store_with_volatile(ptr %ptr, i32 %val) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -232,9 +232,9 @@
     ret void
   }
 
-  define void @inc_store_with_two_dep(i32* %ptr, i32 %val) {
+  define void @inc_store_with_two_dep(ptr %ptr, i32 %val) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -244,9 +244,9 @@
     ret void
   }
 
-  define void @inc_store_with_redefined_base(i32* %ptr, i32 %val) {
+  define void @inc_store_with_redefined_base(ptr %ptr, i32 %val) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -256,9 +256,9 @@
     ret void
   }
 
-  define i32 @inc_store_with_reused_base(i32* %ptr, i32 %val) {
+  define i32 @inc_store_with_reused_base(ptr %ptr, i32 %val) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -268,9 +268,9 @@
     ret i32 undef
   }
 
-  define i32 @inc_store_across_call(i32* %ptr) {
+  define i32 @inc_store_across_call(ptr %ptr) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -281,9 +281,9 @@
     ret i32 undef
   }
 
-  define i32 @inc_store_with_dep_in_dep(i32* %ptr, i32 %val) {
+  define i32 @inc_store_with_dep_in_dep(ptr %ptr, i32 %val) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -293,9 +293,9 @@
     ret i32 undef
   }
 
-  define i32 @inc_store_with_load_over_store(i32* %ptr, i32* %ptr2) {
+  define i32 @inc_store_with_load_over_store(ptr %ptr, ptr %ptr2) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -305,9 +305,9 @@
     ret i32 undef
   }
 
-  define i32 @inc_store_with_store_over_load(i32* %ptr, i32* %ptr2) {
+  define i32 @inc_store_with_store_over_load(ptr %ptr, ptr %ptr2) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -317,9 +317,9 @@
     ret i32 undef
   }
 
-  define void @inc_store_with_store_over_store(i32* %ptr, i32* %ptr2) {
+  define void @inc_store_with_store_over_store(ptr %ptr, ptr %ptr2) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -329,9 +329,9 @@
     ret void
   }
 
-  define void @inc_store_with_load_and_store(i32* %ptr, i32* %ptr2) {
+  define void @inc_store_with_load_and_store(ptr %ptr, ptr %ptr2) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -341,9 +341,9 @@
     ret void
   }
 
-  define i32 @inc_store_and_load_no_alias(i32* noalias %ptr, i32* noalias %ptr2) {
+  define i32 @inc_store_and_load_no_alias(ptr noalias %ptr, ptr noalias %ptr2) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -353,9 +353,9 @@
     ret i32 undef
   }
 
-  define i32 @inc_store_and_load_alias(i32* %ptr, i32* %ptr2) {
+  define i32 @inc_store_and_load_alias(ptr %ptr, ptr %ptr2) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -365,9 +365,9 @@
     ret i32 undef
   }
 
-  define i32 @inc_spill_dep(i32* %ptr, i32 %val) {
+  define i32 @inc_spill_dep(ptr %ptr, i32 %val) {
   entry:
-    %ptr_is_null = icmp eq i32* %ptr, null
+    %ptr_is_null = icmp eq ptr %ptr, null
     br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
 
   not_null:
@@ -377,16 +377,16 @@
     ret i32 undef
   }
 
-  define i32 @imp_null_check_address_mul_overflow(i32* %x, i32 %a) {
+  define i32 @imp_null_check_address_mul_overflow(ptr %x, i32 %a) {
   entry:
-    %c = icmp eq i32* %x, null
+    %c = icmp eq ptr %x, null
     br i1 %c, label %is_null, label %not_null, !make.implicit !0
   
   is_null:                                          ; preds = %entry
     ret i32 42
   
   not_null:                                         ; preds = %entry
-    %y = ptrtoint i32* %x to i32
+    %y = ptrtoint ptr %x to i32
     %y64 = zext i32 %y to i64
     %b = mul i64 %y64, 9223372036854775807 ; 0X0FFFF.. i.e. 2^63 - 1
     %z = trunc i64 %b to i32

diff --git a/llvm/test/CodeGen/X86/implicit-null-chk-reg-rewrite.mir b/llvm/test/CodeGen/X86/implicit-null-chk-reg-rewrite.mir
index 6330e8c2564d8..d5afd3df0e73d 100644
--- a/llvm/test/CodeGen/X86/implicit-null-chk-reg-rewrite.mir
+++ b/llvm/test/CodeGen/X86/implicit-null-chk-reg-rewrite.mir
@@ -1,7 +1,7 @@
 # RUN: llc -mtriple=x86_64 -run-pass=implicit-null-checks %s -o - | FileCheck %s
 --- |
 
-  define i32 @reg-rewrite(i32* %x) {
+  define i32 @reg-rewrite(ptr %x) {
   entry:
     br i1 undef, label %is_null, label %not_null, !make.implicit !0
 

diff --git a/llvm/test/CodeGen/X86/late-remat-update.mir b/llvm/test/CodeGen/X86/late-remat-update.mir
index 4c954240c0a4a..da7b9e11820d1 100644
--- a/llvm/test/CodeGen/X86/late-remat-update.mir
+++ b/llvm/test/CodeGen/X86/late-remat-update.mir
@@ -37,7 +37,7 @@
   declare void @_Z3gooi(i32) local_unnamed_addr #1
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
   
   attributes #0 = { noreturn uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff --git a/llvm/test/CodeGen/X86/lea-opt-with-debug.mir b/llvm/test/CodeGen/X86/lea-opt-with-debug.mir
index 31774ccbf769e..8804fe0caed5c 100644
--- a/llvm/test/CodeGen/X86/lea-opt-with-debug.mir
+++ b/llvm/test/CodeGen/X86/lea-opt-with-debug.mir
@@ -9,26 +9,26 @@
 
   %struct.A = type { i32, i32, i32 }
 
-  @c = common local_unnamed_addr global %struct.A* null, align 8
+  @c = common local_unnamed_addr global ptr null, align 8
   @a = common local_unnamed_addr global i32 0, align 4
   @d = common local_unnamed_addr global i32 0, align 4
   @b = common local_unnamed_addr global i32 0, align 4
 
   define i32 @fn1() local_unnamed_addr !dbg !8 {
-    %1 = load %struct.A*, %struct.A** @c, align 8, !dbg !13
-    %2 = load i32, i32* @a, align 4, !dbg !13
+    %1 = load ptr, ptr @c, align 8, !dbg !13
+    %2 = load i32, ptr @a, align 4, !dbg !13
     %3 = sext i32 %2 to i64, !dbg !13
-    %4 = getelementptr inbounds %struct.A, %struct.A* %1, i64 %3, !dbg !13
-    %5 = ptrtoint %struct.A* %4 to i64, !dbg !13
+    %4 = getelementptr inbounds %struct.A, ptr %1, i64 %3, !dbg !13
+    %5 = ptrtoint ptr %4 to i64, !dbg !13
     %6 = trunc i64 %5 to i32, !dbg !13
-    store i32 %6, i32* @d, align 4, !dbg !13
-    %7 = getelementptr inbounds %struct.A, %struct.A* %1, i64 %3, i32 2, !dbg !14
-    tail call void @llvm.dbg.value(metadata i32* %7, i64 0, metadata !11, metadata !DIExpression()), !dbg !15
+    store i32 %6, ptr @d, align 4, !dbg !13
+    %7 = getelementptr inbounds %struct.A, ptr %1, i64 %3, i32 2, !dbg !14
+    tail call void @llvm.dbg.value(metadata ptr %7, i64 0, metadata !11, metadata !DIExpression()), !dbg !15
     br label %8, !dbg !16
 
   ; <label>:8:                                      ; preds = %8, %0
-    %9 = load i32, i32* %7, align 4, !dbg !17
-    store i32 %9, i32* @d, align 4, !dbg !17
+    %9 = load i32, ptr %7, align 4, !dbg !17
+    store i32 %9, ptr @d, align 4, !dbg !17
     br label %8, !dbg !18
   }
 

diff --git a/llvm/test/CodeGen/X86/limit-split-cost.mir b/llvm/test/CodeGen/X86/limit-split-cost.mir
index 6d32afd71253d..1fec3d53c0bbd 100644
--- a/llvm/test/CodeGen/X86/limit-split-cost.mir
+++ b/llvm/test/CodeGen/X86/limit-split-cost.mir
@@ -27,19 +27,19 @@
     ]
   
   sw.bb:                                            ; preds = %do.body
-    tail call void @_Z3gooPKc(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0))
+    tail call void @_Z3gooPKc(ptr @.str)
     br label %sw.bb1
   
   sw.bb1:                                           ; preds = %sw.bb, %do.body
-    tail call void @_Z3gooPKc(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.1, i64 0, i64 0))
+    tail call void @_Z3gooPKc(ptr @.str.1)
     br label %sw.bb2
   
   sw.bb2:                                           ; preds = %sw.bb1, %do.body
-    tail call void @_Z3gooPKc(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.2, i64 0, i64 0))
+    tail call void @_Z3gooPKc(ptr @.str.2)
     br label %do.cond
   
   do.cond:                                          ; preds = %sw.bb2, %do.body
-    %0 = load i32, i32* @m, align 4, !tbaa !4
+    %0 = load i32, ptr @m, align 4, !tbaa !4
     %cmp = icmp eq i32 %0, 5
     br i1 %cmp, label %do.end, label %do.body
   
@@ -47,10 +47,10 @@
     ret void
   }
   
-  declare void @_Z3gooPKc(i8*) local_unnamed_addr #1
+  declare void @_Z3gooPKc(ptr) local_unnamed_addr #1
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #2
+  declare void @llvm.stackprotector(ptr, ptr) #2
   
   attributes #0 = { uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
   attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff --git a/llvm/test/CodeGen/X86/machine-cp-mask-reg.mir b/llvm/test/CodeGen/X86/machine-cp-mask-reg.mir
index 1f697c68a86c4..e3b12a81b225e 100644
--- a/llvm/test/CodeGen/X86/machine-cp-mask-reg.mir
+++ b/llvm/test/CodeGen/X86/machine-cp-mask-reg.mir
@@ -9,11 +9,11 @@
   source_filename = "test.ll"
   target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
 
-  define i8 @foo(<64 x i8> %x, i64* %y, i64 %z) #0 {
+  define i8 @foo(<64 x i8> %x, ptr %y, i64 %z) #0 {
     %a = icmp eq <64 x i8> %x, zeroinitializer
     %b = bitcast <64 x i1> %a to i64
     %c = add i64 %b, %z
-    store i64 %c, i64* %y, align 8
+    store i64 %c, ptr %y, align 8
     %d = extractelement <64 x i1> %a, i32 0
     %e = zext i1 %d to i8
     ret i8 %e

diff --git a/llvm/test/CodeGen/X86/movtopush.mir b/llvm/test/CodeGen/X86/movtopush.mir
index e7ed68de6712b..ddd30168a4066 100644
--- a/llvm/test/CodeGen/X86/movtopush.mir
+++ b/llvm/test/CodeGen/X86/movtopush.mir
@@ -9,7 +9,7 @@
 
   declare void @good(i32, i32, i32, i32)
 
-  declare void @struct(%struct.s* byval(%struct.s), i32, i32, i32)
+  declare void @struct(ptr byval(%struct.s), i32, i32, i32)
 
   ; Function Attrs: optsize
   define void @test9() #0 {
@@ -18,14 +18,14 @@
     %q = alloca i32, align 4
     %s = alloca %struct.s, align 4
     call void @good(i32 1, i32 2, i32 3, i32 4)
-    %pv = ptrtoint i32* %p to i32
-    %qv = ptrtoint i32* %q to i32
-    call void @struct(%struct.s* byval(%struct.s) %s, i32 6, i32 %qv, i32 %pv)
+    %pv = ptrtoint ptr %p to i32
+    %qv = ptrtoint ptr %q to i32
+    call void @struct(ptr byval(%struct.s) %s, i32 6, i32 %qv, i32 %pv)
     ret void
   }
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
 
   attributes #0 = { optsize }
   attributes #1 = { nounwind }

diff --git a/llvm/test/CodeGen/X86/peephole-fold-testrr.mir b/llvm/test/CodeGen/X86/peephole-fold-testrr.mir
index fe874ce6f58e8..99df2bc0b3436 100644
--- a/llvm/test/CodeGen/X86/peephole-fold-testrr.mir
+++ b/llvm/test/CodeGen/X86/peephole-fold-testrr.mir
@@ -5,16 +5,16 @@
   target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
   target triple = "x86_64-unknown-linux-gnu"
 
-  define i32 @atomic(i8** %arg) {
-    %load = load atomic i8*, i8** %arg unordered, align 8
-    %cmp = icmp eq i8* %load, null
+  define i32 @atomic(ptr %arg) {
+    %load = load atomic ptr, ptr %arg unordered, align 8
+    %cmp = icmp eq ptr %load, null
     %zext = zext i1 %cmp to i32
     ret i32 %zext
   }
 
-  define i32 @nonatomic_unoptimized(i8** %arg) {
-    %load = load i8*, i8** %arg, align 8
-    %cmp = icmp eq i8* %load, null
+  define i32 @nonatomic_unoptimized(ptr %arg) {
+    %load = load ptr, ptr %arg, align 8
+    %cmp = icmp eq ptr %load, null
     %zext = zext i1 %cmp to i32
     ret i32 %zext
   }

diff --git a/llvm/test/CodeGen/X86/peephole-recurrence.mir b/llvm/test/CodeGen/X86/peephole-recurrence.mir
index e28f2cd20ed5c..9075a0e4a8722 100644
--- a/llvm/test/CodeGen/X86/peephole-recurrence.mir
+++ b/llvm/test/CodeGen/X86/peephole-recurrence.mir
@@ -32,7 +32,7 @@
     ret i32 0
   }
 
-  define i32 @bar(i32 %a, i32* %p) {
+  define i32 @bar(i32 %a, ptr %p) {
   bb0:
     br label %bb1
 
@@ -55,7 +55,7 @@
   bb7:                                              ; preds = %bb4, %bb6
     %vreg1 = phi i32 [ 2, %bb6 ], [ 1, %bb4 ]
     %vreg2 = add i32 %vreg5, %vreg0
-    store i32 %vreg0, i32* %p
+    store i32 %vreg0, ptr %p
     %vreg3 = add i32 %vreg1, %vreg2
     %cond2 = icmp slt i32 %vreg3, 10
     br i1 %cond2, label %bb1, label %bb8

diff --git a/llvm/test/CodeGen/X86/postra-ignore-dbg-instrs.mir b/llvm/test/CodeGen/X86/postra-ignore-dbg-instrs.mir
index c0e5b508958a1..7e37415463f45 100644
--- a/llvm/test/CodeGen/X86/postra-ignore-dbg-instrs.mir
+++ b/llvm/test/CodeGen/X86/postra-ignore-dbg-instrs.mir
@@ -18,14 +18,14 @@
 
   define dso_local void @x1(i32) !dbg !11 {
     %2 = alloca i32, align 4
-    store i32 %0, i32* %2, align 4
-    call void @llvm.dbg.declare(metadata i32* %2, metadata !14, metadata !DIExpression()), !dbg !16
-    %3 = load i32, i32* @x0, align 4, !dbg !16
+    store i32 %0, ptr %2, align 4
+    call void @llvm.dbg.declare(metadata ptr %2, metadata !14, metadata !DIExpression()), !dbg !16
+    %3 = load i32, ptr @x0, align 4, !dbg !16
     %4 = icmp ne i32 %3, 0, !dbg !16
     br i1 %4, label %5, label %7, !dbg !16
 
   ; <label>:5:                                      ; preds = %1
-    %6 = load i32, i32* %2, align 4, !dbg !16
+    %6 = load i32, ptr %2, align 4, !dbg !16
     call void @x3(i32 0, i32 %6), !dbg !16
     br label %7, !dbg !16
 

diff --git a/llvm/test/CodeGen/X86/pr38952.mir b/llvm/test/CodeGen/X86/pr38952.mir
index f085f6601396f..d67174aa4847c 100644
--- a/llvm/test/CodeGen/X86/pr38952.mir
+++ b/llvm/test/CodeGen/X86/pr38952.mir
@@ -7,7 +7,7 @@
   target triple = "x86_64-unknown-linux-gnu"
   
   ; Function Attrs: noinline norecurse nounwind uwtable
-  define dso_local i32 @main(i32 %argc, i8** nocapture readnone %argv) local_unnamed_addr #0 {
+  define dso_local i32 @main(i32 %argc, ptr nocapture readnone %argv) local_unnamed_addr #0 {
   entry:
     br label %if.end
   if.end:

diff --git a/llvm/test/CodeGen/X86/pr51903.mir b/llvm/test/CodeGen/X86/pr51903.mir
index f9d0416874a4c..1986e9d815ef0 100644
--- a/llvm/test/CodeGen/X86/pr51903.mir
+++ b/llvm/test/CodeGen/X86/pr51903.mir
@@ -16,21 +16,21 @@
     br label %for.body
 
   for.body:                                         ; preds = %for.body, %entry
-    %0 = load i32, i32* @a, align 4
+    %0 = load i32, ptr @a, align 4
     %conv = sext i32 %0 to i64
-    %1 = load i64, i64* undef, align 8
+    %1 = load i64, ptr undef, align 8
     %or = or i64 %1, %conv
-    store i64 %or, i64* undef, align 8
+    store i64 %or, ptr undef, align 8
     call void @e()
     %cmp4 = icmp eq i64 %or, 0
     %conv5 = zext i1 %cmp4 to i32
     %conv6 = trunc i32 %conv5 to i8
-    store i8 %conv6, i8* undef, align 1
+    store i8 %conv6, ptr undef, align 1
     %conv7 = sext i8 %conv6 to i32
     %bf.cast = trunc i40 undef to i32
     %xor = xor i32 %conv7, %bf.cast
     %conv8 = sext i32 %xor to i64
-    store i64 %conv8, i64* undef, align 8
+    store i64 %conv8, ptr undef, align 8
     br label %for.body
   }
 
@@ -67,19 +67,19 @@ body:             |
   ; CHECK:   [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @a, $noreg :: (load (s64) from got)
   ; CHECK:   [[MOVSX64rm32_:%[0-9]+]]:gr64 = MOVSX64rm32 killed [[MOV64rm]], 1, $noreg, 0, $noreg :: (dereferenceable load (s32) from @a)
   ; CHECK:   [[DEF:%[0-9]+]]:gr64 = IMPLICIT_DEF
-  ; CHECK:   OR64mr [[DEF]], 1, $noreg, 0, $noreg, [[MOVSX64rm32_]], implicit-def $eflags :: (store (s64) into `i64* undef`), (load (s64) from `i64* undef`)
+  ; CHECK:   OR64mr [[DEF]], 1, $noreg, 0, $noreg, [[MOVSX64rm32_]], implicit-def $eflags :: (store (s64) into `ptr undef`), (load (s64) from `ptr undef`)
   ; CHECK:   [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
   ; CHECK:   ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
   ; CHECK:   CALL64pcrel32 target-flags(x86-plt) @e, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp
   ; CHECK:   ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
   ; CHECK:   [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
   ; CHECK:   [[DEF1:%[0-9]+]]:gr64 = IMPLICIT_DEF
-  ; CHECK:   MOV8mr [[DEF1]], 1, $noreg, 0, $noreg, [[SETCCr]] :: (store (s8) into `i8* undef`)
+  ; CHECK:   MOV8mr [[DEF1]], 1, $noreg, 0, $noreg, [[SETCCr]] :: (store (s8) into `ptr undef`)
   ; CHECK:   [[DEF2:%[0-9]+]]:gr32 = IMPLICIT_DEF
   ; CHECK:   [[XOR32rr:%[0-9]+]]:gr32 = XOR32rr [[MOVZX32rr8_]], [[DEF2]], implicit-def $eflags
   ; CHECK:   [[MOVSX64rr32_:%[0-9]+]]:gr64 = MOVSX64rr32 [[XOR32rr]]
   ; CHECK:   [[DEF3:%[0-9]+]]:gr64 = IMPLICIT_DEF
-  ; CHECK:   MOV64mr [[DEF3]], 1, $noreg, 0, $noreg, [[MOVSX64rr32_]] :: (store (s64) into `i64* undef`)
+  ; CHECK:   MOV64mr [[DEF3]], 1, $noreg, 0, $noreg, [[MOVSX64rr32_]] :: (store (s64) into `ptr undef`)
   ; CHECK:   JMP_1 %bb.1
   bb.0.entry:
     JMP_1 %bb.1
@@ -88,7 +88,7 @@ body:             |
     %7:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @a, $noreg :: (load (s64) from got)
     %8:gr64 = MOVSX64rm32 killed %7, 1, $noreg, 0, $noreg :: (dereferenceable load (s32) from @a)
     %9:gr64 = IMPLICIT_DEF
-    OR64mr %9, 1, $noreg, 0, $noreg, %8, implicit-def $eflags :: (store (s64) into `i64* undef`), (load (s64) from `i64* undef`)
+    OR64mr %9, 1, $noreg, 0, $noreg, %8, implicit-def $eflags :: (store (s64) into `ptr undef`), (load (s64) from `ptr undef`)
     %10:gr64 = COPY $eflags
     ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
     CALL64pcrel32 target-flags(x86-plt) @e, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp
@@ -97,12 +97,12 @@ body:             |
     %11:gr8 = SETCCr 4, implicit $eflags
     %4:gr32 = MOVZX32rr8 killed %11
     %12:gr64 = IMPLICIT_DEF
-    SETCCm %12, 1, $noreg, 0, $noreg, 4, implicit $eflags :: (store (s8) into `i8* undef`)
+    SETCCm %12, 1, $noreg, 0, $noreg, 4, implicit $eflags :: (store (s8) into `ptr undef`)
     %5:gr32 = IMPLICIT_DEF
     %6:gr32 = XOR32rr %4, %5, implicit-def $eflags
     %3:gr64 = MOVSX64rr32 %6
     %0:gr64 = IMPLICIT_DEF
-    MOV64mr %0, 1, $noreg, 0, $noreg, %3 :: (store (s64) into `i64* undef`)
+    MOV64mr %0, 1, $noreg, 0, $noreg, %3 :: (store (s64) into `ptr undef`)
     JMP_1 %bb.1
 
 ...

diff  --git a/llvm/test/CodeGen/X86/pre-coalesce.mir b/llvm/test/CodeGen/X86/pre-coalesce.mir
index aa7481adbaef6..925cc4ba221b2 100644
--- a/llvm/test/CodeGen/X86/pre-coalesce.mir
+++ b/llvm/test/CodeGen/X86/pre-coalesce.mir
@@ -6,15 +6,15 @@
   target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
   target triple = "x86_64-unknown-linux-gnu"
   
-  @b = common local_unnamed_addr global i8* null, align 8
+  @b = common local_unnamed_addr global ptr null, align 8
   @a = common local_unnamed_addr global i32 0, align 4
   
   define i32 @foo() local_unnamed_addr {
   entry:
-    %t0 = load i8*, i8** @b, align 8
-    %t1 = load i8, i8* %t0, align 1
+    %t0 = load ptr, ptr @b, align 8
+    %t1 = load i8, ptr %t0, align 1
     %cmp4 = icmp eq i8 %t1, 0
-    %t2 = load i32, i32* @a, align 4
+    %t2 = load i32, ptr @a, align 4
     br i1 %cmp4, label %while.end, label %while.body.preheader
   
   while.body.preheader:                             ; preds = %entry
@@ -26,8 +26,8 @@
     %conv = sext i8 %t4 to i32
     %add = mul i32 %t3, 33
     %add3 = add nsw i32 %add, %conv
-    store i32 %add3, i32* @a, align 4
-    %t5 = load i8, i8* %t0, align 1
+    store i32 %add3, ptr @a, align 4
+    %t5 = load i8, ptr %t0, align 1
     %cmp = icmp eq i8 %t5, 0
     br i1 %cmp, label %while.end, label %while.body
   

diff  --git a/llvm/test/CodeGen/X86/regalloc-copy-hints.mir b/llvm/test/CodeGen/X86/regalloc-copy-hints.mir
index 591ba6402062e..13b5a541fa228 100644
--- a/llvm/test/CodeGen/X86/regalloc-copy-hints.mir
+++ b/llvm/test/CodeGen/X86/regalloc-copy-hints.mir
@@ -5,7 +5,7 @@
 --- |
   define void @fun() { ret void }
 
-  declare noalias nonnull i8* @_Znwj()
+  declare noalias nonnull ptr @_Znwj()
   declare void @_ZNSt3__127__tree_balance_after_insertIPNS_16__tree_node_baseIPvEEEEvT_S5_()
   declare zeroext i1 @_ZN15COLLADASaxFWL1429ColladaParserAutoGen14Private14_end__commentsEv()
   declare zeroext i1 @_ZN15COLLADASaxFWL1429ColladaParserAutoGen14Private15_preEnd__authorEv()

diff  --git a/llvm/test/CodeGen/X86/shrink_wrap_dbg_value.mir b/llvm/test/CodeGen/X86/shrink_wrap_dbg_value.mir
index c71a8e7c2d9a6..aa7befc18d4fe 100644
--- a/llvm/test/CodeGen/X86/shrink_wrap_dbg_value.mir
+++ b/llvm/test/CodeGen/X86/shrink_wrap_dbg_value.mir
@@ -11,7 +11,7 @@
     %c.addr = alloca i32, align 4
     tail call void @llvm.dbg.value(metadata i32 %d, metadata !13, metadata !DIExpression()), !dbg !19
     tail call void @llvm.dbg.value(metadata i32 %c, metadata !14, metadata !DIExpression()), !dbg !20
-    store i32 %c, i32* %c.addr, align 4, !tbaa !21
+    store i32 %c, ptr %c.addr, align 4, !tbaa !21
     tail call void @llvm.dbg.value(metadata i32 %b, metadata !15, metadata !DIExpression()), !dbg !25
     tail call void @llvm.dbg.value(metadata i32 %a, metadata !16, metadata !DIExpression()), !dbg !26
     %cmp = icmp slt i32 %a, %b, !dbg !27
@@ -24,8 +24,8 @@
   for.cond:                                         ; preds = %for.cond, %for.cond.preheader
     %lsr.iv = phi i32 [ %lsr.iv.next, %for.cond ], [ %0, %for.cond.preheader ]
     call void @llvm.dbg.value(metadata i32 undef, metadata !17, metadata !DIExpression()), !dbg !32
-    call void @llvm.dbg.value(metadata i32* %c.addr, metadata !14, metadata !DIExpression()), !dbg !20
-    %call = call i32 @doSomething(i32* nonnull %c.addr) #3, !dbg !33
+    call void @llvm.dbg.value(metadata ptr %c.addr, metadata !14, metadata !DIExpression()), !dbg !20
+    %call = call i32 @doSomething(ptr nonnull %c.addr) #3, !dbg !33
     call void @llvm.dbg.value(metadata !2, metadata !17, metadata !DIExpression()), !dbg !32
     %lsr.iv.next = add i32 %lsr.iv, 1, !dbg !30
     %cmp1 = icmp slt i32 %lsr.iv.next, %d, !dbg !30
@@ -36,13 +36,13 @@
     ret i32 %retval.0, !dbg !37
   }
   
-  declare i32 @doSomething(i32*) local_unnamed_addr
+  declare i32 @doSomething(ptr) local_unnamed_addr
   
   ; Function Attrs: nounwind readnone speculatable
   declare void @llvm.dbg.value(metadata, metadata, metadata) #2
   
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #3
+  declare void @llvm.stackprotector(ptr, ptr) #3
   
   attributes #0 = { nounwind }
   attributes #2 = { nounwind readnone speculatable }

diff  --git a/llvm/test/CodeGen/X86/stack-folding-bmi2.mir b/llvm/test/CodeGen/X86/stack-folding-bmi2.mir
index 60892ba0e5e46..0fdba213cd615 100644
--- a/llvm/test/CodeGen/X86/stack-folding-bmi2.mir
+++ b/llvm/test/CodeGen/X86/stack-folding-bmi2.mir
@@ -25,7 +25,7 @@
   }
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
 
   attributes #0 = { nounwind "target-features"="+bmi2" }
   attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/X86/stack-folding-fp-nofpexcept.mir b/llvm/test/CodeGen/X86/stack-folding-fp-nofpexcept.mir
index 017594dca8b9e..a0e1cb9408764 100644
--- a/llvm/test/CodeGen/X86/stack-folding-fp-nofpexcept.mir
+++ b/llvm/test/CodeGen/X86/stack-folding-fp-nofpexcept.mir
@@ -10,7 +10,7 @@
     ret <2 x double> %1
   }
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #0
+  declare void @llvm.stackprotector(ptr, ptr) #0
 
   attributes #0 = { nounwind }
 

diff  --git a/llvm/test/CodeGen/X86/statepoint-fixup-call.mir b/llvm/test/CodeGen/X86/statepoint-fixup-call.mir
index 797f43d2054d9..6ab95c2ebd0c9 100644
--- a/llvm/test/CodeGen/X86/statepoint-fixup-call.mir
+++ b/llvm/test/CodeGen/X86/statepoint-fixup-call.mir
@@ -8,20 +8,20 @@
 
   declare void @foo()
 
-  define i8 addrspace(1)* @test_one(i8 addrspace(1)* %p) gc "statepoint-example" {
+  define ptr addrspace(1) @test_one(ptr addrspace(1) %p) gc "statepoint-example" {
   entry:
-    %token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @foo, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i8 addrspace(1)* %p) ]
-    %p2 = call i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %token, i32 0, i32 0) ; (%p, %p)
-    ret i8 addrspace(1)* %p2
+    %token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @foo, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) %p) ]
+    %p2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %token, i32 0, i32 0) ; (%p, %p)
+    ret ptr addrspace(1) %p2
   }
 
-  declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 immarg, i32 immarg, void ()*, i32 immarg, i32 immarg, ...)
+  declare token @llvm.experimental.gc.statepoint.p0(i64 immarg, i32 immarg, ptr, i32 immarg, i32 immarg, ...)
 
   ; Function Attrs: nounwind readonly
-  declare i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token, i32 immarg, i32 immarg) #0
+  declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32 immarg, i32 immarg) #0
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
 
   attributes #0 = { nounwind readonly }
   attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/X86/statepoint-fixup-copy-prop-neg.mir b/llvm/test/CodeGen/X86/statepoint-fixup-copy-prop-neg.mir
index 51ec98181ffd4..c87b287241518 100644
--- a/llvm/test/CodeGen/X86/statepoint-fixup-copy-prop-neg.mir
+++ b/llvm/test/CodeGen/X86/statepoint-fixup-copy-prop-neg.mir
@@ -12,17 +12,17 @@
 
   declare void @foo(i64)
 
-  define i8 addrspace(1)* @test(i64 %a, i64 %b, i64 %c, i8 addrspace(1)* %p) gc "statepoint-example" {
+  define ptr addrspace(1) @test(i64 %a, i64 %b, i64 %c, ptr addrspace(1) %p) gc "statepoint-example" {
   entry:
-    %token = call token (i64, i32, void (i64)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidi64f(i64 0, i32 0, void (i64)* elementtype(void (i64)) @foo, i32 1, i32 0, i64 %b, i3 0, i32 0) [ "deopt"(i64 %b), "gc-live"(i8 addrspace(1)* %p) ]
-    %p2 = call i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %token, i32 0, i32 0) ; (%p, %p)
-    ret i8 addrspace(1)* %p2
+    %token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void (i64)) @foo, i32 1, i32 0, i64 %b, i3 0, i32 0) [ "deopt"(i64 %b), "gc-live"(ptr addrspace(1) %p) ]
+    %p2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %token, i32 0, i32 0) ; (%p, %p)
+    ret ptr addrspace(1) %p2
   }
 
   ; Function Attrs: nounwind readonly
-  declare i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token, i32 immarg, i32 immarg) #0
+  declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32 immarg, i32 immarg) #0
 
-  declare token @llvm.experimental.gc.statepoint.p0f_isVoidi64f(i64 immarg, i32 immarg, void (i64)*, i32 immarg, i32 immarg, ...)
+  declare token @llvm.experimental.gc.statepoint.p0(i64 immarg, i32 immarg, ptr, i32 immarg, i32 immarg, ...)
 
   attributes #0 = { nounwind readonly }
 

diff  --git a/llvm/test/CodeGen/X86/statepoint-fixup-copy-prop.mir b/llvm/test/CodeGen/X86/statepoint-fixup-copy-prop.mir
index d03c3ebce5622..6b3d3db98b9f3 100644
--- a/llvm/test/CodeGen/X86/statepoint-fixup-copy-prop.mir
+++ b/llvm/test/CodeGen/X86/statepoint-fixup-copy-prop.mir
@@ -6,8 +6,8 @@
 
   declare void @foo(i64)
 
-  define i8 addrspace(1)* @test_cp(i64 %a, i64 %b, i64 %c, i8 addrspace(1)* %p) gc "statepoint-example" {
-    ret i8 addrspace(1)* undef
+  define ptr addrspace(1) @test_cp(i64 %a, i64 %b, i64 %c, ptr addrspace(1) %p) gc "statepoint-example" {
+    ret ptr addrspace(1) undef
   }
 ...
 ---

diff  --git a/llvm/test/CodeGen/X86/statepoint-fixup-invoke.mir b/llvm/test/CodeGen/X86/statepoint-fixup-invoke.mir
index 3da698191ed2e..f29dbfaf0622a 100644
--- a/llvm/test/CodeGen/X86/statepoint-fixup-invoke.mir
+++ b/llvm/test/CodeGen/X86/statepoint-fixup-invoke.mir
@@ -7,38 +7,38 @@
   target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
   target triple = "x86_64-pc-linux-gnu"
 
-  declare void @some_call(i64 addrspace(1)*)
+  declare void @some_call(ptr addrspace(1))
 
   declare i32 @personality_function()
 
-  define i64 addrspace(1)* @test_basic(i64 addrspace(1)* %obj, i64 addrspace(1)* %obj1) gc "statepoint-example" personality i32 ()* @personality_function {
+  define ptr addrspace(1) @test_basic(ptr addrspace(1) %obj, ptr addrspace(1) %obj1) gc "statepoint-example" personality ptr @personality_function {
   entry:
-    %0 = invoke token (i64, i32, void (i64 addrspace(1)*)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidp1i64f(i64 0, i32 0, void (i64 addrspace(1)*)* elementtype(void (i64 addrspace(1)*)) @some_call, i32 1, i32 0, i64 addrspace(1)* %obj, i32 0, i32 0) [ "gc-live"(i64 addrspace(1)* %obj, i64 addrspace(1)* %obj1), "deopt"(i32 0, i32 -1, i32 0, i32 0, i32 0) ]
+    %0 = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void (ptr addrspace(1))) @some_call, i32 1, i32 0, ptr addrspace(1) %obj, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) %obj, ptr addrspace(1) %obj1), "deopt"(i32 0, i32 -1, i32 0, i32 0, i32 0) ]
             to label %invoke_safepoint_normal_dest unwind label %exceptional_return
 
   invoke_safepoint_normal_dest:                     ; preds = %entry
-    %obj.relocated = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(token %0, i32 0, i32 0) ; (%obj, %obj)
-    %obj1.relocated = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(token %0, i32 1, i32 1) ; (%obj1, %obj1)
+    %obj.relocated = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %0, i32 0, i32 0) ; (%obj, %obj)
+    %obj1.relocated = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %0, i32 1, i32 1) ; (%obj1, %obj1)
     br label %normal_return
 
   normal_return:                                    ; preds = %invoke_safepoint_normal_dest
-    ret i64 addrspace(1)* %obj.relocated
+    ret ptr addrspace(1) %obj.relocated
 
   exceptional_return:                               ; preds = %entry
     %landing_pad = landingpad token
             cleanup
-    %obj.relocated1 = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(token %landing_pad, i32 0, i32 0) ; (%obj, %obj)
-    %obj1.relocated1 = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(token %landing_pad, i32 1, i32 1) ; (%obj1, %obj1)
-    ret i64 addrspace(1)* %obj1.relocated1
+    %obj.relocated1 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %landing_pad, i32 0, i32 0) ; (%obj, %obj)
+    %obj1.relocated1 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %landing_pad, i32 1, i32 1) ; (%obj1, %obj1)
+    ret ptr addrspace(1) %obj1.relocated1
   }
 
-  declare token @llvm.experimental.gc.statepoint.p0f_isVoidp1i64f(i64 immarg, i32 immarg, void (i64 addrspace(1)*)*, i32 immarg, i32 immarg, ...)
+  declare token @llvm.experimental.gc.statepoint.p0(i64 immarg, i32 immarg, ptr, i32 immarg, i32 immarg, ...)
 
   ; Function Attrs: nounwind readonly
-  declare i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(token, i32 immarg, i32 immarg) #0
+  declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32 immarg, i32 immarg) #0
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
 
   attributes #0 = { nounwind readonly }
   attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/X86/statepoint-fixup-shared-ehpad.mir b/llvm/test/CodeGen/X86/statepoint-fixup-shared-ehpad.mir
index 0b59e32f15f18..d16c3d93cfc20 100644
--- a/llvm/test/CodeGen/X86/statepoint-fixup-shared-ehpad.mir
+++ b/llvm/test/CodeGen/X86/statepoint-fixup-shared-ehpad.mir
@@ -17,44 +17,44 @@
 
   declare i32 @personality_function()
 
-  define i8 addrspace(1)* @test_one(i32 %a, i8 addrspace(1)* %p, i8 addrspace(1)* %q) gc "statepoint-example" personality i32 ()* @personality_function {
+  define ptr addrspace(1) @test_one(i32 %a, ptr addrspace(1) %p, ptr addrspace(1) %q) gc "statepoint-example" personality ptr @personality_function {
   entry:
     %cmp = icmp eq i32 %a, 0
     br i1 %cmp, label %zero, label %nonzero
 
   zero:                                             ; preds = %entry
-    %token = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @foo, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i8 addrspace(1)* %p, i8 addrspace(1)* %q) ]
+    %token = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @foo, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) %p, ptr addrspace(1) %q) ]
             to label %normal_dest_a unwind label %exceptional_return_a
 
   nonzero:                                          ; preds = %entry
-    %token2 = invoke token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @bar, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i8 addrspace(1)* %p, i8 addrspace(1)* %q) ]
+    %token2 = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @bar, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) %p, ptr addrspace(1) %q) ]
             to label %normal_dest_b unwind label %exceptional_return_b
 
   normal_dest_a:                                    ; preds = %zero
-    %p2 = call i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %token, i32 0, i32 0) ; (%p, %p)
-    ret i8 addrspace(1)* %p2
+    %p2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %token, i32 0, i32 0) ; (%p, %p)
+    ret ptr addrspace(1) %p2
 
   exceptional_return_a:                             ; preds = %zero
     %landing_pad = landingpad token
             cleanup
-    %q2 = call i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %landing_pad, i32 1, i32 1) ; (%q, %q)
-    ret i8 addrspace(1)* %q2
+    %q2 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %landing_pad, i32 1, i32 1) ; (%q, %q)
+    ret ptr addrspace(1) %q2
 
   normal_dest_b:                                    ; preds = %nonzero
-    %p3 = call i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %token2, i32 0, i32 0) ; (%p, %p)
-    ret i8 addrspace(1)* %p3
+    %p3 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %token2, i32 0, i32 0) ; (%p, %p)
+    ret ptr addrspace(1) %p3
 
   exceptional_return_b:                             ; preds = %nonzero
     %landing_pad2 = landingpad token
             cleanup
-    %q3 = call i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %landing_pad2, i32 1, i32 1) ; (%q, %q)
-    ret i8 addrspace(1)* %q3
+    %q3 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %landing_pad2, i32 1, i32 1) ; (%q, %q)
+    ret ptr addrspace(1) %q3
   }
 
-  declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 immarg, i32 immarg, void ()*, i32 immarg, i32 immarg, ...)
+  declare token @llvm.experimental.gc.statepoint.p0(i64 immarg, i32 immarg, ptr, i32 immarg, i32 immarg, ...)
 
   ; Function Attrs: nounwind readonly
-  declare i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token, i32 immarg, i32 immarg) #0
+  declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32 immarg, i32 immarg) #0
 
   attributes #0 = { nounwind readonly }
   attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/X86/statepoint-fixup-undef-def.mir b/llvm/test/CodeGen/X86/statepoint-fixup-undef-def.mir
index f7f4c1e987492..7c48625e99353 100644
--- a/llvm/test/CodeGen/X86/statepoint-fixup-undef-def.mir
+++ b/llvm/test/CodeGen/X86/statepoint-fixup-undef-def.mir
@@ -7,33 +7,33 @@
   target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
   target triple = "x86_64-unknown-linux-gnu"
 
-  define void @test_undef(i8 addrspace(1)* %arg1, i8 addrspace(1)* %arg2, i8 addrspace(1)* %arg3, i8 addrspace(1)* %arg4) #0 gc "statepoint-example" {
+  define void @test_undef(ptr addrspace(1) %arg1, ptr addrspace(1) %arg2, ptr addrspace(1) %arg3, ptr addrspace(1) %arg4) #0 gc "statepoint-example" {
   bb:
     %tmp1 = lshr i32 0, undef
-    %tmp2 = load atomic i32, i32 addrspace(1)* undef unordered, align 8
-    %tmp3 = load atomic i32, i32 addrspace(1)* undef unordered, align 8
-    %tmp19 = inttoptr i64 undef to i8 addrspace(1)*
+    %tmp2 = load atomic i32, ptr addrspace(1) undef unordered, align 8
+    %tmp3 = load atomic i32, ptr addrspace(1) undef unordered, align 8
+    %tmp19 = inttoptr i64 undef to ptr addrspace(1)
     br label %bb7
 
   bb7:                                              ; preds = %bb
     %tmp4 = icmp slt i32 %tmp3, undef
     %tmp5 = select i1 %tmp4, i32 6, i32 undef
     %tmp6 = add i32 %tmp5, %tmp2
-    %tmp7 = call i8 addrspace(1)* @wombat()
-    %tmp20 = call token (i64, i32, void (i8 addrspace(1)*, i32, i32, i8 addrspace(1)*, i32)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidp1i8i32i32p1i8i32f(i64 2, i32 5, void (i8 addrspace(1)*, i32, i32, i8 addrspace(1)*, i32)* nonnull elementtype(void (i8 addrspace(1)*, i32, i32, i8 addrspace(1)*, i32)) @hoge, i32 5, i32 0, i8 addrspace(1)* %tmp19, i32 %tmp2, i32 %tmp6, i8 addrspace(1)* %tmp7, i32 0, i32 0, i32 0) [ "deopt"(i8 addrspace(1)* %tmp19, i8 addrspace(1)* %arg1, i8 addrspace(1)* %arg3, i8 addrspace(1)* %arg4, i32 %tmp2, i32 %tmp1, i32 %tmp5), "gc-live"(i8 addrspace(1)* %tmp19, i8 addrspace(1)* %arg2) ]
-    %tmp21 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tmp20, i32 0, i32 0) ; (%tmp19, %tmp19)
-    %tmp22 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tmp20, i32 1, i32 1) ; (%arg2, %arg2)
+    %tmp7 = call ptr addrspace(1) @wombat()
+    %tmp20 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2, i32 5, ptr nonnull elementtype(void (ptr addrspace(1), i32, i32, ptr addrspace(1), i32)) @hoge, i32 5, i32 0, ptr addrspace(1) %tmp19, i32 %tmp2, i32 %tmp6, ptr addrspace(1) %tmp7, i32 0, i32 0, i32 0) [ "deopt"(ptr addrspace(1) %tmp19, ptr addrspace(1) %arg1, ptr addrspace(1) %arg3, ptr addrspace(1) %arg4, i32 %tmp2, i32 %tmp1, i32 %tmp5), "gc-live"(ptr addrspace(1) %tmp19, ptr addrspace(1) %arg2) ]
+    %tmp21 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tmp20, i32 0, i32 0) ; (%tmp19, %tmp19)
+    %tmp22 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tmp20, i32 1, i32 1) ; (%arg2, %arg2)
     ret void
   }
 
-  declare void @hoge(i8 addrspace(1)*, i32, i32, i8 addrspace(1)*, i32) #0
+  declare void @hoge(ptr addrspace(1), i32, i32, ptr addrspace(1), i32) #0
 
-  declare i8 addrspace(1)* @wombat() #0
+  declare ptr addrspace(1) @wombat() #0
 
   ; Function Attrs: nounwind readonly
-  declare i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token, i32 immarg, i32 immarg) #1
+  declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32 immarg, i32 immarg) #1
 
-  declare token @llvm.experimental.gc.statepoint.p0f_isVoidp1i8i32i32p1i8i32f(i64 immarg, i32 immarg, void (i8 addrspace(1)*, i32, i32, i8 addrspace(1)*, i32)*, i32 immarg, i32 immarg, ...)
+  declare token @llvm.experimental.gc.statepoint.p0(i64 immarg, i32 immarg, ptr, i32 immarg, i32 immarg, ...)
 
   attributes #0 = { "target-cpu"="broadwell" }
   attributes #1 = { nounwind readonly }
@@ -98,7 +98,7 @@ body:             |
   ; CHECK:   renamable $r12 = COPY $rdx
   ; CHECK:   renamable $r14 = COPY $rsi
   ; CHECK:   renamable $r13 = COPY $rdi
-  ; CHECK:   renamable $ebx = MOV32rm undef renamable $rax, 1, $noreg, 0, $noreg, implicit-def $rbx :: (load unordered (s32) from `i32 addrspace(1)* undef`, align 8, addrspace 1)
+  ; CHECK:   renamable $ebx = MOV32rm undef renamable $rax, 1, $noreg, 0, $noreg, implicit-def $rbx :: (load unordered (s32) from `ptr addrspace(1) undef`, align 8, addrspace 1)
   ; CHECK: bb.1.bb7:
   ; CHECK:   liveins: $rbx, $r12, $r13, $r14, $r15
   ; CHECK:   renamable $ebp = LEA64_32r renamable $rbx, 1, $noreg, 6, $noreg
@@ -126,7 +126,7 @@ body:             |
     renamable $r12 = COPY $rdx
     renamable $r14 = COPY $rsi
     renamable $r13 = COPY $rdi
-    renamable $ebx = MOV32rm undef renamable $rax, 1, $noreg, 0, $noreg, implicit-def $rbx :: (load unordered (s32) from `i32 addrspace(1)* undef`, align 8, addrspace 1)
+    renamable $ebx = MOV32rm undef renamable $rax, 1, $noreg, 0, $noreg, implicit-def $rbx :: (load unordered (s32) from `ptr addrspace(1) undef`, align 8, addrspace 1)
 
   bb.1.bb7:
     liveins: $rbx, $r12, $r13, $r14, $r15

diff  --git a/llvm/test/CodeGen/X86/statepoint-fixup-undef.mir b/llvm/test/CodeGen/X86/statepoint-fixup-undef.mir
index e06c1dfa19110..0adccba88a327 100644
--- a/llvm/test/CodeGen/X86/statepoint-fixup-undef.mir
+++ b/llvm/test/CodeGen/X86/statepoint-fixup-undef.mir
@@ -8,30 +8,30 @@
   target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
   target triple = "x86_64-unknown-linux-gnu"
 
-  define void @test_undef(i8 addrspace(1)* %arg1, i8 addrspace(1)* %arg2, i8 addrspace(1)* %arg3, i8 addrspace(1)* %arg4) #0 gc "statepoint-example" {
+  define void @test_undef(ptr addrspace(1) %arg1, ptr addrspace(1) %arg2, ptr addrspace(1) %arg3, ptr addrspace(1) %arg4) #0 gc "statepoint-example" {
   bb:
     %tmp1 = lshr i32 0, undef
-    %tmp2 = load atomic i32, i32 addrspace(1)* undef unordered, align 8
-    %tmp3 = load atomic i32, i32 addrspace(1)* undef unordered, align 8
+    %tmp2 = load atomic i32, ptr addrspace(1) undef unordered, align 8
+    %tmp3 = load atomic i32, ptr addrspace(1) undef unordered, align 8
     br label %bb7
 
   bb7:                                              ; preds = %bb
     %tmp4 = icmp slt i32 %tmp3, undef
     %tmp5 = select i1 %tmp4, i32 6, i32 undef
     %tmp6 = add i32 %tmp5, %tmp2
-    %tmp7 = call i8 addrspace(1)* @wombat()
-    %tmp20 = call token (i64, i32, void (i8 addrspace(1)*, i32, i32, i8 addrspace(1)*, i32)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidp1i8i32i32p1i8i32f(i64 2, i32 5, void (i8 addrspace(1)*, i32, i32, i8 addrspace(1)*, i32)* nonnull elementtype(void (i8 addrspace(1)*, i32, i32, i8 addrspace(1)*, i32)) @hoge, i32 5, i32 0, i8 addrspace(1)* %arg3, i32 %tmp2, i32 %tmp6, i8 addrspace(1)* %tmp7, i32 0, i32 0, i32 0) [ "deopt"(i8 addrspace(1)* %arg2, i8 addrspace(1)* %arg1, i8 addrspace(1)* %arg3, i8 addrspace(1)* %arg4, i32 %tmp2, i32 %tmp1, i32 %tmp5), "gc-live"() ]
+    %tmp7 = call ptr addrspace(1) @wombat()
+    %tmp20 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2, i32 5, ptr nonnull elementtype(void (ptr addrspace(1), i32, i32, ptr addrspace(1), i32)) @hoge, i32 5, i32 0, ptr addrspace(1) %arg3, i32 %tmp2, i32 %tmp6, ptr addrspace(1) %tmp7, i32 0, i32 0, i32 0) [ "deopt"(ptr addrspace(1) %arg2, ptr addrspace(1) %arg1, ptr addrspace(1) %arg3, ptr addrspace(1) %arg4, i32 %tmp2, i32 %tmp1, i32 %tmp5), "gc-live"() ]
     ret void
   }
 
-  declare void @hoge(i8 addrspace(1)*, i32, i32, i8 addrspace(1)*, i32) #0
+  declare void @hoge(ptr addrspace(1), i32, i32, ptr addrspace(1), i32) #0
 
-  declare i8 addrspace(1)* @wombat() #0
+  declare ptr addrspace(1) @wombat() #0
 
   ; Function Attrs: nounwind readonly
-  declare i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token, i32 immarg, i32 immarg) #1
+  declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32 immarg, i32 immarg) #1
 
-  declare token @llvm.experimental.gc.statepoint.p0f_isVoidp1i8i32i32p1i8i32f(i64 immarg, i32 immarg, void (i8 addrspace(1)*, i32, i32, i8 addrspace(1)*, i32)*, i32 immarg, i32 immarg, ...) #0
+  declare token @llvm.experimental.gc.statepoint.p0(i64 immarg, i32 immarg, ptr, i32 immarg, i32 immarg, ...) #0
 
   attributes #0 = { "target-cpu"="broadwell" }
   attributes #1 = { nounwind readonly "target-cpu"="broadwell" }
@@ -99,7 +99,7 @@ body:             |
   ; CHECK:   renamable $r14 = COPY $rdx
   ; CHECK:   renamable $r13 = COPY $rsi
   ; CHECK:   renamable $r12 = COPY $rdi
-  ; CHECK:   renamable $ebx = MOV32rm undef renamable $rax, 1, $noreg, 0, $noreg, implicit-def $rbx :: (load unordered (s32) from `i32 addrspace(1)* undef`, align 8, addrspace 1)
+  ; CHECK:   renamable $ebx = MOV32rm undef renamable $rax, 1, $noreg, 0, $noreg, implicit-def $rbx :: (load unordered (s32) from `ptr addrspace(1) undef`, align 8, addrspace 1)
   ; CHECK: bb.1.bb7:
   ; CHECK:   liveins: $rbx, $r12, $r13, $r14, $r15
   ; CHECK:   renamable $ebp = LEA64_32r renamable $rbx, 1, $noreg, 6, $noreg
@@ -207,7 +207,7 @@ body:             |
     renamable $r14 = COPY $rdx
     renamable $r13 = COPY $rsi
     renamable $r12 = COPY $rdi
-    renamable $ebx = MOV32rm undef renamable $rax, 1, $noreg, 0, $noreg, implicit-def $rbx :: (load unordered (s32) from `i32 addrspace(1)* undef`, align 8, addrspace 1)
+    renamable $ebx = MOV32rm undef renamable $rax, 1, $noreg, 0, $noreg, implicit-def $rbx :: (load unordered (s32) from `ptr addrspace(1) undef`, align 8, addrspace 1)
 
   bb.1.bb7:
     liveins: $rbx, $r12, $r13, $r14, $r15

diff  --git a/llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir b/llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir
index 09d003d5ab8e4..2170573f4b0d9 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir
+++ b/llvm/test/CodeGen/X86/statepoint-invoke-ra-enter-at-end.mir
@@ -8,134 +8,130 @@
   target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
   target triple = "x86_64-unknown-linux-gnu"
 
-  @global = external global i8 addrspace(1)*, align 8
-  @global.1 = external global i8 addrspace(1)*, align 8
+  @global = external global ptr addrspace(1), align 8
+  @global.1 = external global ptr addrspace(1), align 8
 
-  define void @bar(i8 addrspace(1)* %arg) gc "statepoint-example" personality i32* ()* @zot {
+  define void @bar(ptr addrspace(1) %arg) gc "statepoint-example" personality ptr @zot {
   bb:
-    %tmp = inttoptr i64 undef to i8 addrspace(1)*
+    %tmp = inttoptr i64 undef to ptr addrspace(1)
     br label %bb1
 
   bb1:                                              ; preds = %bb64, %bb
-    %tmp3 = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 2, i32 5, void ()* nonnull elementtype(void ()) @wibble, i32 0, i32 0, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, i8* null, i32 2, i32 1, i32 0, i32 0, i32 5, i32 0, i32 7, i8* null, i32 7, i8* null, i32 7, i8* null, i32 7, i8* null, i32 7, i8* null), "gc-live"(i8 addrspace(1)* %tmp) ]
-    %tmp4 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tmp3, i32 0, i32 0) ; (%tmp, %tmp)
-    %tmp5 = call token (i64, i32, void (i8 addrspace(1)*)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidp1i8f(i64 2882400000, i32 0, void (i8 addrspace(1)*)* nonnull elementtype(void (i8 addrspace(1)*)) @barney, i32 1, i32 0, i8 addrspace(1)* undef, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, i8* null, i32 9, i32 1, i32 9, i32 0, i32 5, i32 1, i32 7, i8* null, i32 8, i32 2, i32 7, i8* null, i32 7, i8* null, i32 7, i8* null, i32 8, i32 2), "gc-live"(i8 addrspace(1)* undef, i8 addrspace(1)* %tmp4) ]
-    %tmp6 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tmp5, i32 1, i32 1) ; (%tmp4, %tmp4)
-    %tmp7 = call token (i64, i32, i8 addrspace(1)* ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_p1i8f(i64 2, i32 5, i8 addrspace(1)* ()* nonnull elementtype(i8 addrspace(1)* ()) @blam, i32 0, i32 0, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, i8* null, i32 10, i32 1, i32 9, i32 0, i32 5, i32 1, i32 7, i8* null, i32 8, i32 2, i32 7, i8* null, i32 7, i8* null, i32 7, i8* null, i32 8, i32 2), "gc-live"(i8 addrspace(1)* %tmp6, i8 addrspace(1)* undef) ]
-    %tmp8 = call align 8 "java-type-kid"="69" i8 addrspace(1)* @llvm.experimental.gc.result.p1i8(token %tmp7)
-    %tmp9 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tmp7, i32 0, i32 0) ; (%tmp6, %tmp6)
+    %tmp3 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2, i32 5, ptr nonnull elementtype(void ()) @wibble, i32 0, i32 0, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, ptr null, i32 2, i32 1, i32 0, i32 0, i32 5, i32 0, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null), "gc-live"(ptr addrspace(1) %tmp) ]
+    %tmp4 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tmp3, i32 0, i32 0) ; (%tmp, %tmp)
+    %tmp5 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr nonnull elementtype(void (ptr addrspace(1))) @barney, i32 1, i32 0, ptr addrspace(1) undef, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, ptr null, i32 9, i32 1, i32 9, i32 0, i32 5, i32 1, i32 7, ptr null, i32 8, i32 2, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null, i32 8, i32 2), "gc-live"(ptr addrspace(1) undef, ptr addrspace(1) %tmp4) ]
+    %tmp6 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tmp5, i32 1, i32 1) ; (%tmp4, %tmp4)
+    %tmp7 = call token (i64, i32, ptr addrspace(1) ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2, i32 5, ptr addrspace(1) ()* nonnull elementtype(ptr addrspace(1) ()) @blam, i32 0, i32 0, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, ptr null, i32 10, i32 1, i32 9, i32 0, i32 5, i32 1, i32 7, ptr null, i32 8, i32 2, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null, i32 8, i32 2), "gc-live"(ptr addrspace(1) %tmp6, ptr addrspace(1) undef) ]
+    %tmp8 = call align 8 "java-type-kid"="69" ptr addrspace(1) @llvm.experimental.gc.result.p1(token %tmp7)
+    %tmp9 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tmp7, i32 0, i32 0) ; (%tmp6, %tmp6)
     br i1 undef, label %bb64, label %bb10
 
   bb10:                                             ; preds = %bb1
-    %tmp11 = inttoptr i64 undef to i8 addrspace(1)*
-    %tmp12 = call i8 addrspace(1)* @wobble.3(i8 addrspace(1)* undef, i8 addrspace(1)* addrspace(1)* undef)
-    %tmp13 = select i1 false, i8 addrspace(1)* null, i8 addrspace(1)* %tmp12
+    %tmp11 = inttoptr i64 undef to ptr addrspace(1)
+    %tmp12 = call ptr addrspace(1) @wobble.3(ptr addrspace(1) undef, ptr addrspace(1) undef)
+    %tmp13 = select i1 false, ptr addrspace(1) null, ptr addrspace(1) %tmp12
     %tmp14 = extractvalue { i32, i1 } undef, 1
     br i1 %tmp14, label %bb17, label %bb15
 
   bb15:                                             ; preds = %bb10
-    %tmp16 = call token (i64, i32, void (i8 addrspace(1)*)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidp1i8f(i64 2882400000, i32 0, void (i8 addrspace(1)*)* nonnull elementtype(void (i8 addrspace(1)*)) @barney, i32 1, i32 0, i8 addrspace(1)* undef, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, i8* null, i32 2, i32 1, i32 71, i32 0, i32 5, i32 0, i32 0, i8 addrspace(1)* %tmp13, i32 7, i8* null, i32 7, i8* null, i32 7, i8* null, i32 7, i8* null, i32 2, i32 5, i32 1, i32 0, i32 2, i32 0, i32 0, i8 addrspace(1)* %tmp8, i32 7, i8* null, i32 1, i32 6, i32 0, i32 0, i32 1, i32 1, i32 0, i8 addrspace(1)* %tmp8, i32 8, i32 10), "gc-live"(i8 addrspace(1)* %tmp9, i8 addrspace(1)* %tmp13, i8 addrspace(1)* %tmp11, i8 addrspace(1)* undef, i8 addrspace(1)* %tmp8) ]
+    %tmp16 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr nonnull elementtype(void (ptr addrspace(1))) @barney, i32 1, i32 0, ptr addrspace(1) undef, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, ptr null, i32 2, i32 1, i32 71, i32 0, i32 5, i32 0, i32 0, ptr addrspace(1) %tmp13, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null, i32 2, i32 5, i32 1, i32 0, i32 2, i32 0, i32 0, ptr addrspace(1) %tmp8, i32 7, ptr null, i32 1, i32 6, i32 0, i32 0, i32 1, i32 1, i32 0, ptr addrspace(1) %tmp8, i32 8, i32 10), "gc-live"(ptr addrspace(1) %tmp9, ptr addrspace(1) %tmp13, ptr addrspace(1) %tmp11, ptr addrspace(1) undef, ptr addrspace(1) %tmp8) ]
     unreachable
 
   bb17:                                             ; preds = %bb10
-    %tmp18 = load atomic i32, i32 addrspace(1)* undef unordered, align 4
+    %tmp18 = load atomic i32, ptr addrspace(1) undef unordered, align 4
     %tmp19 = and i32 %tmp18, 33554431
-    %tmp20 = invoke token (i64, i32, void (i32, i8 addrspace(1)*)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidi32p1i8f(i64 1, i32 16, void (i32, i8 addrspace(1)*)* nonnull elementtype(void (i32, i8 addrspace(1)*)) @spam, i32 2, i32 0, i32 %tmp19, i8 addrspace(1)* nonnull undef, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, i8* null, i32 2, i32 1, i32 71, i32 0, i32 5, i32 0, i32 0, i8 addrspace(1)* %tmp13, i32 7, i8* null, i32 7, i8* null, i32 7, i8* null, i32 7, i8* null, i32 8, i32 5, i32 12, i32 0, i32 2, i32 0, i32 0, i8 addrspace(1)* %tmp8, i32 7, i8* null), "gc-live"(i8 addrspace(1)* %tmp9, i8 addrspace(1)* %tmp13, i8 addrspace(1)* %tmp13, i8 addrspace(1)* %tmp11, i8 addrspace(1)* %tmp8, i8 addrspace(1)* undef) ]
+    %tmp20 = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 1, i32 16, ptr nonnull elementtype(void (i32, ptr addrspace(1))) @spam, i32 2, i32 0, i32 %tmp19, ptr addrspace(1) nonnull undef, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, ptr null, i32 2, i32 1, i32 71, i32 0, i32 5, i32 0, i32 0, ptr addrspace(1) %tmp13, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null, i32 8, i32 5, i32 12, i32 0, i32 2, i32 0, i32 0, ptr addrspace(1) %tmp8, i32 7, ptr null), "gc-live"(ptr addrspace(1) %tmp9, ptr addrspace(1) %tmp13, ptr addrspace(1) %tmp13, ptr addrspace(1) %tmp11, ptr addrspace(1) %tmp8, ptr addrspace(1) undef) ]
             to label %bb21 unwind label %bb59
 
   bb21:                                             ; preds = %bb17
-    %tmp22 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tmp20, i32 0, i32 0) ; (%tmp9, %tmp9)
-    %tmp23 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tmp20, i32 1, i32 1) ; (%tmp13, %tmp13)
-    %tmp24 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tmp20, i32 3, i32 3) ; (%tmp11, %tmp11)
-    %tmp25 = load atomic i8 addrspace(1)*, i8 addrspace(1)** @global unordered, align 8
-    %tmp26 = ptrtoint i8 addrspace(1)* %tmp25 to i64
+    %tmp22 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tmp20, i32 0, i32 0) ; (%tmp9, %tmp9)
+    %tmp23 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tmp20, i32 1, i32 1) ; (%tmp13, %tmp13)
+    %tmp24 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tmp20, i32 3, i32 3) ; (%tmp11, %tmp11)
+    %tmp25 = load atomic ptr addrspace(1), ptr @global unordered, align 8
+    %tmp26 = ptrtoint ptr addrspace(1) %tmp25 to i64
     %tmp27 = xor i64 %tmp26, -1
-    %tmp28 = inttoptr i64 %tmp27 to i8 addrspace(1)*
-    %tmp29 = call token (i64, i32, void (i8 addrspace(1)*)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidp1i8f(i64 2, i32 5, void (i8 addrspace(1)*)* nonnull elementtype(void (i8 addrspace(1)*)) @baz, i32 1, i32 0, i8 addrspace(1)* undef, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, i8* null, i32 10, i32 1, i32 83, i32 0, i32 5, i32 1, i32 0, i8 addrspace(1)* %tmp23, i32 7, i8* null, i32 8, i32 2, i32 7, i8* null, i32 7, i8* null, i32 8, i32 2), "gc-live"(i8 addrspace(1)* %tmp22, i8 addrspace(1)* %tmp23, i8 addrspace(1)* %tmp23, i8 addrspace(1)* %tmp24, i8 addrspace(1)* %tmp28) ]
-    %tmp30 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tmp29, i32 1, i32 2) ; (%tmp23, %tmp23)
-    %tmp31 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tmp29, i32 4, i32 4) ; (%tmp28, %tmp28)
-    %tmp45 = load atomic i8 addrspace(1)*, i8 addrspace(1)** @global.1 unordered, align 8
-    %tmp49 = load i32, i32 addrspace(256)* inttoptr (i64 660 to i32 addrspace(256)*), align 4
-    %tmp32 = icmp eq i8 addrspace(1)* %tmp30, null
+    %tmp28 = inttoptr i64 %tmp27 to ptr addrspace(1)
+    %tmp29 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2, i32 5, ptr nonnull elementtype(void (ptr addrspace(1))) @baz, i32 1, i32 0, ptr addrspace(1) undef, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, ptr null, i32 10, i32 1, i32 83, i32 0, i32 5, i32 1, i32 0, ptr addrspace(1) %tmp23, i32 7, ptr null, i32 8, i32 2, i32 7, ptr null, i32 7, ptr null, i32 8, i32 2), "gc-live"(ptr addrspace(1) %tmp22, ptr addrspace(1) %tmp23, ptr addrspace(1) %tmp23, ptr addrspace(1) %tmp24, ptr addrspace(1) %tmp28) ]
+    %tmp30 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tmp29, i32 1, i32 2) ; (%tmp23, %tmp23)
+    %tmp31 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tmp29, i32 4, i32 4) ; (%tmp28, %tmp28)
+    %tmp45 = load atomic ptr addrspace(1), ptr @global.1 unordered, align 8
+    %tmp49 = load i32, ptr addrspace(256) inttoptr (i64 660 to ptr addrspace(256)), align 4
+    %tmp32 = icmp eq ptr addrspace(1) %tmp30, null
     br i1 %tmp32, label %bb64, label %bb33.preheader
 
   bb33.preheader:                                   ; preds = %bb21
     br label %bb33
 
   bb33:                                             ; preds = %bb33.preheader, %bb33
-    %tmp34 = phi i8 addrspace(1)* [ %tmp57, %bb33 ], [ undef, %bb33.preheader ]
+    %tmp34 = phi ptr addrspace(1) [ %tmp57, %bb33 ], [ undef, %bb33.preheader ]
     %tmp35 = phi i64 [ %tmp37, %bb33 ], [ 0, %bb33.preheader ]
     %tmp37 = add nuw nsw i64 %tmp35, 1
-    %tmp38 = load atomic i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* undef unordered, align 8
-    %tmp39 = ptrtoint i8 addrspace(1)* %tmp38 to i64
+    %tmp38 = load atomic ptr addrspace(1), ptr addrspace(1) undef unordered, align 8
+    %tmp39 = ptrtoint ptr addrspace(1) %tmp38 to i64
     %tmp40 = xor i64 %tmp39, -1
-    %tmp41 = inttoptr i64 %tmp40 to i8 addrspace(1)*
-    %tmp42 = select i1 false, i8 addrspace(1)* null, i8 addrspace(1)* %tmp41
-    %tmp43 = icmp eq i8 addrspace(1)* %tmp42, %tmp30
-    %tmp44 = select i1 %tmp43, i8 addrspace(1)* null, i8 addrspace(1)* %tmp42
-    call void asm sideeffect "lock btsq $0,($1)", "r,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64 0, i64* undef)
-    %tmp46 = ptrtoint i8 addrspace(1)* %tmp45 to i64
+    %tmp41 = inttoptr i64 %tmp40 to ptr addrspace(1)
+    %tmp42 = select i1 false, ptr addrspace(1) null, ptr addrspace(1) %tmp41
+    %tmp43 = icmp eq ptr addrspace(1) %tmp42, %tmp30
+    %tmp44 = select i1 %tmp43, ptr addrspace(1) null, ptr addrspace(1) %tmp42
+    call void asm sideeffect "lock btsq $0,($1)", "r,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i64 0, ptr undef)
+    %tmp46 = ptrtoint ptr addrspace(1) %tmp45 to i64
     %tmp47 = xor i64 %tmp46, -1
-    %tmp48 = inttoptr i64 %tmp47 to i8 addrspace(1)*
+    %tmp48 = inttoptr i64 %tmp47 to ptr addrspace(1)
     %tmp50 = or i32 %tmp49, 268435456
-    %tmp51 = cmpxchg i32 addrspace(1)* undef, i32 undef, i32 %tmp50 acquire monotonic, align 4
-    call void @wobble(i8 addrspace(1)* nonnull %tmp48)
-    %tmp52 = load atomic i8 addrspace(1)*, i8 addrspace(1)** @global unordered, align 8
-    %tmp53 = ptrtoint i8 addrspace(1)* %tmp52 to i64
+    %tmp51 = cmpxchg ptr addrspace(1) undef, i32 undef, i32 %tmp50 acquire monotonic, align 4
+    call void @wobble(ptr addrspace(1) nonnull %tmp48)
+    %tmp52 = load atomic ptr addrspace(1), ptr @global unordered, align 8
+    %tmp53 = ptrtoint ptr addrspace(1) %tmp52 to i64
     %tmp54 = xor i64 %tmp53, -1
-    %tmp55 = inttoptr i64 %tmp54 to i8 addrspace(1)*
-    %tmp56 = call token (i64, i32, void (i8 addrspace(1)*)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidp1i8f(i64 2, i32 5, void (i8 addrspace(1)*)* nonnull elementtype(void (i8 addrspace(1)*)) @baz, i32 1, i32 0, i8 addrspace(1)* %tmp55, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, i8* null, i32 10, i32 1, i32 83, i32 0, i32 5, i32 1, i32 0, i8 addrspace(1)* %tmp44, i32 7, i8* null, i32 8, i32 2, i32 7, i8* null, i32 7, i8* null, i32 8, i32 2), "gc-live"(i8 addrspace(1)* undef, i8 addrspace(1)* %tmp44, i8 addrspace(1)* %tmp44, i8 addrspace(1)* %tmp34, i8 addrspace(1)* undef) ]
-    %tmp57 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tmp56, i32 3, i32 3) ; (%tmp34, %tmp34)
+    %tmp55 = inttoptr i64 %tmp54 to ptr addrspace(1)
+    %tmp56 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2, i32 5, ptr nonnull elementtype(void (ptr addrspace(1))) @baz, i32 1, i32 0, ptr addrspace(1) %tmp55, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, ptr null, i32 10, i32 1, i32 83, i32 0, i32 5, i32 1, i32 0, ptr addrspace(1) %tmp44, i32 7, ptr null, i32 8, i32 2, i32 7, ptr null, i32 7, ptr null, i32 8, i32 2), "gc-live"(ptr addrspace(1) undef, ptr addrspace(1) %tmp44, ptr addrspace(1) %tmp44, ptr addrspace(1) %tmp34, ptr addrspace(1) undef) ]
+    %tmp57 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tmp56, i32 3, i32 3) ; (%tmp34, %tmp34)
     br label %bb33
 
   bb59:                                             ; preds = %bb17
     %tmp60 = landingpad token
             cleanup
-    %tmp61 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tmp60, i32 1, i32 2) ; (%tmp13, %tmp13)
-    %tmp62 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tmp60, i32 4, i32 4) ; (%tmp8, %tmp8)
-    %tmp63 = call token (i64, i32, void (i32)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidi32f(i64 2882400000, i32 0, void (i32)* elementtype(void (i32)) bitcast (void (i64)* @barney.2 to void (i32)*), i32 1, i32 2, i32 -13, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, i8* null, i32 2, i32 1, i32 71, i32 0, i32 5, i32 0, i32 0, i8 addrspace(1)* %tmp61, i32 7, i8* null, i32 7, i8* null, i32 7, i8* null, i32 7, i8* null, i32 1, i32 5, i32 21, i32 0, i32 2, i32 0, i32 0, i8 addrspace(1)* %tmp62, i32 0, i8 addrspace(1)* undef), "gc-live"() ]
+    %tmp61 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tmp60, i32 1, i32 2) ; (%tmp13, %tmp13)
+    %tmp62 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tmp60, i32 4, i32 4) ; (%tmp8, %tmp8)
+    %tmp63 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr elementtype(void (i32)) @barney.2, i32 1, i32 2, i32 -13, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, ptr null, i32 2, i32 1, i32 71, i32 0, i32 5, i32 0, i32 0, ptr addrspace(1) %tmp61, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null, i32 1, i32 5, i32 21, i32 0, i32 2, i32 0, i32 0, ptr addrspace(1) %tmp62, i32 0, ptr addrspace(1) undef), "gc-live"() ]
     unreachable
 
   bb64:                                             ; preds = %bb21, %bb1
     %tmp65 = or i32 undef, 268435456
-    %tmp66 = cmpxchg i32 addrspace(1)* undef, i32 undef, i32 %tmp65 acquire monotonic, align 4
-    %tmp67 = call token (i64, i32, void (i8 addrspace(1)*)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidp1i8f(i64 2, i32 5, void (i8 addrspace(1)*)* nonnull elementtype(void (i8 addrspace(1)*)) @baz, i32 1, i32 0, i8 addrspace(1)* undef, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, i8* null, i32 10, i32 1, i32 133, i32 0, i32 5, i32 1, i32 7, i8* null, i32 8, i32 2, i32 7, i8* null, i32 7, i8* null, i32 7, i8* null, i32 8, i32 2), "gc-live"(i8 addrspace(1)* undef, i8 addrspace(1)* undef) ]
+    %tmp66 = cmpxchg ptr addrspace(1) undef, i32 undef, i32 %tmp65 acquire monotonic, align 4
+    %tmp67 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2, i32 5, ptr nonnull elementtype(void (ptr addrspace(1))) @baz, i32 1, i32 0, ptr addrspace(1) undef, i32 0, i32 0) [ "deopt"(i32 0, i32 2, i32 0, i32 0, i32 0, i32 1, i32 0, i32 7, ptr null, i32 10, i32 1, i32 133, i32 0, i32 5, i32 1, i32 7, ptr null, i32 8, i32 2, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null, i32 8, i32 2), "gc-live"(ptr addrspace(1) undef, ptr addrspace(1) undef) ]
     br label %bb1
   }
 
-  declare i32* @zot()
+  declare ptr @zot()
 
   declare void @wibble() gc "statepoint-example"
 
-  declare i8 addrspace(1)* @blam() gc "statepoint-example"
+  declare ptr addrspace(1) @blam() gc "statepoint-example"
 
-  declare void @baz(i8 addrspace(1)*) gc "statepoint-example"
+  declare void @baz(ptr addrspace(1)) gc "statepoint-example"
 
-  declare void @spam(i32, i8 addrspace(1)*)
+  declare void @spam(i32, ptr addrspace(1))
 
-  declare void @wobble(i8 addrspace(1)*)
+  declare void @wobble(ptr addrspace(1))
 
-  declare void @barney(i8 addrspace(1)*)
+  declare void @barney(ptr addrspace(1))
 
-  declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 immarg, i32 immarg, void ()*, i32 immarg, i32 immarg, ...)
+  declare token @llvm.experimental.gc.statepoint.p0(i64 immarg, i32 immarg, ptr, i32 immarg, i32 immarg, ...)
 
   ; Function Attrs: nounwind readnone
-  declare i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token, i32 immarg, i32 immarg) #0
-
-  declare token @llvm.experimental.gc.statepoint.p0f_isVoidp1i8f(i64 immarg, i32 immarg, void (i8 addrspace(1)*)*, i32 immarg, i32 immarg, ...)
-
-  declare token @llvm.experimental.gc.statepoint.p0f_p1i8f(i64 immarg, i32 immarg, i8 addrspace(1)* ()*, i32 immarg, i32 immarg, ...)
+  declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32 immarg, i32 immarg) #0
 
+  
+  
   ; Function Attrs: nounwind readnone
-  declare i8 addrspace(1)* @llvm.experimental.gc.result.p1i8(token) #0
-
-  declare token @llvm.experimental.gc.statepoint.p0f_isVoidi32p1i8f(i64 immarg, i32 immarg, void (i32, i8 addrspace(1)*)*, i32 immarg, i32 immarg, ...)
+  declare ptr addrspace(1) @llvm.experimental.gc.result.p1(token) #0
 
+  
   declare void @barney.2(i64)
 
-  declare token @llvm.experimental.gc.statepoint.p0f_isVoidi32f(i64 immarg, i32 immarg, void (i32)*, i32 immarg, i32 immarg, ...)
-
-  declare i8 addrspace(1)* @wobble.3(i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)*)
+  
+  declare ptr addrspace(1) @wobble.3(ptr addrspace(1), ptr addrspace(1))
 
   attributes #0 = { nounwind readnone }
 
@@ -306,7 +302,7 @@ body:             |
   ; CHECK-NEXT: bb.4.bb17:
   ; CHECK-NEXT:   successors: %bb.5(0x80000000), %bb.8(0x00000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm undef %35:gr64, 1, $noreg, 0, $noreg :: (load unordered (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm undef %35:gr64, 1, $noreg, 0, $noreg :: (load unordered (s32) from `ptr addrspace(1) undef`, addrspace 1)
   ; CHECK-NEXT:   [[AND32ri:%[0-9]+]]:gr32 = AND32ri [[AND32ri]], 33554431, implicit-def dead $eflags
   ; CHECK-NEXT:   EH_LABEL <mcsymbol .Ltmp0>
   ; CHECK-NEXT:   ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
@@ -336,7 +332,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[MOV64rm1:%[0-9]+]]:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @global.1, $noreg :: (load (s64) from got)
   ; CHECK-NEXT:   [[MOV64rm2:%[0-9]+]]:gr64 = MOV64rm [[MOV64rm1]], 1, $noreg, 0, $noreg :: (dereferenceable load unordered (s64) from @global.1)
-  ; CHECK-NEXT:   [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm $noreg, 1, $noreg, 660, $gs :: (load (s32) from `i32 addrspace(256)* inttoptr (i64 660 to i32 addrspace(256)*)`, addrspace 256)
+  ; CHECK-NEXT:   [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm $noreg, 1, $noreg, 660, $gs :: (load (s32) from `ptr addrspace(256) inttoptr (i64 660 to ptr addrspace(256))`, addrspace 256)
   ; CHECK-NEXT:   [[NOT64r1:%[0-9]+]]:gr64 = NOT64r [[NOT64r1]]
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:gr64 = COPY [[NOT64r1]]
   ; CHECK-NEXT:   [[OR32ri:%[0-9]+]]:gr32 = OR32ri [[OR32ri]], 268435456, implicit-def dead $eflags
@@ -349,13 +345,13 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.7(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[INC64r:%[0-9]+]]:gr64_with_sub_8bit = nuw nsw INC64r [[INC64r]], implicit-def dead $eflags
-  ; CHECK-NEXT:   [[MOV64rm2:%[0-9]+]]:gr64 = MOV64rm undef %59:gr64, 1, $noreg, 0, $noreg :: (load unordered (s64) from `i8 addrspace(1)* addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   [[MOV64rm2:%[0-9]+]]:gr64 = MOV64rm undef %59:gr64, 1, $noreg, 0, $noreg :: (load unordered (s64) from `ptr addrspace(1) undef`, addrspace 1)
   ; CHECK-NEXT:   [[NOT64r2:%[0-9]+]]:gr64 = NOT64r [[NOT64r2]]
   ; CHECK-NEXT:   CMP64rr [[NOT64r2]], [[COPY6]], implicit-def $eflags
   ; CHECK-NEXT:   undef %100.sub_32bit:gr64_with_sub_8bit = MOV32ri 0
   ; CHECK-NEXT:   [[CMOV64rr:%[0-9]+]]:gr64 = CMOV64rr [[CMOV64rr]], %100, 4, implicit killed $eflags
   ; CHECK-NEXT:   INLINEASM &"lock btsq $0,($1)", 1 /* sideeffect attdialect */, 4456457 /* reguse:GR64 */, %100, 4456457 /* reguse:GR64 */, undef %56:gr64, 12 /* clobber */, implicit-def dead early-clobber $df, 12 /* clobber */, implicit-def early-clobber $fpsw, 12 /* clobber */, implicit-def dead early-clobber $eflags
-  ; CHECK-NEXT:   LCMPXCHG32 undef %67:gr64, 1, $noreg, 0, $noreg, [[COPY5]], implicit-def dead $eax, implicit-def dead $eflags, implicit undef $eax :: (load store acquire monotonic (s32) on `i32 addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   LCMPXCHG32 undef %67:gr64, 1, $noreg, 0, $noreg, [[COPY5]], implicit-def dead $eax, implicit-def dead $eflags, implicit undef $eax :: (load store acquire monotonic (s32) on `ptr addrspace(1) undef`, addrspace 1)
   ; CHECK-NEXT:   ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
   ; CHECK-NEXT:   $rdi = COPY [[COPY4]]
   ; CHECK-NEXT:   CALL64pcrel32 target-flags(x86-plt) @wobble, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp
@@ -382,7 +378,7 @@ body:             |
   ; CHECK-NEXT: bb.9.bb64:
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   LCMPXCHG32 undef %76:gr64, 1, $noreg, 0, $noreg, [[MOV32ri]], implicit-def dead $eax, implicit-def dead $eflags, implicit undef $eax :: (load store acquire monotonic (s32) on `i32 addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   LCMPXCHG32 undef %76:gr64, 1, $noreg, 0, $noreg, [[MOV32ri]], implicit-def dead $eax, implicit-def dead $eflags, implicit undef $eax :: (load store acquire monotonic (s32) on `ptr addrspace(1) undef`, addrspace 1)
   ; CHECK-NEXT:   ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
   ; CHECK-NEXT:   STATEPOINT 2, 5, 1, undef %79:gr64, undef $rdi, 2, 0, 2, 0, 2, 27, 2, 0, 2, 2, 2, 0, 2, 0, 2, 0, 2, 1, 2, 0, 2, 7, 2, 0, 2, 10, 2, 1, 2, 133, 2, 0, 2, 5, 2, 1, 2, 7, 2, 0, 2, 8, 2, 2, 2, 7, 2, 0, 2, 7, 2, 0, 2, 7, 2, 0, 2, 8, 2, 2, 2, 0, 2, 0, 2, 0, csr_64, implicit-def $rsp, implicit-def $ssp
   ; CHECK-NEXT:   ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
@@ -433,7 +429,7 @@ body:             |
   bb.4.bb17:
     successors: %bb.5(0x80000000), %bb.8(0x00000000)
 
-    %36:gr32 = MOV32rm undef %35:gr64, 1, $noreg, 0, $noreg :: (load unordered (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+    %36:gr32 = MOV32rm undef %35:gr64, 1, $noreg, 0, $noreg :: (load unordered (s32) from `ptr addrspace(1) undef`, addrspace 1)
     %36:gr32 = AND32ri %36, 33554431, implicit-def dead $eflags
     EH_LABEL <mcsymbol .Ltmp0>
     ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
@@ -459,7 +455,7 @@ body:             |
     successors: %bb.7(0x80000000)
 
     %64:gr64 = MOV64rm %51, 1, $noreg, 0, $noreg :: (dereferenceable load unordered (s64) from @global.1)
-    %65:gr32 = MOV32rm $noreg, 1, $noreg, 660, $gs :: (load (s32) from `i32 addrspace(256)* inttoptr (i64 660 to i32 addrspace(256)*)`, addrspace 256)
+    %65:gr32 = MOV32rm $noreg, 1, $noreg, 660, $gs :: (load (s32) from `ptr addrspace(256) inttoptr (i64 660 to ptr addrspace(256))`, addrspace 256)
     undef %53.sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags
     %64:gr64 = NOT64r %64
     %65:gr32 = OR32ri %65, 268435456, implicit-def dead $eflags
@@ -470,12 +466,12 @@ body:             |
     successors: %bb.7(0x80000000)
 
     %81:gr64_with_sub_8bit = nuw nsw INC64r %81, implicit-def dead $eflags
-    %63:gr64 = MOV64rm undef %59:gr64, 1, $noreg, 0, $noreg :: (load unordered (s64) from `i8 addrspace(1)* addrspace(1)* undef`, addrspace 1)
+    %63:gr64 = MOV64rm undef %59:gr64, 1, $noreg, 0, $noreg :: (load unordered (s64) from `ptr addrspace(1) undef`, addrspace 1)
     %63:gr64 = NOT64r %63
     CMP64rr %63, %31, implicit-def $eflags
     %63:gr64 = CMOV64rr %63, %53, 4, implicit killed $eflags
     INLINEASM &"lock btsq $0,($1)", 1 /* sideeffect attdialect */, 4456457 /* reguse:GR64 */, %53, 4456457 /* reguse:GR64 */, undef %56:gr64, 12 /* clobber */, implicit-def dead early-clobber $df, 12 /* clobber */, implicit-def early-clobber $fpsw, 12 /* clobber */, implicit-def dead early-clobber $eflags
-    LCMPXCHG32 undef %67:gr64, 1, $noreg, 0, $noreg, %65, implicit-def dead $eax, implicit-def dead $eflags, implicit undef $eax :: (load store acquire monotonic (s32) on `i32 addrspace(1)* undef`, addrspace 1)
+    LCMPXCHG32 undef %67:gr64, 1, $noreg, 0, $noreg, %65, implicit-def dead $eax, implicit-def dead $eflags, implicit undef $eax :: (load store acquire monotonic (s32) on `ptr addrspace(1) undef`, addrspace 1)
     ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
     $rdi = COPY %64
     CALL64pcrel32 target-flags(x86-plt) @wobble, csr_64, implicit $rsp, implicit $ssp, implicit killed $rdi, implicit-def $rsp, implicit-def $ssp
@@ -501,7 +497,7 @@ body:             |
   bb.9.bb64:
     successors: %bb.1(0x80000000)
 
-    LCMPXCHG32 undef %76:gr64, 1, $noreg, 0, $noreg, %74, implicit-def dead $eax, implicit-def dead $eflags, implicit undef $eax :: (load store acquire monotonic (s32) on `i32 addrspace(1)* undef`, addrspace 1)
+    LCMPXCHG32 undef %76:gr64, 1, $noreg, 0, $noreg, %74, implicit-def dead $eax, implicit-def dead $eflags, implicit undef $eax :: (load store acquire monotonic (s32) on `ptr addrspace(1) undef`, addrspace 1)
     ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
     STATEPOINT 2, 5, 1, undef %79:gr64, undef $rdi, 2, 0, 2, 0, 2, 27, 2, 0, 2, 2, 2, 0, 2, 0, 2, 0, 2, 1, 2, 0, 2, 7, 2, 0, 2, 10, 2, 1, 2, 133, 2, 0, 2, 5, 2, 1, 2, 7, 2, 0, 2, 8, 2, 2, 2, 7, 2, 0, 2, 7, 2, 0, 2, 7, 2, 0, 2, 8, 2, 2, 2, 0, 2, 0, 2, 0, csr_64, implicit-def $rsp, implicit-def $ssp
     ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp

diff  --git a/llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir b/llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir
index 15b5ba26868b9..f72bac86f0a0b 100644
--- a/llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir
+++ b/llvm/test/CodeGen/X86/statepoint-invoke-ra-remove-back-copies.mir
@@ -9,25 +9,25 @@
   target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
   target triple = "x86_64-unknown-linux-gnu"
 
-  define void @hoge(i8 addrspace(1)* %arg) gc "statepoint-example" personality i32* ()* @widget {
+  define void @hoge(ptr addrspace(1) %arg) gc "statepoint-example" personality ptr @widget {
   bb:
-    %tmp = call token (i64, i32, void (i8 addrspace(1)*, i8 addrspace(1)*)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidp1i8p1i8f(i64 2, i32 5, void (i8 addrspace(1)*, i8 addrspace(1)*)* nonnull elementtype(void (i8 addrspace(1)*, i8 addrspace(1)*)) @quux, i32 2, i32 0, i8 addrspace(1)* %arg, i8 addrspace(1)* undef, i32 0, i32 0) [ "deopt"(i32 0, i32 10, i32 0, i32 6, i32 0, i32 4, i32 1, i32 0, i8 addrspace(1)* %arg, i32 7, i8* null, i32 0, i8 addrspace(1)* %arg, i32 7, i8* null, i32 0, i8 addrspace(1)* %arg, i32 2, i32 1, i32 5, i32 0, i32 2, i32 0, i32 7, i8* null, i32 7, i8* null), "gc-live"(i8 addrspace(1)* %arg) ]
-    %tmp1 = load atomic i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* undef unordered, align 8
-    %tmp2 = ptrtoint i8 addrspace(1)* %tmp1 to i64
+    %tmp = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2, i32 5, ptr nonnull elementtype(void (ptr addrspace(1), ptr addrspace(1))) @quux, i32 2, i32 0, ptr addrspace(1) %arg, ptr addrspace(1) undef, i32 0, i32 0) [ "deopt"(i32 0, i32 10, i32 0, i32 6, i32 0, i32 4, i32 1, i32 0, ptr addrspace(1) %arg, i32 7, ptr null, i32 0, ptr addrspace(1) %arg, i32 7, ptr null, i32 0, ptr addrspace(1) %arg, i32 2, i32 1, i32 5, i32 0, i32 2, i32 0, i32 7, ptr null, i32 7, ptr null), "gc-live"(ptr addrspace(1) %arg) ]
+    %tmp1 = load atomic ptr addrspace(1), ptr addrspace(1) undef unordered, align 8
+    %tmp2 = ptrtoint ptr addrspace(1) %tmp1 to i64
     %tmp3 = xor i64 %tmp2, -1
-    %tmp4 = inttoptr i64 %tmp3 to i8 addrspace(1)*
-    %tmp5 = select i1 false, i8 addrspace(1)* null, i8 addrspace(1)* %tmp4
+    %tmp4 = inttoptr i64 %tmp3 to ptr addrspace(1)
+    %tmp5 = select i1 false, ptr addrspace(1) null, ptr addrspace(1) %tmp4
     br i1 undef, label %bb6, label %bb40
 
   bb6:                                              ; preds = %bb
     br label %bb7
 
   bb7:                                              ; preds = %bb27, %bb6
-    %tmp9 = phi i8 addrspace(1)* [ undef, %bb6 ], [ %tmp28, %bb27 ]
-    %tmp10 = phi i8 addrspace(1)* [ undef, %bb6 ], [ %tmp29, %bb27 ]
-    %tmp11 = phi i8 addrspace(1)* [ undef, %bb6 ], [ %tmp30, %bb27 ]
+    %tmp9 = phi ptr addrspace(1) [ undef, %bb6 ], [ %tmp28, %bb27 ]
+    %tmp10 = phi ptr addrspace(1) [ undef, %bb6 ], [ %tmp29, %bb27 ]
+    %tmp11 = phi ptr addrspace(1) [ undef, %bb6 ], [ %tmp30, %bb27 ]
     %tmp12 = phi i32 [ 0, %bb6 ], [ %tmp16, %bb27 ]
-    %tmp13 = load atomic i32, i32 addrspace(1)* undef unordered, align 8
+    %tmp13 = load atomic i32, ptr addrspace(1) undef unordered, align 8
     %tmp14 = sub i32 %tmp13, 0
     %tmp15 = select i1 false, i32 %tmp14, i32 undef
     %tmp16 = add i32 %tmp15, %tmp12
@@ -40,17 +40,17 @@
     br i1 undef, label %bb27, label %bb23
 
   bb23:                                             ; preds = %bb20
-    %tmp24 = invoke token (i64, i32, void (i32, i8 addrspace(1)*, i8 addrspace(1)*, i32, i32)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidi32p1i8p1i8i32i32f(i64 1, i32 16, void (i32, i8 addrspace(1)*, i8 addrspace(1)*, i32, i32)* nonnull elementtype(void (i32, i8 addrspace(1)*, i8 addrspace(1)*, i32, i32)) @barney, i32 5, i32 0, i32 undef, i8 addrspace(1)* nonnull undef, i8 addrspace(1)* null, i32 0, i32 undef, i32 0, i32 0) [ "deopt"(i32 0, i32 10, i32 0, i32 10, i32 0, i32 4, i32 1, i32 7, i8* null, i32 7, i8* null, i32 0, i8 addrspace(1)* %tmp11, i32 7, i8* null, i32 0, i8 addrspace(1)* %tmp11, i32 10, i32 2, i32 12, i32 0, i32 3, i32 1, i32 0, i8 addrspace(1)* %tmp11, i32 0, i8 addrspace(1)* %tmp11, i32 7, i8* null, i32 0, i8 addrspace(1)* %tmp11, i32 2, i32 8, i32 4, i32 0, i32 1, i32 0, i32 7, i8* null, i32 2, i32 12, i32 7, i32 0, i32 2, i32 0, i32 7, i8* null, i32 7, i8* null, i32 10, i32 18, i32 96, i32 0, i32 9, i32 1, i32 0, i8 addrspace(1)* %tmp10, i32 0, i8 addrspace(1)* undef, i32 7, i8* null, i32 7, i8* null, i32 0, i8 addrspace(1)* %tmp9, i32 3, i32 %tmp16, i32 3, i32 0, i32 7, i8* null, i32 7, i8* null, i32 0, i8 addrspace(1)* %tmp9, i32 8, i32 9, i32 34, i32 0, i32 3, i32 1, i32 0, i8 addrspace(1)* %tmp10, i32 0, i8 addrspace(1)* undef, i32 7, i8* null, i32 0, i8 addrspace(1)* undef), "gc-live"(i8 addrspace(1)* %tmp11, i8 addrspace(1)* %tmp9, i8 addrspace(1)* undef, i8 addrspace(1)* %tmp10, i8 addrspace(1)* undef, i8 addrspace(1)* %tmp5) ]
+    %tmp24 = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 1, i32 16, ptr nonnull elementtype(void (i32, ptr addrspace(1), ptr addrspace(1), i32, i32)) @barney, i32 5, i32 0, i32 undef, ptr addrspace(1) nonnull undef, ptr addrspace(1) null, i32 0, i32 undef, i32 0, i32 0) [ "deopt"(i32 0, i32 10, i32 0, i32 10, i32 0, i32 4, i32 1, i32 7, ptr null, i32 7, ptr null, i32 0, ptr addrspace(1) %tmp11, i32 7, ptr null, i32 0, ptr addrspace(1) %tmp11, i32 10, i32 2, i32 12, i32 0, i32 3, i32 1, i32 0, ptr addrspace(1) %tmp11, i32 0, ptr addrspace(1) %tmp11, i32 7, ptr null, i32 0, ptr addrspace(1) %tmp11, i32 2, i32 8, i32 4, i32 0, i32 1, i32 0, i32 7, ptr null, i32 2, i32 12, i32 7, i32 0, i32 2, i32 0, i32 7, ptr null, i32 7, ptr null, i32 10, i32 18, i32 96, i32 0, i32 9, i32 1, i32 0, ptr addrspace(1) %tmp10, i32 0, ptr addrspace(1) undef, i32 7, ptr null, i32 7, ptr null, i32 0, ptr addrspace(1) %tmp9, i32 3, i32 %tmp16, i32 3, i32 0, i32 7, ptr null, i32 7, ptr null, i32 0, ptr addrspace(1) %tmp9, i32 8, i32 9, i32 34, i32 0, i32 3, i32 1, i32 0, ptr addrspace(1) %tmp10, i32 0, ptr addrspace(1) undef, i32 7, ptr null, i32 0, ptr addrspace(1) undef), "gc-live"(ptr addrspace(1) %tmp11, ptr addrspace(1) %tmp9, ptr addrspace(1) undef, ptr addrspace(1) %tmp10, ptr addrspace(1) undef, ptr addrspace(1) %tmp5) ]
             to label %bb25 unwind label %bb35
 
   bb25:                                             ; preds = %bb23
-    %tmp26 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tmp24, i32 5, i32 5) ; (%tmp5, %tmp5)
+    %tmp26 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tmp24, i32 5, i32 5) ; (%tmp5, %tmp5)
     br label %bb27
 
   bb27:                                             ; preds = %bb25, %bb20
-    %tmp28 = phi i8 addrspace(1)* [ %tmp9, %bb20 ], [ undef, %bb25 ]
-    %tmp29 = phi i8 addrspace(1)* [ %tmp10, %bb20 ], [ undef, %bb25 ]
-    %tmp30 = phi i8 addrspace(1)* [ %tmp11, %bb20 ], [ null, %bb25 ]
+    %tmp28 = phi ptr addrspace(1) [ %tmp9, %bb20 ], [ undef, %bb25 ]
+    %tmp29 = phi ptr addrspace(1) [ %tmp10, %bb20 ], [ undef, %bb25 ]
+    %tmp30 = phi ptr addrspace(1) [ %tmp11, %bb20 ], [ null, %bb25 ]
     %tmp34 = icmp sgt i32 0, %tmp16
     br i1 %tmp34, label %bb7, label %bb44
 
@@ -60,7 +60,7 @@
     br i1 undef, label %bb39, label %bb37
 
   bb37:                                             ; preds = %bb35
-    %tmp38 = call token (i64, i32, void (i32)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidi32f(i64 2882400000, i32 0, void (i32)* nonnull elementtype(void (i32)) @ham, i32 1, i32 2, i32 3, i32 0, i32 0) [ "deopt"(i32 0, i32 10, i32 0, i32 10, i32 0, i32 4, i32 1, i32 7, i8* null, i32 7, i8* null, i32 0, i8 addrspace(1)* undef, i32 7, i8* null, i32 0, i8 addrspace(1)* undef, i32 10, i32 2, i32 12, i32 0, i32 3, i32 1, i32 0, i8 addrspace(1)* undef, i32 0, i8 addrspace(1)* undef, i32 7, i8* null, i32 0, i8 addrspace(1)* undef, i32 2, i32 8, i32 4, i32 0, i32 1, i32 0, i32 7, i8* null, i32 2, i32 12, i32 7, i32 0, i32 2, i32 0, i32 7, i8* null, i32 7, i8* null, i32 10, i32 18, i32 96, i32 0, i32 9, i32 1, i32 0, i8 addrspace(1)* undef, i32 0, i8 addrspace(1)* undef, i32 7, i8* null, i32 7, i8* null, i32 0, i8 addrspace(1)* undef, i32 3, i32 %tmp16, i32 3, i32 0, i32 7, i8* null, i32 7, i8* null, i32 0, i8 addrspace(1)* undef, i32 0, i32 9, i32 51, i32 0, i32 3, i32 0, i32 7, i8* null, i32 7, i8* null, i32 7, i8* null), "gc-live"() ]
+    %tmp38 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr nonnull elementtype(void (i32)) @ham, i32 1, i32 2, i32 3, i32 0, i32 0) [ "deopt"(i32 0, i32 10, i32 0, i32 10, i32 0, i32 4, i32 1, i32 7, ptr null, i32 7, ptr null, i32 0, ptr addrspace(1) undef, i32 7, ptr null, i32 0, ptr addrspace(1) undef, i32 10, i32 2, i32 12, i32 0, i32 3, i32 1, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 7, ptr null, i32 0, ptr addrspace(1) undef, i32 2, i32 8, i32 4, i32 0, i32 1, i32 0, i32 7, ptr null, i32 2, i32 12, i32 7, i32 0, i32 2, i32 0, i32 7, ptr null, i32 7, ptr null, i32 10, i32 18, i32 96, i32 0, i32 9, i32 1, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 7, ptr null, i32 7, ptr null, i32 0, ptr addrspace(1) undef, i32 3, i32 %tmp16, i32 3, i32 0, i32 7, ptr null, i32 7, ptr null, i32 0, ptr addrspace(1) undef, i32 0, i32 9, i32 51, i32 0, i32 3, i32 0, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null), "gc-live"() ]
     unreachable
 
   bb39:                                             ; preds = %bb35
@@ -72,7 +72,7 @@
 
   bb41:                                             ; preds = %bb7, %bb40
     %tmp42 = phi i32 [ 0, %bb40 ], [ %tmp12, %bb7 ]
-    %tmp43 = call token (i64, i32, void (i32)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidi32f(i64 2882400000, i32 0, void (i32)* nonnull elementtype(void (i32)) @ham, i32 1, i32 2, i32 -39, i32 0, i32 0) [ "deopt"(i32 0, i32 10, i32 0, i32 10, i32 0, i32 4, i32 1, i32 7, i8* null, i32 7, i8* null, i32 0, i8 addrspace(1)* undef, i32 7, i8* null, i32 0, i8 addrspace(1)* undef, i32 10, i32 2, i32 12, i32 0, i32 3, i32 1, i32 0, i8 addrspace(1)* undef, i32 0, i8 addrspace(1)* undef, i32 7, i8* null, i32 0, i8 addrspace(1)* undef, i32 2, i32 8, i32 4, i32 0, i32 1, i32 0, i32 7, i8* null, i32 2, i32 12, i32 7, i32 0, i32 2, i32 0, i32 7, i8* null, i32 7, i8* null, i32 10, i32 18, i32 63, i32 0, i32 9, i32 1, i32 0, i8 addrspace(1)* undef, i32 0, i8 addrspace(1)* undef, i32 7, i8* null, i32 7, i8* null, i32 0, i8 addrspace(1)* undef, i32 3, i32 %tmp42, i32 3, i32 0, i32 3, i32 undef, i32 7, i8* null, i32 0, i8 addrspace(1)* undef, i32 2, i32 33, i32 6, i32 0, i32 5, i32 0, i32 0, i8 addrspace(1)* undef, i32 3, i32 %tmp42, i32 3, i32 undef, i32 0, i8 addrspace(1)* undef, i32 3, i32 undef, i32 1, i32 34, i32 14, i32 0, i32 3, i32 0, i32 3, i32 %tmp42, i32 3, i32 undef, i32 3, i32 0), "gc-live"() ]
+    %tmp43 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr nonnull elementtype(void (i32)) @ham, i32 1, i32 2, i32 -39, i32 0, i32 0) [ "deopt"(i32 0, i32 10, i32 0, i32 10, i32 0, i32 4, i32 1, i32 7, ptr null, i32 7, ptr null, i32 0, ptr addrspace(1) undef, i32 7, ptr null, i32 0, ptr addrspace(1) undef, i32 10, i32 2, i32 12, i32 0, i32 3, i32 1, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 7, ptr null, i32 0, ptr addrspace(1) undef, i32 2, i32 8, i32 4, i32 0, i32 1, i32 0, i32 7, ptr null, i32 2, i32 12, i32 7, i32 0, i32 2, i32 0, i32 7, ptr null, i32 7, ptr null, i32 10, i32 18, i32 63, i32 0, i32 9, i32 1, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 7, ptr null, i32 7, ptr null, i32 0, ptr addrspace(1) undef, i32 3, i32 %tmp42, i32 3, i32 0, i32 3, i32 undef, i32 7, ptr null, i32 0, ptr addrspace(1) undef, i32 2, i32 33, i32 6, i32 0, i32 5, i32 0, i32 0, ptr addrspace(1) undef, i32 3, i32 %tmp42, i32 3, i32 undef, i32 0, ptr addrspace(1) undef, i32 3, i32 undef, i32 1, i32 34, i32 14, i32 0, i32 3, i32 0, i32 3, i32 %tmp42, i32 3, i32 undef, i32 3, i32 0), "gc-live"() ]
     unreachable
 
   bb44:                                             ; preds = %bb27
@@ -83,49 +83,47 @@
     br i1 undef, label %bb56, label %bb46
 
   bb46:                                             ; preds = %bb45
-    %tmp47 = invoke token (i64, i32, void (i32, i8 addrspace(1)*, i8 addrspace(1)*, i32, i32)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidi32p1i8p1i8i32i32f(i64 1, i32 16, void (i32, i8 addrspace(1)*, i8 addrspace(1)*, i32, i32)* nonnull elementtype(void (i32, i8 addrspace(1)*, i8 addrspace(1)*, i32, i32)) @barney, i32 5, i32 0, i32 undef, i8 addrspace(1)* nonnull undef, i8 addrspace(1)* undef, i32 0, i32 undef, i32 0, i32 0) [ "deopt"(i32 0, i32 10, i32 0, i32 10, i32 0, i32 4, i32 1, i32 7, i8* null, i32 7, i8* null, i32 0, i8 addrspace(1)* %tmp30, i32 7, i8* null, i32 0, i8 addrspace(1)* %tmp30, i32 10, i32 2, i32 19, i32 0, i32 3, i32 1, i32 0, i8 addrspace(1)* %tmp30, i32 0, i8 addrspace(1)* %tmp30, i32 7, i8* null, i32 0, i8 addrspace(1)* %tmp30, i32 8, i32 9, i32 34, i32 0, i32 3, i32 1, i32 0, i8 addrspace(1)* undef, i32 0, i8 addrspace(1)* undef, i32 7, i8* null, i32 0, i8 addrspace(1)* undef), "gc-live"(i8 addrspace(1)* %tmp30, i8 addrspace(1)* undef, i8 addrspace(1)* undef) ]
+    %tmp47 = invoke token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 1, i32 16, ptr nonnull elementtype(void (i32, ptr addrspace(1), ptr addrspace(1), i32, i32)) @barney, i32 5, i32 0, i32 undef, ptr addrspace(1) nonnull undef, ptr addrspace(1) undef, i32 0, i32 undef, i32 0, i32 0) [ "deopt"(i32 0, i32 10, i32 0, i32 10, i32 0, i32 4, i32 1, i32 7, ptr null, i32 7, ptr null, i32 0, ptr addrspace(1) %tmp30, i32 7, ptr null, i32 0, ptr addrspace(1) %tmp30, i32 10, i32 2, i32 19, i32 0, i32 3, i32 1, i32 0, ptr addrspace(1) %tmp30, i32 0, ptr addrspace(1) %tmp30, i32 7, ptr null, i32 0, ptr addrspace(1) %tmp30, i32 8, i32 9, i32 34, i32 0, i32 3, i32 1, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 7, ptr null, i32 0, ptr addrspace(1) undef), "gc-live"(ptr addrspace(1) %tmp30, ptr addrspace(1) undef, ptr addrspace(1) undef) ]
             to label %bb48 unwind label %bb52
 
   bb48:                                             ; preds = %bb46
-    %tmp49 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tmp47, i32 0, i32 0) ; (%tmp30, %tmp30)
+    %tmp49 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tmp47, i32 0, i32 0) ; (%tmp30, %tmp30)
     br label %bb56
 
   bb50:                                             ; preds = %bb44
-    %tmp51 = call token (i64, i32, void (i32)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidi32f(i64 2882400000, i32 0, void (i32)* nonnull elementtype(void (i32)) @ham, i32 1, i32 2, i32 10, i32 0, i32 0) [ "deopt"(i32 0, i32 10, i32 0, i32 10, i32 0, i32 4, i32 1, i32 7, i8* null, i32 7, i8* null, i32 0, i8 addrspace(1)* %tmp30, i32 7, i8* null, i32 0, i8 addrspace(1)* %tmp30, i32 10, i32 2, i32 19, i32 0, i32 3, i32 1, i32 0, i8 addrspace(1)* %tmp30, i32 0, i8 addrspace(1)* %tmp30, i32 7, i8* null, i32 0, i8 addrspace(1)* %tmp30, i32 1, i32 9, i32 6, i32 1, i32 3, i32 0, i32 0, i8 addrspace(1)* null, i32 0, i8 addrspace(1)* undef, i32 0, i8 addrspace(1)* null, i32 7, i8* null), "gc-live"() ]
+    %tmp51 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr nonnull elementtype(void (i32)) @ham, i32 1, i32 2, i32 10, i32 0, i32 0) [ "deopt"(i32 0, i32 10, i32 0, i32 10, i32 0, i32 4, i32 1, i32 7, ptr null, i32 7, ptr null, i32 0, ptr addrspace(1) %tmp30, i32 7, ptr null, i32 0, ptr addrspace(1) %tmp30, i32 10, i32 2, i32 19, i32 0, i32 3, i32 1, i32 0, ptr addrspace(1) %tmp30, i32 0, ptr addrspace(1) %tmp30, i32 7, ptr null, i32 0, ptr addrspace(1) %tmp30, i32 1, i32 9, i32 6, i32 1, i32 3, i32 0, i32 0, ptr addrspace(1) null, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) null, i32 7, ptr null), "gc-live"() ]
     unreachable
 
   bb52:                                             ; preds = %bb46
     %tmp53 = landingpad token
             cleanup
-    %tmp54 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %tmp53, i32 0, i32 0) ; (%tmp30, %tmp30)
-    %tmp55 = call token (i64, i32, void (i32)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidi32f(i64 2882400000, i32 0, void (i32)* nonnull elementtype(void (i32)) @ham, i32 1, i32 2, i32 3, i32 0, i32 0) [ "deopt"(i32 0, i32 10, i32 0, i32 10, i32 0, i32 4, i32 1, i32 7, i8* null, i32 7, i8* null, i32 0, i8 addrspace(1)* %tmp54, i32 7, i8* null, i32 0, i8 addrspace(1)* %tmp54, i32 10, i32 2, i32 19, i32 0, i32 3, i32 1, i32 0, i8 addrspace(1)* %tmp54, i32 0, i8 addrspace(1)* %tmp54, i32 7, i8* null, i32 0, i8 addrspace(1)* %tmp54, i32 0, i32 9, i32 51, i32 0, i32 3, i32 0, i32 7, i8* null, i32 7, i8* null, i32 7, i8* null), "gc-live"() ]
+    %tmp54 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %tmp53, i32 0, i32 0) ; (%tmp30, %tmp30)
+    %tmp55 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr nonnull elementtype(void (i32)) @ham, i32 1, i32 2, i32 3, i32 0, i32 0) [ "deopt"(i32 0, i32 10, i32 0, i32 10, i32 0, i32 4, i32 1, i32 7, ptr null, i32 7, ptr null, i32 0, ptr addrspace(1) %tmp54, i32 7, ptr null, i32 0, ptr addrspace(1) %tmp54, i32 10, i32 2, i32 19, i32 0, i32 3, i32 1, i32 0, ptr addrspace(1) %tmp54, i32 0, ptr addrspace(1) %tmp54, i32 7, ptr null, i32 0, ptr addrspace(1) %tmp54, i32 0, i32 9, i32 51, i32 0, i32 3, i32 0, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null), "gc-live"() ]
     unreachable
 
   bb56:                                             ; preds = %bb48, %bb45
-    %tmp57 = phi i8 addrspace(1)* [ %tmp30, %bb45 ], [ %tmp49, %bb48 ]
-    %tmp58 = call token (i64, i32, void (i32)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidi32f(i64 2882400000, i32 0, void (i32)* nonnull elementtype(void (i32)) @ham, i32 1, i32 2, i32 10, i32 0, i32 0) [ "deopt"(i32 0, i32 10, i32 0, i32 10, i32 0, i32 4, i32 1, i32 7, i8* null, i32 7, i8* null, i32 0, i8 addrspace(1)* %tmp57, i32 7, i8* null, i32 0, i8 addrspace(1)* %tmp57, i32 9, i32 2, i32 26, i32 1, i32 3, i32 1, i32 0, i8 addrspace(1)* null, i32 0, i8 addrspace(1)* %tmp57, i32 0, i8 addrspace(1)* %tmp57, i32 7, i8* null, i32 0, i8 addrspace(1)* %tmp57), "gc-live"() ]
+    %tmp57 = phi ptr addrspace(1) [ %tmp30, %bb45 ], [ %tmp49, %bb48 ]
+    %tmp58 = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr nonnull elementtype(void (i32)) @ham, i32 1, i32 2, i32 10, i32 0, i32 0) [ "deopt"(i32 0, i32 10, i32 0, i32 10, i32 0, i32 4, i32 1, i32 7, ptr null, i32 7, ptr null, i32 0, ptr addrspace(1) %tmp57, i32 7, ptr null, i32 0, ptr addrspace(1) %tmp57, i32 9, i32 2, i32 26, i32 1, i32 3, i32 1, i32 0, ptr addrspace(1) null, i32 0, ptr addrspace(1) %tmp57, i32 0, ptr addrspace(1) %tmp57, i32 7, ptr null, i32 0, ptr addrspace(1) %tmp57), "gc-live"() ]
     unreachable
   }
 
-  declare i32* @widget()
+  declare ptr @widget()
 
-  declare void @quux(i8 addrspace(1)*, i8 addrspace(1)*)
+  declare void @quux(ptr addrspace(1), ptr addrspace(1))
 
   declare void @hoge.1()
 
-  declare void @barney(i32, i8 addrspace(1)*, i8 addrspace(1)*, i32, i32)
+  declare void @barney(i32, ptr addrspace(1), ptr addrspace(1), i32, i32)
 
   ; Function Attrs: nounwind readnone
-  declare i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token, i32 immarg, i32 immarg) #0
+  declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32 immarg, i32 immarg) #0
 
-  declare token @llvm.experimental.gc.statepoint.p0f_isVoidp1i8p1i8f(i64 immarg, i32 immarg, void (i8 addrspace(1)*, i8 addrspace(1)*)*, i32 immarg, i32 immarg, ...)
+  declare token @llvm.experimental.gc.statepoint.p0(i64 immarg, i32 immarg, ptr, i32 immarg, i32 immarg, ...)
 
   declare void @ham(i32)
 
-  declare token @llvm.experimental.gc.statepoint.p0f_isVoidi32f(i64 immarg, i32 immarg, void (i32)*, i32 immarg, i32 immarg, ...)
-
-  declare token @llvm.experimental.gc.statepoint.p0f_isVoidi32p1i8p1i8i32i32f(i64 immarg, i32 immarg, void (i32, i8 addrspace(1)*, i8 addrspace(1)*, i32, i32)*, i32 immarg, i32 immarg, ...)
-
+  
+  
   declare void @wombat()
 
   attributes #0 = { nounwind readnone }
@@ -272,7 +270,7 @@ body:             |
   ; CHECK-NEXT: bb.1.bb6:
   ; CHECK-NEXT:   successors: %bb.2(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm undef %17:gr64, 1, $noreg, 0, $noreg :: (load unordered (s64) from `i8 addrspace(1)* addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm undef %17:gr64, 1, $noreg, 0, $noreg :: (load unordered (s64) from `ptr addrspace(1) undef`, addrspace 1)
   ; CHECK-NEXT:   [[NOT64r:%[0-9]+]]:gr64 = NOT64r [[MOV64rm]]
   ; CHECK-NEXT:   MOV64mr %stack.1, 1, $noreg, 0, $noreg, [[NOT64r]] :: (store (s64) into %stack.1)
   ; CHECK-NEXT:   undef %48.sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags
@@ -306,7 +304,7 @@ body:             |
   ; CHECK-NEXT: bb.6.bb7:
   ; CHECK-NEXT:   successors: %bb.16(0x00000000), %bb.7(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm undef %24:gr64, 1, $noreg, 0, $noreg :: (load unordered (s32) from `i32 addrspace(1)* undef`, align 8, addrspace 1)
+  ; CHECK-NEXT:   [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm undef %24:gr64, 1, $noreg, 0, $noreg :: (load unordered (s32) from `ptr addrspace(1) undef`, align 8, addrspace 1)
   ; CHECK-NEXT:   [[ADD32rr:%[0-9]+]]:gr32 = ADD32rr [[ADD32rr]], [[MOV32r0_1]], implicit-def dead $eflags
   ; CHECK-NEXT:   CMP32rr [[MOV32r0_1]], [[ADD32rr]], implicit-def $eflags
   ; CHECK-NEXT:   JCC_1 %bb.16, 15, implicit $eflags
@@ -468,7 +466,7 @@ body:             |
   bb.1.bb6:
     successors: %bb.2(0x80000000)
 
-    %0:gr64 = MOV64rm undef %17:gr64, 1, $noreg, 0, $noreg :: (load unordered (s64) from `i8 addrspace(1)* addrspace(1)* undef`, addrspace 1)
+    %0:gr64 = MOV64rm undef %17:gr64, 1, $noreg, 0, $noreg :: (load unordered (s64) from `ptr addrspace(1) undef`, addrspace 1)
     %0:gr64 = NOT64r %0
     undef %48.sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags
     %1:gr64 = IMPLICIT_DEF
@@ -501,7 +499,7 @@ body:             |
   bb.4.bb7:
     successors: %bb.13(0x00000000), %bb.5(0x80000000)
 
-    %5:gr32 = MOV32rm undef %24:gr64, 1, $noreg, 0, $noreg :: (load unordered (s32) from `i32 addrspace(1)* undef`, align 8, addrspace 1)
+    %5:gr32 = MOV32rm undef %24:gr64, 1, $noreg, 0, $noreg :: (load unordered (s32) from `ptr addrspace(1) undef`, align 8, addrspace 1)
     %5:gr32 = ADD32rr %5, %77, implicit-def dead $eflags
     CMP32rr %77, %5, implicit-def $eflags
     JCC_1 %bb.13, 15, implicit $eflags

diff  --git a/llvm/test/CodeGen/X86/statepoint-vreg-folding.mir b/llvm/test/CodeGen/X86/statepoint-vreg-folding.mir
index 50802c171e377..e24d5e8af1f55 100644
--- a/llvm/test/CodeGen/X86/statepoint-vreg-folding.mir
+++ b/llvm/test/CodeGen/X86/statepoint-vreg-folding.mir
@@ -9,53 +9,53 @@
 
   declare void @func()
 
-  define i32 @test_spill(i32 addrspace(1)* %arg00, i32 addrspace(1)* %arg01, i32 addrspace(1)* %arg02, i32 addrspace(1)* %arg03, i32 addrspace(1)* %arg04, i32 addrspace(1)* %arg05, i32 addrspace(1)* %arg06, i32 addrspace(1)* %arg07, i32 addrspace(1)* %arg08) gc "statepoint-example" {
-    %token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* %arg00, i32 addrspace(1)* %arg01, i32 addrspace(1)* %arg02, i32 addrspace(1)* %arg03, i32 addrspace(1)* %arg04, i32 addrspace(1)* %arg05, i32 addrspace(1)* %arg06, i32 addrspace(1)* %arg07, i32 addrspace(1)* %arg08) ]
-    %rel00 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %token, i32 0, i32 0) ; (%arg00, %arg00)
-    %rel01 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %token, i32 1, i32 1) ; (%arg01, %arg01)
-    %rel02 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %token, i32 2, i32 2) ; (%arg02, %arg02)
-    %rel03 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %token, i32 3, i32 3) ; (%arg03, %arg03)
-    %rel04 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %token, i32 4, i32 4) ; (%arg04, %arg04)
-    %rel05 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %token, i32 5, i32 5) ; (%arg05, %arg05)
-    %rel06 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %token, i32 6, i32 6) ; (%arg06, %arg06)
-    %rel07 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %token, i32 7, i32 7) ; (%arg07, %arg07)
-    %rel08 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %token, i32 8, i32 8) ; (%arg08, %arg08)
-    %gep00 = getelementptr i32, i32 addrspace(1)* %rel00, i64 1
-    %gep01 = getelementptr i32, i32 addrspace(1)* %rel01, i64 2
-    %gep02 = getelementptr i32, i32 addrspace(1)* %rel02, i64 3
-    %gep03 = getelementptr i32, i32 addrspace(1)* %rel03, i64 4
-    %gep04 = getelementptr i32, i32 addrspace(1)* %rel04, i64 5
-    %gep05 = getelementptr i32, i32 addrspace(1)* %rel05, i64 6
-    %gep06 = getelementptr i32, i32 addrspace(1)* %rel06, i64 7
-    %gep07 = getelementptr i32, i32 addrspace(1)* %rel07, i64 8
-    %gep08 = getelementptr i32, i32 addrspace(1)* %rel08, i64 9
-    %val00 = load i32, i32 addrspace(1)* %gep00, align 4
-    %val01 = load i32, i32 addrspace(1)* %gep01, align 4
+  define i32 @test_spill(ptr addrspace(1) %arg00, ptr addrspace(1) %arg01, ptr addrspace(1) %arg02, ptr addrspace(1) %arg03, ptr addrspace(1) %arg04, ptr addrspace(1) %arg05, ptr addrspace(1) %arg06, ptr addrspace(1) %arg07, ptr addrspace(1) %arg08) gc "statepoint-example" {
+    %token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @func, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) %arg00, ptr addrspace(1) %arg01, ptr addrspace(1) %arg02, ptr addrspace(1) %arg03, ptr addrspace(1) %arg04, ptr addrspace(1) %arg05, ptr addrspace(1) %arg06, ptr addrspace(1) %arg07, ptr addrspace(1) %arg08) ]
+    %rel00 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %token, i32 0, i32 0) ; (%arg00, %arg00)
+    %rel01 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %token, i32 1, i32 1) ; (%arg01, %arg01)
+    %rel02 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %token, i32 2, i32 2) ; (%arg02, %arg02)
+    %rel03 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %token, i32 3, i32 3) ; (%arg03, %arg03)
+    %rel04 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %token, i32 4, i32 4) ; (%arg04, %arg04)
+    %rel05 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %token, i32 5, i32 5) ; (%arg05, %arg05)
+    %rel06 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %token, i32 6, i32 6) ; (%arg06, %arg06)
+    %rel07 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %token, i32 7, i32 7) ; (%arg07, %arg07)
+    %rel08 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %token, i32 8, i32 8) ; (%arg08, %arg08)
+    %gep00 = getelementptr i32, ptr addrspace(1) %rel00, i64 1
+    %gep01 = getelementptr i32, ptr addrspace(1) %rel01, i64 2
+    %gep02 = getelementptr i32, ptr addrspace(1) %rel02, i64 3
+    %gep03 = getelementptr i32, ptr addrspace(1) %rel03, i64 4
+    %gep04 = getelementptr i32, ptr addrspace(1) %rel04, i64 5
+    %gep05 = getelementptr i32, ptr addrspace(1) %rel05, i64 6
+    %gep06 = getelementptr i32, ptr addrspace(1) %rel06, i64 7
+    %gep07 = getelementptr i32, ptr addrspace(1) %rel07, i64 8
+    %gep08 = getelementptr i32, ptr addrspace(1) %rel08, i64 9
+    %val00 = load i32, ptr addrspace(1) %gep00, align 4
+    %val01 = load i32, ptr addrspace(1) %gep01, align 4
     %sum01 = add i32 %val00, %val01
-    %val02 = load i32, i32 addrspace(1)* %gep02, align 4
+    %val02 = load i32, ptr addrspace(1) %gep02, align 4
     %sum02 = add i32 %sum01, %val02
-    %val03 = load i32, i32 addrspace(1)* %gep03, align 4
+    %val03 = load i32, ptr addrspace(1) %gep03, align 4
     %sum03 = add i32 %sum02, %val03
-    %val04 = load i32, i32 addrspace(1)* %gep04, align 4
+    %val04 = load i32, ptr addrspace(1) %gep04, align 4
     %sum04 = add i32 %sum03, %val04
-    %val05 = load i32, i32 addrspace(1)* %gep05, align 4
+    %val05 = load i32, ptr addrspace(1) %gep05, align 4
     %sum05 = add i32 %sum04, %val05
-    %val06 = load i32, i32 addrspace(1)* %gep06, align 4
+    %val06 = load i32, ptr addrspace(1) %gep06, align 4
     %sum06 = add i32 %sum05, %val06
-    %val07 = load i32, i32 addrspace(1)* %gep07, align 4
+    %val07 = load i32, ptr addrspace(1) %gep07, align 4
     %sum07 = add i32 %sum06, %val07
-    %val08 = load i32, i32 addrspace(1)* %gep08, align 4
+    %val08 = load i32, ptr addrspace(1) %gep08, align 4
     %sum08 = add i32 %sum07, %val08
     ret i32 %sum08
   }
 
   ; Function Attrs: nounwind readonly
-  declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token, i32 immarg, i32 immarg) #0
+  declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32 immarg, i32 immarg) #0
 
-  declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 immarg, i32 immarg, void ()*, i32 immarg, i32 immarg, ...)
+  declare token @llvm.experimental.gc.statepoint.p0(i64 immarg, i32 immarg, ptr, i32 immarg, i32 immarg, ...)
 
   ; Function Attrs: nounwind
-  declare void @llvm.stackprotector(i8*, i8**) #1
+  declare void @llvm.stackprotector(ptr, ptr) #1
 
   attributes #0 = { nounwind readonly }
   attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/X86/statepoint-vreg-twoaddr.mir b/llvm/test/CodeGen/X86/statepoint-vreg-twoaddr.mir
index 5a487d3662546..9e29739812d32 100644
--- a/llvm/test/CodeGen/X86/statepoint-vreg-twoaddr.mir
+++ b/llvm/test/CodeGen/X86/statepoint-vreg-twoaddr.mir
@@ -11,26 +11,26 @@
 
   declare i1 @return_i1()
 
-  declare void @consume(i32 addrspace(1)*)
-  declare void @consume1(i8 addrspace(1)*, i64 addrspace(1)*)
+  declare void @consume(ptr addrspace(1))
+  declare void @consume1(ptr addrspace(1), ptr addrspace(1))
 
-  define i1 @test_relocate(i32 addrspace(1)* %a) gc "statepoint-example" {
+  define i1 @test_relocate(ptr addrspace(1) %a) gc "statepoint-example" {
   entry:
-    %safepoint_token = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* %a) ]
-    %rel1 = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %safepoint_token, i32 0, i32 0) ; (%a, %a)
+    %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) %a) ]
+    %rel1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0) ; (%a, %a)
     %res1 = call zeroext i1 @llvm.experimental.gc.result.i1(token %safepoint_token)
-    call void @consume(i32 addrspace(1)* %rel1)
+    call void @consume(ptr addrspace(1) %rel1)
     ret i1 %res1
   }
 
-  define void @test_duplicate_gcregs(i8 addrspace(1)* %a) gc "statepoint-example" {
+  define void @test_duplicate_gcregs(ptr addrspace(1) %a) gc "statepoint-example" {
     ret void
   }
 
   ; Function Attrs: nounwind readnone
-  declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token, i32 immarg, i32 immarg) #0
+  declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32 immarg, i32 immarg) #0
 
-  declare token @llvm.experimental.gc.statepoint.p0f_i1f(i64 immarg, i32 immarg, i1 ()*, i32 immarg, i32 immarg, ...)
+  declare token @llvm.experimental.gc.statepoint.p0(i64 immarg, i32 immarg, ptr, i32 immarg, i32 immarg, ...)
 
   ; Function Attrs: nounwind readnone
   declare i1 @llvm.experimental.gc.result.i1(token) #0

diff  --git a/llvm/test/CodeGen/X86/statepoint-vreg.mir b/llvm/test/CodeGen/X86/statepoint-vreg.mir
index b7cbc1703f83b..bfeadfc93da8f 100644
--- a/llvm/test/CodeGen/X86/statepoint-vreg.mir
+++ b/llvm/test/CodeGen/X86/statepoint-vreg.mir
@@ -9,7 +9,7 @@
 
   declare void @bar()
 
-  define i32 @test_basic(i32 addrspace(1)* %obj1, i32 addrspace(1)* %obj2) gc "statepoint-example" {
+  define i32 @test_basic(ptr addrspace(1) %obj1, ptr addrspace(1) %obj2) gc "statepoint-example" {
   ; CHECK-LABEL: test_basic:
   ; CHECK:       # %bb.0:
   ; CHECK-NEXT:    pushq %r14
@@ -33,11 +33,11 @@
   ; CHECK-NEXT:    popq %r14
   ; CHECK-NEXT:    .cfi_def_cfa_offset 8
   ; CHECK-NEXT:    retq
-    %token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 2882400000, i32 0, void ()* elementtype(void ()) @bar, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(i32 addrspace(1)* %obj1, i32 addrspace(1)* %obj2) ]
-    %rel1 = call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %token, i32 0, i32 0) ; (%obj1, %obj1)
-    %rel2 = call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %token, i32 1, i32 1) ; (%obj2, %obj2)
-    %a = load i32, i32 addrspace(1)* %rel1, align 4
-    %b = load i32, i32 addrspace(1)* %rel2, align 4
+    %token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 2882400000, i32 0, ptr elementtype(void ()) @bar, i32 0, i32 0, i32 0, i32 0) [ "gc-live"(ptr addrspace(1) %obj1, ptr addrspace(1) %obj2) ]
+    %rel1 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %token, i32 0, i32 0) ; (%obj1, %obj1)
+    %rel2 = call coldcc ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %token, i32 1, i32 1) ; (%obj2, %obj2)
+    %a = load i32, ptr addrspace(1) %rel1, align 4
+    %b = load i32, ptr addrspace(1) %rel2, align 4
     %c = add i32 %a, %b
     ret i32 %c
   }
@@ -109,10 +109,10 @@
   ; CHECK-NEXT:    .short	0
   ; CHECK-NEXT:    .p2align	3
 
-  declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 immarg, i32 immarg, void ()*, i32 immarg, i32 immarg, ...)
+  declare token @llvm.experimental.gc.statepoint.p0(i64 immarg, i32 immarg, ptr, i32 immarg, i32 immarg, ...)
 
   ; Function Attrs: nounwind readonly
-  declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token, i32 immarg, i32 immarg) #0
+  declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32 immarg, i32 immarg) #0
 
   attributes #0 = { nounwind readonly }
   attributes #1 = { nounwind }

diff  --git a/llvm/test/CodeGen/X86/tail-call-conditional.mir b/llvm/test/CodeGen/X86/tail-call-conditional.mir
index 7d1c8d6a0630a..05748f0a2eae3 100644
--- a/llvm/test/CodeGen/X86/tail-call-conditional.mir
+++ b/llvm/test/CodeGen/X86/tail-call-conditional.mir
@@ -5,24 +5,24 @@
 --- |
   target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 
-  define i64 @test(i64 %arg, i8* %arg1) optsize {
+  define i64 @test(i64 %arg, ptr %arg1) optsize {
     %tmp = icmp ult i64 %arg, 100
     br i1 %tmp, label %1, label %4
 
     %tmp3 = icmp ult i64 %arg, 10
     br i1 %tmp3, label %2, label %3
 
-    %tmp5 = tail call i64 @f1(i8* %arg1, i64 %arg)
+    %tmp5 = tail call i64 @f1(ptr %arg1, i64 %arg)
     ret i64 %tmp5
 
-    %tmp7 = tail call i64 @f2(i8* %arg1, i64 %arg)
+    %tmp7 = tail call i64 @f2(ptr %arg1, i64 %arg)
     ret i64 %tmp7
 
     ret i64 123
   }
 
-  declare i64 @f1(i8*, i64)
-  declare i64 @f2(i8*, i64)
+  declare i64 @f1(ptr, i64)
+  declare i64 @f2(ptr, i64)
 
 ...
 ---

diff  --git a/llvm/test/CodeGen/X86/taildup-callsiteinfo.mir b/llvm/test/CodeGen/X86/taildup-callsiteinfo.mir
index 266dcc0bd0483..def9e2e1eee0d 100644
--- a/llvm/test/CodeGen/X86/taildup-callsiteinfo.mir
+++ b/llvm/test/CodeGen/X86/taildup-callsiteinfo.mir
@@ -12,23 +12,23 @@
   target datalayout = "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
   target triple = "x86_64-pc-windows-msvc19.22.27905"
 
-  define dso_local void @taildupit(i32* readonly %size_ptr) {
+  define dso_local void @taildupit(ptr readonly %size_ptr) {
   entry:
-    %tobool = icmp eq i32* %size_ptr, null
+    %tobool = icmp eq ptr %size_ptr, null
     br i1 %tobool, label %cond.end, label %cond.true
 
   cond.true:                                        ; preds = %entry
-    %0 = load i32, i32* %size_ptr, align 4
+    %0 = load i32, ptr %size_ptr, align 4
     br label %cond.end
 
   cond.end:                                         ; preds = %cond.true, %entry
     %cond = phi i32 [ %0, %cond.true ], [ 1, %entry ]
-    %call = tail call i8* @alloc(i32 %cond)
+    %call = tail call ptr @alloc(i32 %cond)
     tail call void @f2()
     ret void
   }
 
-  declare dso_local i8* @alloc(i32)
+  declare dso_local ptr @alloc(i32)
 
   declare dso_local void @f2()
 

diff  --git a/llvm/test/CodeGen/X86/unfoldMemoryOperand.mir b/llvm/test/CodeGen/X86/unfoldMemoryOperand.mir
index b844514aca326..4c715b894fae8 100644
--- a/llvm/test/CodeGen/X86/unfoldMemoryOperand.mir
+++ b/llvm/test/CodeGen/X86/unfoldMemoryOperand.mir
@@ -3,22 +3,21 @@
 --- |
   @x = dso_local global i32 0, align 4
   @z = dso_local local_unnamed_addr global [1024 x i32] zeroinitializer, align 16
-  @y = dso_local local_unnamed_addr constant i32* null, align 8
+  @y = dso_local local_unnamed_addr constant ptr null, align 8
 
   ; Function Attrs: nofree norecurse nosync nounwind uwtable writeonly mustprogress
   define dso_local void @_Z3foov() local_unnamed_addr #0 {
-    %1 = load i32*, i32** @y, align 8, !tbaa !3
-    %2 = icmp eq i32* %1, @x
+    %1 = load ptr, ptr @y, align 8, !tbaa !3
+    %2 = icmp eq ptr %1, @x
     %3 = zext i1 %2 to i32
     br label %5
   4:                                                ; preds = %5
     ret void
   5:                                                ; preds = %5, %0
     %lsr.iv = phi i64 [ %lsr.iv.next, %5 ], [ -4096, %0 ]
-    %uglygep = getelementptr i8, i8* bitcast ([1024 x i32]* @z to i8*), i64 %lsr.iv
-    %uglygep2 = bitcast i8* %uglygep to i32*
-    %scevgep = getelementptr i32, i32* %uglygep2, i64 1024
-    store i32 %3, i32* %scevgep, align 4, !tbaa !7
+    %uglygep = getelementptr i8, ptr @z, i64 %lsr.iv
+    %scevgep = getelementptr i32, ptr %uglygep, i64 1024
+    store i32 %3, ptr %scevgep, align 4, !tbaa !7
     %lsr.iv.next = add nsw i64 %lsr.iv, 4
     %6 = icmp eq i64 %lsr.iv.next, 0
     br i1 %6, label %4, label %5, !llvm.loop !9

diff  --git a/llvm/test/CodeGen/X86/win64-eh-empty-block-2.mir b/llvm/test/CodeGen/X86/win64-eh-empty-block-2.mir
index f7ca2fc279164..df3eb2f8aed72 100644
--- a/llvm/test/CodeGen/X86/win64-eh-empty-block-2.mir
+++ b/llvm/test/CodeGen/X86/win64-eh-empty-block-2.mir
@@ -39,7 +39,7 @@
   target triple = "x86_64-unknown-windows-msvc19.11.0"
 
   ; Function Attrs: uwtable
-  define dso_local i32 @"?multi_throw@@YAH_N00 at Z"(i1 zeroext %c1, i1 zeroext %c2, i1 zeroext %c3) local_unnamed_addr #0 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) {
+  define dso_local i32 @"?multi_throw@@YAH_N00 at Z"(i1 zeroext %c1, i1 zeroext %c2, i1 zeroext %c3) local_unnamed_addr #0 personality ptr @__CxxFrameHandler3 {
   entry:
     br i1 %c1, label %if.then, label %if.end
 
@@ -68,7 +68,7 @@
     %0 = catchswitch within none [label %catch] unwind to caller
 
   catch:                                            ; preds = %catch.dispatch
-    %1 = catchpad within %0 [i8* null, i32 64, i8* null]
+    %1 = catchpad within %0 [ptr null, i32 64, ptr null]
     catchret from %1 to label %return
 
   return:                                           ; preds = %catch, %if.end6

diff  --git a/llvm/test/CodeGen/X86/x87-reg-usage.mir b/llvm/test/CodeGen/X86/x87-reg-usage.mir
index bf4f99bc1b304..23497945793b5 100644
--- a/llvm/test/CodeGen/X86/x87-reg-usage.mir
+++ b/llvm/test/CodeGen/X86/x87-reg-usage.mir
@@ -5,56 +5,56 @@
 --- |
   declare float @llvm.sqrt.f32(float)
 
-  define void @f1(float* %a, float* %b) {
-    %1 = load float, float* %a, align 4
-    %2 = load float, float* %b, align 4
+  define void @f1(ptr %a, ptr %b) {
+    %1 = load float, ptr %a, align 4
+    %2 = load float, ptr %b, align 4
     %sub = fsub float %1, %2
-    store float %sub, float* %a, align 4
+    store float %sub, ptr %a, align 4
     ret void
   }
 
-  define void @f2(double* %a, double* %b) {
-    %1 = load double, double* %a, align 8
-    %2 = load double, double* %b, align 8
+  define void @f2(ptr %a, ptr %b) {
+    %1 = load double, ptr %a, align 8
+    %2 = load double, ptr %b, align 8
     %add = fadd double %1, %2
-    store double %add, double* %a, align 8
+    store double %add, ptr %a, align 8
     ret void
   }
 
-  define void @f3(x86_fp80* %a, x86_fp80* %b) {
-    %1 = load x86_fp80, x86_fp80* %a, align 16
-    %2 = load x86_fp80, x86_fp80* %b, align 16
+  define void @f3(ptr %a, ptr %b) {
+    %1 = load x86_fp80, ptr %a, align 16
+    %2 = load x86_fp80, ptr %b, align 16
     %mul = fmul x86_fp80 %1, %2
-    store x86_fp80 %mul, x86_fp80* %a, align 16
+    store x86_fp80 %mul, ptr %a, align 16
     ret void
   }
 
-  define void @f4(float* %a, float* %b) {
-    %1 = load float, float* %a, align 4
-    %2 = load float, float* %b, align 4
+  define void @f4(ptr %a, ptr %b) {
+    %1 = load float, ptr %a, align 4
+    %2 = load float, ptr %b, align 4
     %div = fdiv float %1, %2
-    store float %div, float* %a, align 4
+    store float %div, ptr %a, align 4
     ret void
   }
 
-  define void @f5(float* %val, double* %ret) {
-    %1 = load float, float* %val, align 4
+  define void @f5(ptr %val, ptr %ret) {
+    %1 = load float, ptr %val, align 4
     %res = fpext float %1 to double
-    store double %res, double* %ret, align 8
+    store double %res, ptr %ret, align 8
     ret void
   }
 
-  define void @f6(double* %val, float* %ret) {
-    %1 = load double, double* %val, align 8
+  define void @f6(ptr %val, ptr %ret) {
+    %1 = load double, ptr %val, align 8
     %res = fptrunc double %1 to float
-    store float %res, float* %ret, align 4
+    store float %res, ptr %ret, align 4
     ret void
   }
 
-  define void @f7(float* %a) {
-    %1 = load float, float* %a, align 4
+  define void @f7(ptr %a) {
+    %1 = load float, ptr %a, align 4
     %res = call float @llvm.sqrt.f32(float %1)
-    store float %res, float* %a, align 4
+    store float %res, ptr %a, align 4
     ret void
   }
 